diff --git a/.backportrc.json b/.backportrc.json new file mode 100644 index 0000000000000..c790f84c9eb78 --- /dev/null +++ b/.backportrc.json @@ -0,0 +1,12 @@ +{ + "repoOwner": "aptos-labs", + "repoName": "aptos-core", + + "branchLabelMapping": { + "^v(\\d+).(\\d+)$": "aptos-release-v$1.$2" + }, + "commitConflicts": true, + "prTitle": "[cp][{{targetBranch}}] {{sourcePullRequest.title}}", + "targetPRLabels": ["cherry-pick"], + "assignees": ["thepomeranian"] +} diff --git a/.dockerignore b/.dockerignore index 34ec98bf8c62c..3a4bd7c40a38b 100644 --- a/.dockerignore +++ b/.dockerignore @@ -20,7 +20,7 @@ !**/*.errmap !config/src/config/test_data !aptos-move/aptos-gas-profiling/templates/ -!aptos-move/aptos-release-builder/data/release.yaml +!aptos-move/aptos-release-builder/data/*.yaml !aptos-move/aptos-release-builder/data/proposals/* !aptos-move/framework/ !aptos-move/move-examples/hello_blockchain/ diff --git a/.github/actions/rust-check-merge-base/action.yaml b/.github/actions/rust-check-merge-base/action.yaml new file mode 100644 index 0000000000000..32906536122b2 --- /dev/null +++ b/.github/actions/rust-check-merge-base/action.yaml @@ -0,0 +1,19 @@ +name: Rust Check Merge Base +description: Runs the rust merge base freshness check +inputs: + GIT_CREDENTIALS: + description: "Optional credentials to pass to git. Useful if you need to pull private repos for dependencies" + required: false + +runs: + using: composite + steps: + # The source code must be checked out by the workflow that invokes this action. 
+ - uses: aptos-labs/aptos-core/.github/actions/rust-setup@main + with: + GIT_CREDENTIALS: ${{ inputs.GIT_CREDENTIALS }} + + # Check the freshness of the merge base + - name: Check the freshness of the merge base + run: cargo x check-merge-base -vvv + shell: bash diff --git a/.github/actions/rust-doc-tests/action.yaml b/.github/actions/rust-doc-tests/action.yaml new file mode 100644 index 0000000000000..511a32565a1c6 --- /dev/null +++ b/.github/actions/rust-doc-tests/action.yaml @@ -0,0 +1,16 @@ +name: Rust Doc Tests +description: Runs only the rust doc tests +inputs: + GIT_CREDENTIALS: + description: "Optional credentials to pass to git. Useful if you need to pull private repos for dependencies" + required: false + +runs: + using: composite + steps: + # The source code must be checked out by the workflow that invokes this action. + + # Run the rust doc tests + - name: Run rust doc tests + run: cargo test --profile ci --locked --doc --workspace --exclude aptos-node-checker + shell: bash diff --git a/.github/workflows/backport-to-release-branches.yaml b/.github/workflows/backport-to-release-branches.yaml new file mode 100644 index 0000000000000..e006d95b78d7e --- /dev/null +++ b/.github/workflows/backport-to-release-branches.yaml @@ -0,0 +1,38 @@ +name: Backport to Release Branches + +on: + pull_request_target: + types: ["labeled", "closed"] + +permissions: + contents: write + pull-requests: write + +jobs: + permission-check: + runs-on: ubuntu-latest + steps: + - name: Check repository permission for user which triggered workflow + uses: sushichop/action-repository-permission@13d208f5ae7a6a3fc0e5a7c2502c214983f0241c + with: + required-permission: write + comment-not-permitted: Sorry, you don't have permission to trigger this workflow. 
+ + backport: + name: Backport PR + needs: [permission-check] + runs-on: ubuntu-latest + steps: + - name: Backport Action + uses: sorenlouv/backport-github-action@ad888e978060bc1b2798690dd9d03c4036560947 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + add_original_reviewers: true + + - name: Info log + if: ${{ success() }} + run: cat ~/.backport/backport.info.log + + - name: Debug log + if: ${{ failure() }} + run: cat ~/.backport/backport.debug.log diff --git a/CODEOWNERS b/CODEOWNERS index 73eb4fa743af3..e9f52c43400a2 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -38,6 +38,9 @@ # Owners for the `/consensus` directory and all its subdirectories. /consensus/ @zekun000 @sasha8 @ibalajiarun +# Owners for consensus observer +/consensus/src/consensus-observer/ @joshlind @zekun000 + # Owners for quorum store. /consensus/src/quorum_store/ @bchocho @sasha8 @gelash @@ -74,7 +77,7 @@ /crates/aptos-telemetry-service @ibalajiarun @geekflyer # Owners for the inspection-service crate -/crates/inspection-service/ @joshlind @brianolson +/crates/inspection-service/ @joshlind # Owners for the `/dashboards` directory and all its subdirectories. /dashboards/ @aptos-labs/prod-eng @@ -90,7 +93,7 @@ /mempool/ @bchocho # Owners for the network and all its subdirectories. 
-/network/ @joshlind @brianolson +/network/ @joshlind @zekun000 # Owners for the scripts /scripts/ @aptos-labs/prod-eng diff --git a/Cargo.lock b/Cargo.lock index 1f78250d973b3..0662d30fc71ae 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -108,6 +108,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", + "const-random", "getrandom 0.2.11", "once_cell", "serde", @@ -130,6 +131,27 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "250f629c0161ad8107cf89319e990051fae62832fd343083bea452d93e2205fd" +[[package]] +name = "allocative" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "082af274fd02beef17b7f0725a49ecafe6c075ef56cac9d6363eb3916a9817ae" +dependencies = [ + "allocative_derive", + "ctor", +] + +[[package]] +name = "allocative_derive" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe233a377643e0fc1a56421d7c90acdec45c291b30345eb9f08e8d0ddce5a4ab" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + [[package]] name = "allocator-api2" version = "0.2.16" @@ -240,7 +262,7 @@ dependencies = [ [[package]] name = "aptos" -version = "3.4.1" +version = "4.2.0" dependencies = [ "anyhow", "aptos-api-types", @@ -284,13 +306,14 @@ dependencies = [ "chrono", "clap 4.4.14", "clap_complete", + "colored", "dashmap", "diesel", - "diesel-async 0.4.1 (git+https://github.com/weiznich/diesel_async.git?rev=d02798c67065d763154d7272dd0c09b39757d0f2)", + "diesel-async", "dirs", "futures", "hex", - "itertools 0.12.1", + "itertools 0.13.0", "jemallocator", "maplit", "move-binary-format", @@ -298,6 +321,7 @@ dependencies = [ "move-cli", "move-command-line-common", "move-compiler", + "move-compiler-v2", "move-core-types", "move-coverage", "move-disassembler", @@ -311,7 +335,8 @@ dependencies = [ "poem", "processor", 
"rand 0.7.3", - "reqwest", + "regex", + "reqwest 0.11.23", "self_update", "serde", "serde_json", @@ -368,8 +393,8 @@ dependencies = [ "aptos-system-utils 0.1.0", "aptos-types", "bcs 0.1.4", - "http", - "hyper", + "http 0.2.11", + "hyper 0.14.28", "sha256", "tokio", "url", @@ -416,14 +441,12 @@ dependencies = [ "aptos-storage-interface", "aptos-types", "aptos-vm", - "async-trait", "bcs 0.1.4", "bytes", "fail", "futures", "hex", - "hyper", - "itertools 0.12.1", + "itertools 0.13.0", "mime", "mini-moka", "move-core-types", @@ -438,7 +461,7 @@ dependencies = [ "proptest", "rand 0.7.3", "regex", - "reqwest", + "reqwest 0.11.23", "serde", "serde_json", "tokio", @@ -459,6 +482,7 @@ dependencies = [ "aptos-executor-types", "aptos-framework", "aptos-genesis", + "aptos-indexer-grpc-table-info", "aptos-mempool", "aptos-mempool-notifications", "aptos-sdk", @@ -471,7 +495,7 @@ dependencies = [ "bcs 0.1.4", "bytes", "goldenfile", - "hyper", + "hyper 0.14.28", "rand 0.7.3", "regex", "serde_json", @@ -542,9 +566,11 @@ dependencies = [ "aptos-config", "aptos-crypto", "aptos-db", + "aptos-db-indexer-schemas", "aptos-executor", "aptos-executor-test-helpers", "aptos-executor-types", + "aptos-indexer-grpc-table-info", "aptos-infallible", "aptos-jellyfish-merkle", "aptos-logger", @@ -561,7 +587,7 @@ dependencies = [ "clap 4.4.14", "csv", "futures", - "itertools 0.12.1", + "itertools 0.13.0", "move-binary-format", "move-bytecode-verifier", "num_cpus", @@ -570,7 +596,7 @@ dependencies = [ "proptest", "rand 0.7.3", "regex", - "reqwest", + "reqwest 0.11.23", "serde", "serde_json", "serde_yaml 0.8.26", @@ -597,9 +623,9 @@ dependencies = [ "aptos-types", "bcs 0.1.4", "bytes", - "hyper", + "hyper 0.14.28", "once_cell", - "reqwest", + "reqwest 0.11.23", "serde", "tokio", "tokio-stream", @@ -650,7 +676,7 @@ dependencies = [ "dashmap", "derivative", "fail", - "itertools 0.12.1", + "itertools 0.13.0", "move-binary-format", "move-core-types", "move-vm-types", @@ -677,7 +703,7 @@ dependencies = [ 
"clap 4.4.14", "criterion", "dashmap", - "itertools 0.12.1", + "itertools 0.13.0", "jemallocator", "move-core-types", "once_cell", @@ -730,7 +756,7 @@ dependencies = [ "env_logger", "guppy", "log", - "reqwest", + "reqwest 0.11.23", "url", ] @@ -773,7 +799,8 @@ dependencies = [ "bcs 0.1.4", "clap 4.4.14", "futures", - "itertools 0.12.1", + "itertools 0.13.0", + "move-binary-format", "move-compiler", "move-core-types", "move-model", @@ -820,7 +847,6 @@ dependencies = [ "get_if_addrs", "maplit", "num_cpus", - "number_range", "poem-openapi", "rand 0.7.3", "serde", @@ -861,6 +887,7 @@ dependencies = [ "aptos-logger", "aptos-mempool", "aptos-metrics-core", + "aptos-netcore", "aptos-network", "aptos-peer-monitoring-service-types", "aptos-reliable-broadcast", @@ -888,7 +915,7 @@ dependencies = [ "futures", "futures-channel", "hex", - "itertools 0.12.1", + "itertools 0.13.0", "lru 0.7.8", "maplit", "mini-moka", @@ -900,6 +927,7 @@ dependencies = [ "once_cell", "ordered-float 3.9.2", "proptest", + "proptest-derive", "rand 0.7.3", "rayon", "scopeguard", @@ -946,9 +974,10 @@ dependencies = [ "aptos-short-hex-str", "aptos-types", "bcs 0.1.4", + "derivative", "fail", "futures", - "itertools 0.12.1", + "itertools 0.13.0", "mini-moka", "mirai-annotations", "once_cell", @@ -1069,7 +1098,7 @@ dependencies = [ "claims", "dashmap", "futures", - "itertools 0.12.1", + "itertools 0.13.0", "maplit", "mockall", "ordered-float 3.9.2", @@ -1143,7 +1172,7 @@ dependencies = [ "either", "hex", "indicatif 0.15.0", - "itertools 0.12.1", + "itertools 0.13.0", "lru 0.7.8", "move-core-types", "num-derive", @@ -1184,6 +1213,7 @@ name = "aptos-db-indexer-schemas" version = "0.1.0" dependencies = [ "anyhow", + "aptos-crypto", "aptos-proptest-helpers", "aptos-schemadb", "aptos-storage-interface", @@ -1215,7 +1245,7 @@ dependencies = [ "aptos-vm", "bcs 0.1.4", "clap 4.4.14", - "itertools 0.12.1", + "itertools 0.13.0", "tokio", ] @@ -1285,6 +1315,7 @@ dependencies = [ "aptos-network", 
"aptos-reliable-broadcast", "aptos-runtimes", + "aptos-safety-rules", "aptos-time-service", "aptos-types", "aptos-validator-transaction-pool", @@ -1380,6 +1411,7 @@ dependencies = [ "aptos-executor-types", "aptos-experimental-runtimes", "aptos-genesis", + "aptos-indexer-grpc-table-info", "aptos-infallible", "aptos-logger", "aptos-metrics-core", @@ -1395,7 +1427,7 @@ dependencies = [ "bytes", "dashmap", "fail", - "itertools 0.12.1", + "itertools 0.13.0", "move-core-types", "once_cell", "proptest", @@ -1438,7 +1470,7 @@ dependencies = [ "clap 4.4.14", "derivative", "indicatif 0.15.0", - "itertools 0.12.1", + "itertools 0.13.0", "jemallocator", "move-core-types", "num_cpus", @@ -1472,7 +1504,7 @@ dependencies = [ "crossbeam-channel", "ctrlc", "dashmap", - "itertools 0.12.1", + "itertools 0.13.0", "num_cpus", "once_cell", "rayon", @@ -1514,23 +1546,45 @@ dependencies = [ "aptos-types", "bcs 0.1.4", "criterion", - "itertools 0.12.1", + "itertools 0.13.0", "once_cell", "serde", "thiserror", ] +[[package]] +name = "aptos-experimental-hexy" +version = "0.1.0" +dependencies = [ + "anyhow", + "aptos-crypto", + "aptos-experimental-layered-map", + "aptos-infallible", + "aptos-metrics-core", + "criterion", + "itertools 0.13.0", + "jemallocator", + "once_cell", + "proptest", + "rand 0.7.3", +] + [[package]] name = "aptos-experimental-layered-map" version = "0.1.0" dependencies = [ + "ahash 0.8.11", "aptos-crypto", "aptos-drop-helper", "aptos-infallible", "aptos-metrics-core", "bitvec 1.0.1", + "criterion", + "itertools 0.13.0", + "jemallocator", "once_cell", "proptest", + "rand 0.7.3", ] [[package]] @@ -1610,7 +1664,7 @@ dependencies = [ "poem-openapi", "rand 0.7.3", "redis", - "reqwest", + "reqwest 0.11.23", "serde", "serde_json", "serde_yaml 0.8.26", @@ -1654,7 +1708,7 @@ dependencies = [ "env_logger", "futures", "gcp-bigquery-client", - "reqwest", + "reqwest 0.11.23", "serde", "serde_json", "tokio", @@ -1691,8 +1745,8 @@ dependencies = [ "either", "futures", "hex", - "hyper", 
- "itertools 0.12.1", + "hyper 0.14.28", + "itertools 0.13.0", "json-patch", "k8s-openapi", "kube", @@ -1701,7 +1755,7 @@ dependencies = [ "prometheus-http-query", "rand 0.7.3", "regex", - "reqwest", + "reqwest 0.11.23", "serde", "serde_json", "serde_yaml 0.8.26", @@ -1733,7 +1787,7 @@ dependencies = [ "once_cell", "rand 0.7.3", "random_word", - "reqwest", + "reqwest 0.11.23", "serde_yaml 0.8.26", "tokio", "url", @@ -1773,7 +1827,7 @@ dependencies = [ "either", "flate2", "hex", - "itertools 0.12.1", + "itertools 0.13.0", "libsecp256k1", "log", "lru 0.7.8", @@ -1782,6 +1836,7 @@ dependencies = [ "move-cli", "move-command-line-common", "move-compiler", + "move-compiler-v2", "move-core-types", "move-docgen", "move-model", @@ -1996,7 +2051,7 @@ dependencies = [ "once_cell", "prost 0.12.3", "redis", - "reqwest", + "reqwest 0.11.23", "serde", "tempfile", "tokio", @@ -2053,6 +2108,26 @@ dependencies = [ "tracing", ] +[[package]] +name = "aptos-indexer-grpc-file-store-backfiller" +version = "1.0.0" +dependencies = [ + "anyhow", + "aptos-indexer-grpc-server-framework", + "aptos-indexer-grpc-utils", + "aptos-protos 1.3.1", + "async-trait", + "clap 4.4.14", + "futures", + "jemallocator", + "serde", + "serde_json", + "tokio", + "tonic 0.11.0", + "tracing", + "url", +] + [[package]] name = "aptos-indexer-grpc-fullnode" version = "1.0.0" @@ -2092,8 +2167,8 @@ dependencies = [ "futures", "goldenfile", "hex", - "hyper", - "itertools 0.12.1", + "hyper 0.14.28", + "itertools 0.13.0", "move-binary-format", "move-core-types", "move-package", @@ -2124,30 +2199,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "aptos-indexer-grpc-integration-tests" -version = "0.1.0" -dependencies = [ - "anyhow", - "aptos-config", - "aptos-indexer-grpc-cache-worker", - "aptos-indexer-grpc-file-store", - "aptos-indexer-grpc-server-framework", - "aptos-indexer-grpc-utils", - "aptos-logger", - "aptos-transaction-emitter-lib", - "aptos-transaction-generator-lib", - "aptos-types", - "once_cell", - 
"redis", - "regex", - "reqwest", - "tempfile", - "tokio", - "tracing", - "url", -] - [[package]] name = "aptos-indexer-grpc-server-framework" version = "1.0.0" @@ -2183,13 +2234,13 @@ dependencies = [ "aptos-logger", "aptos-mempool", "aptos-runtimes", - "aptos-schemadb", "aptos-storage-interface", "aptos-types", "flate2", "futures", "google-cloud-storage", - "hyper", + "hyper 0.14.28", + "itertools 0.13.0", "rocksdb", "serde", "serde_json", @@ -2198,7 +2249,6 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util 0.7.10", - "tonic 0.11.0", ] [[package]] @@ -2215,7 +2265,7 @@ dependencies = [ "cloud-storage", "dashmap", "futures", - "itertools 0.12.1", + "itertools 0.13.0", "lz4", "once_cell", "prometheus", @@ -2232,6 +2282,34 @@ dependencies = [ "url", ] +[[package]] +name = "aptos-indexer-test-transactions" +version = "1.0.0" +dependencies = [ + "aptos-protos 1.3.1", + "serde_json", +] + +[[package]] +name = "aptos-indexer-transaction-generator" +version = "1.0.0" +dependencies = [ + "anyhow", + "aptos-indexer-grpc-utils", + "aptos-protos 1.3.1", + "clap 4.4.14", + "futures", + "itertools 0.13.0", + "serde", + "serde_json", + "serde_yaml 0.8.26", + "tempfile", + "tokio", + "tokio-stream", + "tonic 0.11.0", + "url", +] + [[package]] name = "aptos-infallible" version = "0.1.0" @@ -2254,10 +2332,10 @@ dependencies = [ "aptos-time-service", "assert_approx_eq", "futures", - "hyper", + "hyper 0.14.28", "once_cell", "prometheus", - "reqwest", + "reqwest 0.11.23", "rusty-fork", "serde_json", "tokio", @@ -2280,7 +2358,7 @@ dependencies = [ "arr_macro", "bcs 0.1.4", "byteorder", - "itertools 0.12.1", + "itertools 0.13.0", "num-derive", "num-traits", "once_cell", @@ -2312,6 +2390,7 @@ dependencies = [ "aptos-network", "aptos-reliable-broadcast", "aptos-runtimes", + "aptos-safety-rules", "aptos-time-service", "aptos-types", "aptos-validator-transaction-pool", @@ -2333,9 +2412,9 @@ version = "0.1.0" dependencies = [ "anyhow", "aptos-types", - "http", + "http 0.2.11", 
"move-core-types", - "reqwest", + "reqwest 0.11.23", "serde", "serde_json", "tokio", @@ -2362,7 +2441,7 @@ dependencies = [ "ark-ff", "base64 0.13.1", "hex", - "itertools 0.12.1", + "itertools 0.13.0", "num-bigint 0.3.3", "num-modular", "num-traits", @@ -2426,13 +2505,15 @@ name = "aptos-keyless-pepper-example-client-rust" version = "0.1.0" dependencies = [ "aptos-crypto", + "aptos-infallible", "aptos-keyless-pepper-common", "aptos-types", "ark-bls12-381", "ark-serialize", "bcs 0.1.4", + "firestore", "hex", - "reqwest", + "reqwest 0.11.23", "serde_json", "tokio", ] @@ -2444,6 +2525,7 @@ dependencies = [ "aes-gcm", "anyhow", "aptos-crypto", + "aptos-infallible", "aptos-inspection-service", "aptos-keyless-pepper-common", "aptos-logger", @@ -2455,14 +2537,15 @@ dependencies = [ "ark-serialize", "bcs 0.1.4", "dashmap", + "firestore", "hex", - "hyper", + "hyper 0.14.28", "jsonwebtoken 8.3.0", "jwt", "once_cell", "rand 0.7.3", "regex", - "reqwest", + "reqwest 0.11.23", "serde", "serde_json", "sha3 0.9.1", @@ -2495,6 +2578,7 @@ dependencies = [ "aptos-vm-types", "bcs 0.1.4", "bytes", + "claims", "goldenfile", "move-binary-format", "move-command-line-common", @@ -2601,7 +2685,7 @@ dependencies = [ "enum_dispatch", "fail", "futures", - "itertools 0.12.1", + "itertools 0.13.0", "maplit", "num_cpus", "once_cell", @@ -2652,6 +2736,7 @@ name = "aptos-move-debugger" version = "0.1.0" dependencies = [ "anyhow", + "aptos-block-executor", "aptos-consensus", "aptos-crypto", "aptos-gas-profiling", @@ -2664,8 +2749,9 @@ dependencies = [ "aptos-vm-types", "bcs 0.1.4", "clap 4.4.14", + "itertools 0.13.0", "regex", - "reqwest", + "reqwest 0.11.23", "tokio", "url", ] @@ -2730,7 +2816,7 @@ dependencies = [ [[package]] name = "aptos-moving-average" version = "0.1.0" -source = "git+https://github.com/aptos-labs/aptos-indexer-processors.git?rev=5244b84fa5ed872e5280dc8df032d744d62ad29d#5244b84fa5ed872e5280dc8df032d744d62ad29d" +source = 
"git+https://github.com/aptos-labs/aptos-indexer-processors.git?rev=fa1ce4947f4c2be57529f1c9732529e05a06cb7f#fa1ce4947f4c2be57529f1c9732529e05a06cb7f" dependencies = [ "chrono", ] @@ -2820,7 +2906,7 @@ dependencies = [ "futures", "futures-util", "hex", - "itertools 0.12.1", + "itertools 0.13.0", "maplit", "once_cell", "ordered-float 3.9.2", @@ -2932,26 +3018,27 @@ dependencies = [ "aptos-indexer-grpc-server-framework", "aptos-metrics-core", "async-trait", + "axum 0.7.5", "backoff", "bytes", "chrono", "clap 4.4.14", "diesel", "diesel_migrations", + "enum_dispatch", "field_count", "futures", "google-cloud-storage", "image", "once_cell", "regex", - "reqwest", + "reqwest 0.11.23", "serde", "serde_json", "sha256", "tokio", "tracing", "url", - "warp", ] [[package]] @@ -2996,8 +3083,6 @@ dependencies = [ "aptos-peer-monitoring-service-server", "aptos-peer-monitoring-service-types", "aptos-runtimes", - "aptos-safety-rules", - "aptos-schemadb", "aptos-state-sync-driver", "aptos-storage-interface", "aptos-storage-service-client", @@ -3047,7 +3132,7 @@ dependencies = [ "poem", "poem-openapi", "prometheus-parse", - "reqwest", + "reqwest 0.11.23", "serde", "serde_json", "serde_yaml 0.8.26", @@ -3094,7 +3179,6 @@ dependencies = [ name = "aptos-openapi" version = "0.1.0" dependencies = [ - "async-trait", "percent-encoding", "poem", "poem-openapi", @@ -3121,7 +3205,7 @@ version = "0.1.0" dependencies = [ "anyhow", "aptos-framework", - "itertools 0.12.1", + "itertools 0.13.0", "move-command-line-common", "move-package", "tempfile", @@ -3235,8 +3319,7 @@ dependencies = [ [[package]] name = "aptos-protos" -version = "1.3.0" -source = "git+https://github.com/aptos-labs/aptos-core.git?tag=aptos-node-v1.12.1#4b9a2593facaee92b28df2e99b2773a7e4f930f5" +version = "1.3.1" dependencies = [ "futures-core", "pbjson", @@ -3248,6 +3331,7 @@ dependencies = [ [[package]] name = "aptos-protos" version = "1.3.1" +source = 
"git+https://github.com/aptos-labs/aptos-core.git?rev=5c48aee129b5a141be2792ffa3d9bd0a1a61c9cb#5c48aee129b5a141be2792ffa3d9bd0a1a61c9cb" dependencies = [ "futures-core", "pbjson", @@ -3301,19 +3385,29 @@ dependencies = [ "aptos-genesis", "aptos-infallible", "aptos-keygen", + "aptos-language-e2e-tests", + "aptos-move-debugger", "aptos-rest-client", "aptos-temppath", "aptos-types", + "aptos-vm", + "aptos-vm-logging", + "aptos-vm-types", "bcs 0.1.4", "clap 4.4.14", "futures", "git2 0.16.1", "handlebars", "hex", + "move-binary-format", + "move-bytecode-verifier", "move-core-types", "move-model", + "move-vm-runtime", + "move-vm-types", "once_cell", - "reqwest", + "parking_lot 0.12.1", + "reqwest 0.11.23", "serde", "serde_json", "serde_yaml 0.8.26", @@ -3322,6 +3416,7 @@ dependencies = [ "strum_macros 0.24.3", "tokio", "url", + "walkdir", ] [[package]] @@ -3373,7 +3468,7 @@ dependencies = [ "clap 4.4.14", "hex", "move-core-types", - "reqwest", + "reqwest 0.11.23", "serde", "serde_json", "thiserror", @@ -3417,13 +3512,12 @@ dependencies = [ "clap 4.4.14", "futures", "hex", - "itertools 0.12.1", + "itertools 0.13.0", "move-core-types", "once_cell", - "reqwest", + "reqwest 0.11.23", "serde", "serde_json", - "serde_yaml 0.8.26", "tokio", "url", "warp", @@ -3457,7 +3551,6 @@ dependencies = [ name = "aptos-safety-rules" version = "0.1.0" dependencies = [ - "anyhow", "aptos-config", "aptos-consensus-types", "aptos-crypto", @@ -3472,6 +3565,7 @@ dependencies = [ "aptos-vault-client", "claims", "criterion", + "hex", "once_cell", "proptest", "rand 0.7.3", @@ -3512,7 +3606,7 @@ dependencies = [ "aptos-types", "bitvec 1.0.1", "criterion", - "itertools 0.12.1", + "itertools 0.13.0", "jemallocator", "once_cell", "proptest", @@ -3644,7 +3738,6 @@ dependencies = [ "aptos-data-client", "aptos-data-streaming-service", "aptos-db", - "aptos-db-indexer-schemas", "aptos-event-notifications", "aptos-executor", "aptos-executor-test-helpers", @@ -3792,8 +3885,8 @@ dependencies = [ "anyhow", 
"aptos-profiler 0.1.0", "async-mutex", - "http", - "hyper", + "http 0.2.11", + "hyper 0.14.28", "lazy_static", "mime", "pprof", @@ -3812,8 +3905,8 @@ dependencies = [ "anyhow", "aptos-profiler 0.1.0 (git+https://github.com/aptos-labs/aptos-core.git?rev=4541add3fd29826ec57f22658ca286d2d6134b93)", "async-mutex", - "http", - "hyper", + "http 0.2.11", + "hyper 0.14.28", "lazy_static", "mime", "pprof", @@ -3868,7 +3961,7 @@ dependencies = [ "prometheus", "rand 0.7.3", "rand_core 0.5.1", - "reqwest", + "reqwest 0.11.23", "reqwest-middleware", "reqwest-retry", "serde", @@ -3907,7 +4000,7 @@ dependencies = [ "prometheus", "rand 0.7.3", "rand_core 0.5.1", - "reqwest", + "reqwest 0.11.23", "reqwest-middleware", "reqwest-retry", "serde", @@ -3952,9 +4045,9 @@ dependencies = [ "csv", "futures", "hex", - "itertools 0.12.1", + "itertools 0.13.0", "rand 0.7.3", - "reqwest", + "reqwest 0.11.23", "tokio", "tokio-scoped", ] @@ -3992,9 +4085,7 @@ dependencies = [ "criterion", "criterion-cpu-time", "num_cpus", - "once_cell", "proptest", - "rayon", ] [[package]] @@ -4030,11 +4121,11 @@ dependencies = [ "async-trait", "clap 4.4.14", "futures", - "itertools 0.12.1", + "itertools 0.13.0", "once_cell", "rand 0.7.3", "rand_core 0.5.1", - "reqwest", + "reqwest 0.11.23", "serde", "tokio", "url", @@ -4140,7 +4231,7 @@ dependencies = [ "fxhash", "hashbrown 0.14.3", "hex", - "itertools 0.12.1", + "itertools 0.13.0", "jsonwebtoken 8.3.0", "move-binary-format", "move-bytecode-verifier", @@ -4164,7 +4255,7 @@ dependencies = [ "rand 0.7.3", "rayon", "regex", - "reqwest", + "reqwest 0.11.23", "ring 0.16.20", "rsa 0.9.6", "serde", @@ -4314,6 +4405,7 @@ dependencies = [ "aptos-vm", "bcs 0.1.4", "bytes", + "claims", "move-core-types", "move-vm-runtime", "move-vm-types", @@ -4419,25 +4511,12 @@ dependencies = [ "aptos-config", "aptos-logger", "bcs 0.1.4", - "hyper", + "hyper 0.14.28", "serde", "serde_json", "warp", ] -[[package]] -name = "aptos-writeset-generator" -version = "0.1.0" -dependencies = [ - 
"anyhow", - "aptos-crypto", - "aptos-types", - "aptos-vm", - "move-core-types", - "move-vm-runtime", - "move-vm-types", -] - [[package]] name = "arbitrary" version = "1.3.2" @@ -4754,9 +4833,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.5" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc2d0cfb2a7388d34f590e76686704c494ed7aaceed62ee1ba35cbf363abc2a5" +checksum = "cd066d0b4ef8ecb03a55319dc13aa6910616d0f44008a045bb1835af830abff5" dependencies = [ "flate2", "futures-core", @@ -5020,102 +5099,102 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.5.17" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acee9fd5073ab6b045a275b3e709c163dd36c90685219cb21804a147b58dba43" +checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" dependencies = [ "async-trait", - "axum-core 0.2.9", + "axum-core 0.3.4", "bitflags 1.3.2", "bytes", "futures-util", - "http", - "http-body", - "hyper", + "http 0.2.11", + "http-body 0.4.6", + "hyper 0.14.28", "itoa", - "matchit 0.5.0", + "matchit", "memchr", "mime", "percent-encoding", "pin-project-lite", + "rustversion", "serde", - "serde_json", - "serde_urlencoded", - "sync_wrapper", - "tokio", + "sync_wrapper 0.1.2", "tower", - "tower-http 0.3.5", "tower-layer", "tower-service", ] [[package]] name = "axum" -version = "0.6.20" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" +checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" dependencies = [ "async-trait", - "axum-core 0.3.4", - "bitflags 1.3.2", + "axum-core 0.4.3", "bytes", "futures-util", - "http", - "http-body", - "hyper", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.4.1", + "hyper-util", "itoa", - "matchit 0.7.3", 
+ "matchit", "memchr", "mime", "percent-encoding", "pin-project-lite", "rustversion", "serde", - "sync_wrapper", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sync_wrapper 1.0.1", + "tokio", "tower", "tower-layer", "tower-service", + "tracing", ] [[package]] name = "axum-core" -version = "0.2.9" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37e5939e02c56fecd5c017c37df4238c0a839fa76b7f97acdd7efb804fd181cc" +checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" dependencies = [ "async-trait", "bytes", "futures-util", - "http", - "http-body", + "http 0.2.11", + "http-body 0.4.6", "mime", + "rustversion", "tower-layer", "tower-service", ] [[package]] name = "axum-core" -version = "0.3.4" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" +checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3" dependencies = [ "async-trait", "bytes", "futures-util", - "http", - "http-body", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", "mime", + "pin-project-lite", "rustversion", + "sync_wrapper 0.1.2", "tower-layer", "tower-service", -] - -[[package]] -name = "axum-test" -version = "0.1.0" -dependencies = [ - "axum 0.5.17", - "tokio", + "tracing", ] [[package]] @@ -5314,7 +5393,7 @@ version = "0.69.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.6.0", "cexpr", "clang-sys", "itertools 0.12.1", @@ -5357,9 +5436,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.1" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" 
+checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" [[package]] name = "bitmaps" @@ -5537,8 +5616,8 @@ dependencies = [ "futures-core", "futures-util", "hex", - "http", - "hyper", + "http 0.2.11", + "hyper 0.14.28", "hyperlocal", "log", "pin-project-lite", @@ -5723,6 +5802,19 @@ dependencies = [ "serde", ] +[[package]] +name = "canonical_json" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f89083fd014d71c47a718d7f4ac050864dac8587668dbe90baf9e261064c5710" +dependencies = [ + "hex", + "regex", + "serde", + "serde_json", + "thiserror", +] + [[package]] name = "captcha" version = "0.0.9" @@ -5826,11 +5918,17 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + [[package]] name = "chrono" -version = "0.4.31" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" dependencies = [ "android-tzdata", "iana-time-zone", @@ -5838,7 +5936,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets 0.48.5", + "windows-targets 0.52.0", ] [[package]] @@ -5988,6 +6086,7 @@ dependencies = [ "anstyle", "clap_lex 0.6.0", "strsim 0.10.0", + "terminal_size", ] [[package]] @@ -6065,7 +6164,7 @@ dependencies = [ "lazy_static", "pem 0.8.3", "percent-encoding", - "reqwest", + "reqwest 0.11.23", "ring 0.16.20", "serde", "serde_json", @@ -6214,6 +6313,26 @@ version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" +[[package]] +name = "const-random" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359" +dependencies = [ + "const-random-macro", +] + +[[package]] +name = "const-random-macro" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" +dependencies = [ + "getrandom 0.2.11", + "once_cell", + "tiny-keccak", +] + [[package]] name = "const_fn" version = "0.4.9" @@ -6259,24 +6378,33 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" [[package]] -name = "cookie" -version = "0.16.2" +name = "convert_case" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e859cd57d0710d9e06c381b550c06e76992472a8c6d527aecd2fc673dcc231fb" +checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca" dependencies = [ - "percent-encoding", + "unicode-segmentation", +] + +[[package]] +name = "cookie" +version = "0.16.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e859cd57d0710d9e06c381b550c06e76992472a8c6d527aecd2fc673dcc231fb" +dependencies = [ + "percent-encoding", "time", "version_check", ] [[package]] name = "cookie" -version = "0.17.0" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7efb37c3e1ccb1ff97164ad95ac1606e8ccd35b3fa0a7d99a304c7f4a428cc24" +checksum = "4ddef33a339a91ea89fb53151bd0a4689cfce27055c291dfa69945475d22c747" dependencies = [ "aes-gcm", - "base64 0.21.6", + "base64 0.22.1", "hkdf 0.12.4", "hmac 0.12.1", "percent-encoding", @@ -6591,6 +6719,16 @@ dependencies = [ "memchr", ] +[[package]] +name = "ctor" +version = "0.1.26" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" +dependencies = [ + "quote", + "syn 1.0.109", +] + [[package]] name = "ctr" version = "0.9.2" @@ -6706,16 +6844,6 @@ dependencies = [ "darling_macro 0.13.4", ] -[[package]] -name = "darling" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" -dependencies = [ - "darling_core 0.14.4", - "darling_macro 0.14.4", -] - [[package]] name = "darling" version = "0.20.9" @@ -6740,20 +6868,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "darling_core" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim 0.10.0", - "syn 1.0.109", -] - [[package]] name = "darling_core" version = "0.20.9" @@ -6779,17 +6893,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "darling_macro" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" -dependencies = [ - "darling_core 0.14.4", - "quote", - "syn 1.0.109", -] - [[package]] name = "darling_macro" version = "0.20.9" @@ -6978,7 +7081,7 @@ version = "0.99.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ - "convert_case", + "convert_case 0.4.0", "proc-macro2", "quote", "rustc_version", @@ -7015,7 +7118,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62c6fcf842f17f8c78ecf7c81d75c5ce84436b41ee07e03f490fbb5f5a8731d8" dependencies = [ "bigdecimal", - "bitflags 2.4.1", + "bitflags 2.6.0", "byteorder", "chrono", "diesel_derives", @@ -7028,20 
+7131,6 @@ dependencies = [ "serde_json", ] -[[package]] -name = "diesel-async" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acada1517534c92d3f382217b485db8a8638f111b0e3f2a2a8e26165050f77be" -dependencies = [ - "async-trait", - "diesel", - "futures-util", - "scoped-futures", - "tokio", - "tokio-postgres", -] - [[package]] name = "diesel-async" version = "0.4.1" @@ -7056,18 +7145,6 @@ dependencies = [ "tokio-postgres", ] -[[package]] -name = "diesel_async_migrations" -version = "0.11.0" -source = "git+https://github.com/niroco/diesel_async_migrations?rev=11f331b73c5cfcc894380074f748d8fda710ac12#11f331b73c5cfcc894380074f748d8fda710ac12" -dependencies = [ - "diesel", - "diesel-async 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", - "macros", - "scoped-futures", - "tracing", -] - [[package]] name = "diesel_derives" version = "2.1.2" @@ -7272,6 +7349,7 @@ dependencies = [ "aptos-gas-schedule", "aptos-language-e2e-tests", "aptos-package-builder", + "aptos-transaction-generator-lib", "aptos-types", "aptos-vm", "aptos-vm-types", @@ -7870,6 +7948,29 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fcfdc7a0362c9f4444381a9e697c79d435fe65b52a37466fc2c1184cee9edc6" +[[package]] +name = "firestore" +version = "0.43.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11f7f676812c6a51d8584aa52252e2d3a02417ee0b27966c7059fe4ef80945d5" +dependencies = [ + "async-trait", + "backoff", + "chrono", + "futures", + "gcloud-sdk", + "hex", + "hyper 1.4.1", + "rand 0.8.5", + "rsb_derive", + "rvstruct", + "serde", + "struct-path", + "tokio", + "tokio-stream", + "tracing", +] + [[package]] name = "fixed" version = "1.25.1" @@ -8205,42 +8306,27 @@ version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2" -[[package]] -name = "gcemeta" -version = 
"0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47d460327b24cc34c86d53d60a90e9e6044817f7906ebd9baa5c3d0ee13e1ecf" -dependencies = [ - "bytes", - "hyper", - "serde", - "serde_json", - "thiserror", - "tokio", - "tracing", -] - [[package]] name = "gcloud-sdk" -version = "0.20.7" +version = "0.25.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a24376e7850e7864bb326debc5765a1dda4fc47603c22e2bc0ebf30ff59141b" +checksum = "898e349fb0fabc16892de7858e5650b70a8044edeee13469cb2f7649040bf3c2" dependencies = [ "async-trait", + "bytes", "chrono", "futures", - "gcemeta", - "hyper", - "jsonwebtoken 8.3.0", + "hyper 1.4.1", + "jsonwebtoken 9.3.0", "once_cell", - "prost 0.11.9", - "prost-types 0.11.9", - "reqwest", + "prost 0.13.1", + "prost-types 0.13.1", + "reqwest 0.12.5", "secret-vault-value", "serde", "serde_json", "tokio", - "tonic 0.9.2", + "tonic 0.12.1", "tower", "tower-layer", "tower-util", @@ -8257,10 +8343,10 @@ dependencies = [ "async-stream", "async-trait", "dyn-clone", - "hyper", + "hyper 0.14.28", "hyper-rustls 0.24.2", "log", - "reqwest", + "reqwest 0.11.23", "serde", "serde_json", "thiserror", @@ -8480,7 +8566,7 @@ dependencies = [ "google-cloud-token", "home", "jsonwebtoken 8.3.0", - "reqwest", + "reqwest 0.11.23", "serde", "serde_json", "thiserror", @@ -8497,7 +8583,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8bdaaa4bc036e8318274d1b25f0f2265b3e95418b765fd1ea1c7ef938fd69bd" dependencies = [ "google-cloud-token", - "http", + "http 0.2.11", "thiserror", "tokio", "tokio-retry", @@ -8523,7 +8609,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96e4ad0802d3f416f62e7ce01ac1460898ee0efc98f8b45cd4aab7611607012f" dependencies = [ - "reqwest", + "reqwest 0.11.23", "thiserror", "tokio", ] @@ -8564,7 +8650,7 @@ dependencies = [ "once_cell", "percent-encoding", "regex", - "reqwest", + "reqwest 0.11.23", "ring 
0.16.20", "rsa 0.6.1", "serde", @@ -8661,7 +8747,26 @@ dependencies = [ "futures-core", "futures-sink", "futures-util", - "http", + "http 0.2.11", + "indexmap 2.2.5", + "slab", + "tokio", + "tokio-util 0.7.10", + "tracing", +] + +[[package]] +name = "h2" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http 1.1.0", "indexmap 2.2.5", "slab", "tokio", @@ -8682,6 +8787,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02b4af3693f1b705df946e9fe5631932443781d0aabb423b62fcd4d73f6d2fd0" dependencies = [ "crunchy", + "num-traits", ] [[package]] @@ -8762,8 +8868,23 @@ checksum = "06683b93020a07e3dbcf5f8c0f6d40080d725bea7936fc01ad345c01b97dc270" dependencies = [ "base64 0.21.6", "bytes", - "headers-core", - "http", + "headers-core 0.2.0", + "http 0.2.11", + "httpdate", + "mime", + "sha1", +] + +[[package]] +name = "headers" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "322106e6bd0cba2d5ead589ddb8150a13d7c4217cf80d7c4f682ca994ccc6aa9" +dependencies = [ + "base64 0.21.6", + "bytes", + "headers-core 0.3.0", + "http 1.1.0", "httpdate", "mime", "sha1", @@ -8775,7 +8896,16 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" dependencies = [ - "http", + "http 0.2.11", +] + +[[package]] +name = "headers-core" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54b4a22553d4242c49fddb9ba998a99962b5cc6f22cb5a3482bec22522403ce4" +dependencies = [ + "http 1.1.0", ] [[package]] @@ -8937,6 +9067,17 @@ dependencies = [ "itoa", ] +[[package]] +name = "http" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + [[package]] name = "http-body" version = "0.4.6" @@ -8944,7 +9085,30 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", - "http", + "http 0.2.11", + "pin-project-lite", +] + +[[package]] +name = "http-body" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +dependencies = [ + "bytes", + "http 1.1.0", +] + +[[package]] +name = "http-body-util" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +dependencies = [ + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.0", "pin-project-lite", ] @@ -8980,7 +9144,7 @@ dependencies = [ "crossbeam-utils", "form_urlencoded", "futures-util", - "hyper", + "hyper 0.14.28", "isahc", "lazy_static", "levenshtein", @@ -9019,9 +9183,9 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2", - "http", - "http-body", + "h2 0.3.26", + "http 0.2.11", + "http-body 0.4.6", "httparse", "httpdate", "itoa", @@ -9033,14 +9197,35 @@ dependencies = [ "want", ] +[[package]] +name = "hyper" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "h2 0.4.5", + "http 1.1.0", + "http-body 1.0.0", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "smallvec", + "tokio", + "want", +] + [[package]] name = "hyper-rustls" version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" dependencies = [ - "http", - "hyper", + "http 0.2.11", + "hyper 0.14.28", "log", "rustls 0.20.9", "rustls-native-certs 0.6.3", @@ -9055,8 +9240,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", - "http", - "hyper", + "http 0.2.11", + "hyper 0.14.28", "log", "rustls 0.21.10", "rustls-native-certs 0.6.3", @@ -9064,18 +9249,49 @@ dependencies = [ "tokio-rustls 0.24.1", ] +[[package]] +name = "hyper-rustls" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" +dependencies = [ + "futures-util", + "http 1.1.0", + "hyper 1.4.1", + "hyper-util", + "rustls 0.23.7", + "rustls-native-certs 0.7.0", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.26.0", + "tower-service", +] + [[package]] name = "hyper-timeout" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" dependencies = [ - "hyper", + "hyper 0.14.28", "pin-project-lite", "tokio", "tokio-io-timeout", ] +[[package]] +name = "hyper-timeout" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" +dependencies = [ + "hyper 1.4.1", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + [[package]] name = "hyper-tls" version = "0.5.0" @@ -9083,12 +9299,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", - "hyper", + "hyper 0.14.28", "native-tls", "tokio", "tokio-native-tls", ] +[[package]] +name = "hyper-util" +version = "0.1.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ab92f4f49ee4fb4f997c784b7a2e0fa70050211e0b6a287f898c3c9785ca956" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http 1.1.0", + "http-body 1.0.0", + "hyper 1.4.1", + "pin-project-lite", + "socket2 0.5.5", + "tokio", + "tower", + "tower-service", + "tracing", +] + [[package]] name = "hyperlocal" version = "0.8.0" @@ -9097,7 +9333,7 @@ checksum = "0fafdf7b2b2de7c9784f76e02c0935e65a8117ec3b768644379983ab333ac98c" dependencies = [ "futures-util", "hex", - "hyper", + "hyper 0.14.28", "pin-project 1.1.3", "tokio", ] @@ -9388,6 +9624,12 @@ dependencies = [ "web-sys", ] +[[package]] +name = "integer-encoding" +version = "3.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" + [[package]] name = "internment" version = "0.5.6" @@ -9467,7 +9709,7 @@ dependencies = [ "encoding_rs", "event-listener 2.5.3", "futures-lite 1.13.0", - "http", + "http 0.2.11", "log", "mime", "once_cell", @@ -9507,6 +9749,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.10" @@ -9610,6 +9861,21 @@ dependencies = [ "simple_asn1 0.6.2", ] +[[package]] +name = "jsonwebtoken" +version = "9.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9ae10193d25051e74945f1ea2d0b42e03cc3b890f7e4cc5faa44997d808193f" +dependencies = [ + "base64 0.21.6", + "js-sys", + "pem 3.0.4", + "ring 0.17.7", + "serde", + "serde_json", + "simple_asn1 0.6.2", +] + [[package]] name = "jwt" version = "0.16.0" @@ -9682,11 +9948,11 @@ dependencies = [ "dirs-next", "either", "futures", - "http", - "http-body", - "hyper", + "http 0.2.11", + "http-body 0.4.6", + 
"hyper 0.14.28", "hyper-rustls 0.23.2", - "hyper-timeout", + "hyper-timeout 0.4.1", "jsonpath_lib", "k8s-openapi", "kube-core", @@ -9701,7 +9967,7 @@ dependencies = [ "tokio", "tokio-util 0.6.10", "tower", - "tower-http 0.2.5", + "tower-http", "tracing", ] @@ -9713,7 +9979,7 @@ checksum = "c52b6ab05d160691083430f6f431707a4e05b64903f2ffa0095ee5efde759117" dependencies = [ "chrono", "form_urlencoded", - "http", + "http 0.2.11", "json-patch", "k8s-openapi", "once_cell", @@ -9863,9 +10129,9 @@ checksum = "db13adb97ab515a3691f56e4dbab09283d0b86cb45abd991d8634a9d6f501760" [[package]] name = "libc" -version = "0.2.152" +version = "0.2.155" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7" +checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" [[package]] name = "libfuzzer-sys" @@ -9924,7 +10190,7 @@ version = "0.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.6.0", "libc", "redox_syscall 0.4.1", ] @@ -10054,15 +10320,6 @@ version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" -[[package]] -name = "listener" -version = "0.1.0" -dependencies = [ - "bytes", - "clap 4.4.14", - "tokio", -] - [[package]] name = "lock_api" version = "0.4.11" @@ -10135,12 +10392,12 @@ dependencies = [ ] [[package]] -name = "macros" -version = "0.1.0" -source = "git+https://github.com/niroco/diesel_async_migrations?rev=11f331b73c5cfcc894380074f748d8fda710ac12#11f331b73c5cfcc894380074f748d8fda710ac12" +name = "lz4_flex" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75761162ae2b0e580d7e7c390558127e5f01b4194debd6221fd8c207fc80e3f5" dependencies = [ - "proc-macro2", - "quote", + 
"twox-hash", ] [[package]] @@ -10170,12 +10427,6 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" -[[package]] -name = "matchit" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb" - [[package]] name = "matchit" version = "0.7.3" @@ -10411,7 +10662,7 @@ dependencies = [ "better_any", "bytes", "datatest-stable", - "itertools 0.12.1", + "itertools 0.13.0", "move-binary-format", "move-command-line-common", "move-compiler", @@ -10578,6 +10829,7 @@ dependencies = [ "move-stdlib", "move-symbol-pool", "once_cell", + "pathdiff", "petgraph 0.5.1", "regex", "sha3 0.9.1", @@ -10605,9 +10857,10 @@ dependencies = [ "ethnum", "flexi_logger", "im", - "itertools 0.12.1", + "itertools 0.13.0", "log", "move-binary-format", + "move-borrow-graph", "move-bytecode-source-map", "move-bytecode-verifier", "move-command-line-common", @@ -10623,6 +10876,8 @@ dependencies = [ "num 0.4.1", "once_cell", "petgraph 0.5.1", + "strum 0.24.1", + "strum_macros 0.24.3", "walkdir", ] @@ -10630,9 +10885,8 @@ dependencies = [ name = "move-compiler-v2-transactional-tests" version = "0.1.0" dependencies = [ - "aptos-vm", "datatest-stable", - "itertools 0.12.1", + "itertools 0.13.0", "move-command-line-common", "move-compiler-v2", "move-model", @@ -10710,7 +10964,7 @@ dependencies = [ "codespan", "codespan-reporting", "datatest-stable", - "itertools 0.12.1", + "itertools 0.13.0", "log", "move-compiler", "move-core-types", @@ -10837,7 +11091,7 @@ dependencies = [ "codespan-reporting", "datatest-stable", "internment", - "itertools 0.12.1", + "itertools 0.13.0", "log", "move-binary-format", "move-bytecode-source-map", @@ -10865,7 +11119,7 @@ dependencies = [ "datatest-stable", "evm-exec-utils", "hex", - "itertools 0.12.1", + "itertools 0.13.0", "move-abigen", "move-binary-format", 
"move-bytecode-source-map", @@ -10902,7 +11156,7 @@ dependencies = [ "clap 4.4.14", "codespan-reporting", "datatest-stable", - "itertools 0.12.1", + "itertools 0.13.0", "log", "move-abigen", "move-command-line-common", @@ -10933,7 +11187,7 @@ dependencies = [ "codespan", "codespan-reporting", "futures", - "itertools 0.12.1", + "itertools 0.13.0", "log", "move-binary-format", "move-command-line-common", @@ -10960,7 +11214,7 @@ dependencies = [ "anyhow", "codespan-reporting", "datatest-stable", - "itertools 0.12.1", + "itertools 0.13.0", "log", "move-binary-format", "move-core-types", @@ -11005,7 +11259,7 @@ dependencies = [ "datatest-stable", "ethnum", "im", - "itertools 0.12.1", + "itertools 0.13.0", "log", "move-binary-format", "move-core-types", @@ -11100,7 +11354,7 @@ dependencies = [ "ethnum", "evm", "evm-exec-utils", - "itertools 0.12.1", + "itertools 0.13.0", "maplit", "move-command-line-common", "move-compiler", @@ -11163,7 +11417,7 @@ dependencies = [ "difference", "evm", "evm-exec-utils", - "itertools 0.12.1", + "itertools 0.13.0", "move-binary-format", "move-bytecode-utils", "move-command-line-common", @@ -11177,6 +11431,7 @@ dependencies = [ "move-to-yul", "move-vm-runtime", "move-vm-test-utils", + "move-vm-types", "once_cell", "primitive-types 0.10.1", "rayon", @@ -11267,9 +11522,10 @@ name = "move-vm-types" version = "0.1.0" dependencies = [ "bcs 0.1.4", + "bytes", "claims", "derivative", - "itertools 0.12.1", + "itertools 0.13.0", "move-binary-format", "move-core-types", "proptest", @@ -11289,12 +11545,29 @@ dependencies = [ "bytes", "encoding_rs", "futures-util", - "http", + "http 0.2.11", "httparse", "log", "memchr", "mime", "spin 0.9.8", + "version_check", +] + +[[package]] +name = "multer" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83e87776546dc87511aa5ee218730c92b666d7264ab6ed41f9d215af9cd5224b" +dependencies = [ + "bytes", + "encoding_rs", + "futures-util", + "http 1.1.0", + "httparse", + 
"memchr", + "mime", + "spin 0.9.8", "tokio", "version_check", ] @@ -11406,8 +11679,20 @@ version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.6.0", + "cfg-if", + "libc", +] + +[[package]] +name = "nix" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" +dependencies = [ + "bitflags 2.6.0", "cfg-if", + "cfg_aliases", "libc", ] @@ -11715,17 +12000,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" -[[package]] -name = "number_range" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60080faccd4ca50ad0b801b2be686136376b13f691f6eac84817e40973b2e1bb" -dependencies = [ - "anyhow", - "itertools 0.10.5", - "num 0.4.1", -] - [[package]] name = "object" version = "0.32.2" @@ -11765,7 +12039,7 @@ version = "0.10.62" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8cde4d2d9200ad5909f8dac647e29482e07c3a35de8a13fce7c9c7747ad9f671" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.6.0", "cfg-if", "foreign-types 0.3.2", "libc", @@ -11944,7 +12218,7 @@ version = "3.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b" dependencies = [ - "proc-macro-crate 2.0.1", + "proc-macro-crate 2.0.0", "proc-macro2", "quote", "syn 1.0.109", @@ -12004,6 +12278,40 @@ dependencies = [ "windows-targets 0.48.5", ] +[[package]] +name = "parquet" +version = "52.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f22ba0d95db56dde8685e3fadcb915cdaadda31ab8abbe3ff7f0ad1ef333267" +dependencies = [ + "ahash 0.8.11", + 
"bytes", + "chrono", + "futures", + "half 2.2.1", + "hashbrown 0.14.3", + "lz4_flex", + "num 0.4.1", + "num-bigint 0.4.4", + "paste", + "seq-macro", + "thrift", + "tokio", + "twox-hash", +] + +[[package]] +name = "parquet_derive" +version = "52.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbfe02f8b63a15a78398db242f9b1d2dcc201319075ea6222c7108ffd48b23c0" +dependencies = [ + "parquet", + "proc-macro2", + "quote", + "syn 2.0.48", +] + [[package]] name = "parse-zoneinfo" version = "0.3.0" @@ -12052,7 +12360,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "499cff8432e71c5f8784d9645aac0f9fca604d67f59b68a606170b5e229c6538" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.6.0", "ciborium", "coset", "data-encoding", @@ -12146,6 +12454,16 @@ dependencies = [ "base64 0.13.1", ] +[[package]] +name = "pem" +version = "3.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" +dependencies = [ + "base64 0.22.1", + "serde", +] + [[package]] name = "pem-rfc7468" version = "0.3.1" @@ -12443,39 +12761,41 @@ dependencies = [ [[package]] name = "poem" -version = "1.3.59" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "504774c97b0744c1ee108a37e5a65a9745a4725c4c06277521dabc28eb53a904" +version = "3.0.1" +source = "git+https://github.com/poem-web/poem.git?rev=809b2816d3504beeba140fef3fdfe9432d654c5b#809b2816d3504beeba140fef3fdfe9432d654c5b" dependencies = [ "anyhow", - "async-trait", "bytes", "chrono", - "cookie 0.17.0", + "cookie 0.18.1", "futures-util", - "headers", - "http", - "hyper", + "headers 0.4.0", + "http 1.1.0", + "http-body-util", + "hyper 1.4.1", + "hyper-util", "mime", - "multer", - "nix 0.27.1", + "multer 3.1.0", + "nix 0.28.0", "parking_lot 0.12.1", "percent-encoding", "pin-project-lite", "poem-derive", - "quick-xml 0.30.0", + "quick-xml 0.32.0", "regex", 
"rfc7239", - "rustls-pemfile 1.0.4", + "rustls-pemfile 2.1.1", "serde", "serde_json", "serde_urlencoded", + "serde_yaml 0.9.30", "smallvec", + "sync_wrapper 1.0.1", "tempfile", "thiserror", "time", "tokio", - "tokio-rustls 0.24.1", + "tokio-rustls 0.25.0", "tokio-stream", "tokio-util 0.7.10", "tracing", @@ -12484,11 +12804,10 @@ dependencies = [ [[package]] name = "poem-derive" -version = "1.3.59" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42ddcf4680d8d867e1e375116203846acb088483fa2070244f90589f458bbb31" +version = "3.0.0" +source = "git+https://github.com/poem-web/poem.git?rev=809b2816d3504beeba140fef3fdfe9432d654c5b#809b2816d3504beeba140fef3fdfe9432d654c5b" dependencies = [ - "proc-macro-crate 2.0.1", + "proc-macro-crate 3.1.0", "proc-macro2", "quote", "syn 2.0.48", @@ -12496,19 +12815,19 @@ dependencies = [ [[package]] name = "poem-openapi" -version = "2.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e69c569eb0671cc85e65cfb6bd960d0168d24732ff58825227b4d2a10167ba91" +version = "5.0.2" +source = "git+https://github.com/poem-web/poem.git?rev=809b2816d3504beeba140fef3fdfe9432d654c5b#809b2816d3504beeba140fef3fdfe9432d654c5b" dependencies = [ - "base64 0.13.1", + "base64 0.22.1", "bytes", "derive_more", "futures-util", + "indexmap 2.2.5", "mime", "num-traits", "poem", "poem-openapi-derive", - "quick-xml 0.23.1", + "quick-xml 0.32.0", "regex", "serde", "serde_json", @@ -12521,19 +12840,18 @@ dependencies = [ [[package]] name = "poem-openapi-derive" -version = "2.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "274cf13f710999977a3c1e396c2a5000d104075a7127ce6470fbdae4706be621" +version = "5.0.2" +source = "git+https://github.com/poem-web/poem.git?rev=809b2816d3504beeba140fef3fdfe9432d654c5b#809b2816d3504beeba140fef3fdfe9432d654c5b" dependencies = [ - "darling 0.14.4", - "http", - "indexmap 1.9.3", + "darling 0.20.9", + "http 1.1.0", + "indexmap 2.2.5", "mime", - 
"proc-macro-crate 1.3.1", + "proc-macro-crate 3.1.0", "proc-macro2", "quote", "regex", - "syn 1.0.109", + "syn 2.0.48", "thiserror", ] @@ -12831,14 +13149,22 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "2.0.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97dc5fea232fc28d2f597b37c4876b348a40e33f3b02cc975c8d006d78d94b1a" +checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" dependencies = [ - "toml_datetime", "toml_edit 0.20.2", ] +[[package]] +name = "proc-macro-crate" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" +dependencies = [ + "toml_edit 0.21.1", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -12887,40 +13213,46 @@ dependencies = [ [[package]] name = "processor" version = "1.0.0" -source = "git+https://github.com/aptos-labs/aptos-indexer-processors.git?rev=5244b84fa5ed872e5280dc8df032d744d62ad29d#5244b84fa5ed872e5280dc8df032d744d62ad29d" +source = "git+https://github.com/aptos-labs/aptos-indexer-processors.git?rev=fa1ce4947f4c2be57529f1c9732529e05a06cb7f#fa1ce4947f4c2be57529f1c9732529e05a06cb7f" dependencies = [ "ahash 0.8.11", + "allocative", + "allocative_derive", "anyhow", - "aptos-moving-average 0.1.0 (git+https://github.com/aptos-labs/aptos-indexer-processors.git?rev=5244b84fa5ed872e5280dc8df032d744d62ad29d)", - "aptos-protos 1.3.0", + "aptos-moving-average 0.1.0 (git+https://github.com/aptos-labs/aptos-indexer-processors.git?rev=fa1ce4947f4c2be57529f1c9732529e05a06cb7f)", + "aptos-protos 1.3.1 (git+https://github.com/aptos-labs/aptos-core.git?rev=5c48aee129b5a141be2792ffa3d9bd0a1a61c9cb)", "async-trait", - "base64 0.13.1", "bcs 0.1.4", "bigdecimal", + "bitflags 2.6.0", + "canonical_json", "chrono", "clap 4.4.14", "diesel", - "diesel-async 0.4.1 
(git+https://github.com/weiznich/diesel_async.git?rev=d02798c67065d763154d7272dd0c09b39757d0f2)", - "diesel_async_migrations", + "diesel-async", "diesel_migrations", "enum_dispatch", "field_count", "futures", "futures-util", - "gcloud-sdk", "google-cloud-googleapis", "google-cloud-pubsub", + "google-cloud-storage", "hex", + "hyper 0.14.28", "itertools 0.12.1", "jemallocator", "kanal", + "lazy_static", "native-tls", + "num 0.4.1", "num_cpus", "once_cell", + "parquet", + "parquet_derive", "postgres-native-tls", "prometheus", "prost 0.12.3", - "prost-types 0.12.3", "regex", "serde", "serde_json", @@ -12928,6 +13260,7 @@ dependencies = [ "sha2 0.9.9", "sha3 0.9.1", "strum 0.24.1", + "tiny-keccak", "tokio", "tokio-postgres", "tonic 0.11.0", @@ -12977,7 +13310,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ae2f6a3f14ff35c16b51ac796d1dc73c15ad6472c48836c6c467f6d52266648" dependencies = [ - "reqwest", + "reqwest 0.11.23", "serde", "serde_json", "time", @@ -13004,7 +13337,7 @@ checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.4.1", + "bitflags 2.6.0", "lazy_static", "num-traits", "rand 0.8.5", @@ -13047,27 +13380,50 @@ dependencies = [ "prost-derive 0.12.3", ] +[[package]] +name = "prost" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13db3d3fde688c61e2446b4d843bc27a7e8af269a69440c0308021dc92333cc" +dependencies = [ + "bytes", + "prost-derive 0.13.1", +] + [[package]] name = "prost-derive" version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" +checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" +dependencies = [ + "anyhow", + "itertools 0.10.5", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "prost-derive" +version = "0.12.3" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "efb6c9a1dd1def8e2124d17e83a20af56f1570d6c2d2bd9e266ccb768df3840e" dependencies = [ "anyhow", - "itertools 0.10.5", + "itertools 0.11.0", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.48", ] [[package]] name = "prost-derive" -version = "0.12.3" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efb6c9a1dd1def8e2124d17e83a20af56f1570d6c2d2bd9e266ccb768df3840e" +checksum = "18bec9b0adc4eba778b33684b7ba3e7137789434769ee3ce3930463ef904cfca" dependencies = [ "anyhow", - "itertools 0.11.0", + "itertools 0.13.0", "proc-macro2", "quote", "syn 2.0.48", @@ -13091,6 +13447,15 @@ dependencies = [ "prost 0.12.3", ] +[[package]] +name = "prost-types" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cee5168b05f49d4b0ca581206eb14a7b22fafd963efe729ac48eb03266e25cc2" +dependencies = [ + "prost 0.13.1", +] + [[package]] name = "protobuf" version = "2.28.0" @@ -13124,9 +13489,10 @@ dependencies = [ "chrono", "clap 4.4.14", "codespan-reporting", - "itertools 0.12.1", + "itertools 0.13.0", "log", "move-compiler", + "move-compiler-v2", "move-model", "move-prover", "move-prover-boogie-backend", @@ -13199,7 +13565,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11bafc859c6815fbaffbbbf4229ecb767ac913fecb27f9ad4343662e9ef099ea" dependencies = [ "memchr", - "serde", ] [[package]] @@ -13213,9 +13578,9 @@ dependencies = [ [[package]] name = "quick-xml" -version = "0.30.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eff6510e86862b57b210fd8cbe8ed3f0d7d600b9c2863cd4549a2e033c66e956" +checksum = "1d3a6e5838b60e0e8fa7a43f22ade549a37d61f8bdbe636d0d7816191de969c2" dependencies = [ "memchr", "serde", @@ -13233,6 +13598,52 @@ dependencies = [ "parking_lot 0.12.1", ] +[[package]] +name = "quinn" +version = "0.11.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4ceeeeabace7857413798eb1ffa1e9c905a9946a57d81fb69b4b71c4d8eb3ad" +dependencies = [ + "bytes", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls 0.23.7", + "thiserror", + "tokio", + "tracing", +] + +[[package]] +name = "quinn-proto" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddf517c03a109db8100448a4be38d498df8a210a99fe0e1b9eaf39e78c640efe" +dependencies = [ + "bytes", + "rand 0.8.5", + "ring 0.17.7", + "rustc-hash", + "rustls 0.23.7", + "slab", + "thiserror", + "tinyvec", + "tracing", +] + +[[package]] +name = "quinn-udp" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bffec3605b73c6f1754535084a85229fa8a30f86014e6c81aeec4abb68b0285" +dependencies = [ + "libc", + "once_cell", + "socket2 0.5.5", + "windows-sys 0.52.0", +] + [[package]] name = "quote" version = "1.0.35" @@ -13531,7 +13942,6 @@ version = "0.11.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41" dependencies = [ - "async-compression", "base64 0.21.6", "bytes", "cookie 0.16.2", @@ -13539,10 +13949,10 @@ dependencies = [ "encoding_rs", "futures-core", "futures-util", - "h2", - "http", - "http-body", - "hyper", + "h2 0.3.26", + "http 0.2.11", + "http-body 0.4.6", + "hyper 0.14.28", "hyper-rustls 0.24.2", "hyper-tls", "ipnet", @@ -13568,10 +13978,56 @@ dependencies = [ "url", "wasm-bindgen", "wasm-bindgen-futures", - "wasm-streams", + "wasm-streams 0.3.0", "web-sys", "webpki-roots 0.25.3", - "winreg", + "winreg 0.50.0", +] + +[[package]] +name = "reqwest" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7d6d2a27d57148378eb5e111173f4276ad26340ecc5c49a4a2152167a2d6a37" +dependencies = [ + "async-compression", + "base64 0.22.1", + "bytes", + "futures-core", + 
"futures-util", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.4.1", + "hyper-rustls 0.27.2", + "hyper-util", + "ipnet", + "js-sys", + "log", + "mime", + "mime_guess", + "once_cell", + "percent-encoding", + "pin-project-lite", + "quinn", + "rustls 0.23.7", + "rustls-native-certs 0.7.0", + "rustls-pemfile 2.1.1", + "rustls-pki-types", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper 1.0.1", + "tokio", + "tokio-rustls 0.26.0", + "tokio-util 0.7.10", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-streams 0.4.0", + "web-sys", + "winreg 0.52.0", ] [[package]] @@ -13582,8 +14038,8 @@ checksum = "88a3e86aa6053e59030e7ce2d2a3b258dd08fc2d337d52f73f6cb480f5858690" dependencies = [ "anyhow", "async-trait", - "http", - "reqwest", + "http 0.2.11", + "reqwest 0.11.23", "serde", "task-local-extensions", "thiserror", @@ -13600,10 +14056,10 @@ dependencies = [ "chrono", "futures", "getrandom 0.2.11", - "http", - "hyper", + "http 0.2.11", + "hyper 0.14.28", "parking_lot 0.11.2", - "reqwest", + "reqwest 0.11.23", "reqwest-middleware", "retry-policies", "task-local-extensions", @@ -13767,6 +14223,17 @@ dependencies = [ "zeroize", ] +[[package]] +name = "rsb_derive" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2c53e42fccdc5f1172e099785fe78f89bc0c1e657d0c2ef591efbfac427e9a4" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "rstack" version = "0.3.3" @@ -13881,7 +14348,7 @@ version = "0.38.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.6.0", "errno", "libc", "linux-raw-sys 0.4.12", @@ -14049,6 +14516,26 @@ dependencies = [ "wait-timeout", ] +[[package]] +name = "rvs_derive" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6e1fa12378eb54f3d4f2db8dcdbe33af610b7e7d001961c1055858282ecef2a5" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "rvstruct" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5107860ec34506b64cf3680458074eac5c2c564f7ccc140918bbcd1714fd8d5d" +dependencies = [ + "rvs_derive", +] + [[package]] name = "ryu" version = "1.0.16" @@ -14221,12 +14708,12 @@ name = "self_update" version = "0.39.0" source = "git+https://github.com/banool/self_update.git?rev=8306158ad0fd5b9d4766a3c6bf967e7ef0ea5c4b#8306158ad0fd5b9d4766a3c6bf967e7ef0ea5c4b" dependencies = [ - "hyper", + "hyper 0.14.28", "indicatif 0.17.7", "log", "quick-xml 0.23.1", "regex", - "reqwest", + "reqwest 0.11.23", "self-replace", "semver", "serde_json", @@ -14246,14 +14733,10 @@ dependencies = [ ] [[package]] -name = "sender" -version = "0.1.0" -dependencies = [ - "bytes", - "clap 4.4.14", - "event-listener 2.5.3", - "tokio", -] +name = "seq-macro" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4" [[package]] name = "serde" @@ -14374,6 +14857,16 @@ dependencies = [ "thiserror", ] +[[package]] +name = "serde_path_to_error" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" +dependencies = [ + "itoa", + "serde", +] + [[package]] name = "serde_regex" version = "1.1.0" @@ -14493,14 +14986,13 @@ dependencies = [ [[package]] name = "server-framework" version = "1.0.0" -source = "git+https://github.com/aptos-labs/aptos-indexer-processors.git?rev=5244b84fa5ed872e5280dc8df032d744d62ad29d#5244b84fa5ed872e5280dc8df032d744d62ad29d" +source = "git+https://github.com/aptos-labs/aptos-indexer-processors.git?rev=fa1ce4947f4c2be57529f1c9732529e05a06cb7f#fa1ce4947f4c2be57529f1c9732529e05a06cb7f" dependencies = [ "anyhow", 
"aptos-system-utils 0.1.0 (git+https://github.com/aptos-labs/aptos-core.git?rev=4541add3fd29826ec57f22658ca286d2d6134b93)", "async-trait", "backtrace", "clap 4.4.14", - "futures", "prometheus", "serde", "serde_yaml 0.8.26", @@ -14846,6 +15338,7 @@ dependencies = [ "aptos-consensus", "aptos-crypto", "aptos-db", + "aptos-db-indexer", "aptos-db-indexer-schemas", "aptos-debugger", "aptos-dkg", @@ -14881,13 +15374,13 @@ dependencies = [ "digest 0.9.0", "futures", "hex", - "hyper", + "hyper 0.14.28", "move-core-types", "num_cpus", "once_cell", "rand 0.7.3", "regex", - "reqwest", + "reqwest 0.11.23", "serde", "serde_json", "serde_yaml 0.8.26", @@ -15056,6 +15549,15 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" +[[package]] +name = "struct-path" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "899edf28cf7320503eda593b4bbce1bc5e9533501a11d45537e2c5be90128fc7" +dependencies = [ + "convert_case 0.6.0", +] + [[package]] name = "structopt" version = "0.3.26" @@ -15162,7 +15664,7 @@ dependencies = [ "anyhow", "aptos-types", "async-trait", - "reqwest", + "reqwest 0.11.23", ] [[package]] @@ -15216,6 +15718,15 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "sync_wrapper" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +dependencies = [ + "futures-core", +] + [[package]] name = "sysinfo" version = "0.28.4" @@ -15358,6 +15869,16 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "terminal_size" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" 
+dependencies = [ + "rustix 0.38.28", + "windows-sys 0.48.0", +] + [[package]] name = "termtree" version = "0.4.1" @@ -15405,7 +15926,7 @@ dependencies = [ "crossbeam-channel", "getrandom 0.2.11", "hex", - "itertools 0.12.1", + "itertools 0.13.0", "module-generation", "move-binary-format", "move-bytecode-verifier", @@ -15416,7 +15937,6 @@ dependencies = [ "move-vm-test-utils", "move-vm-types", "num_cpus", - "once_cell", "rand 0.8.5", "tracing", "tracing-subscriber 0.3.18", @@ -15508,6 +16028,17 @@ dependencies = [ "num_cpus", ] +[[package]] +name = "thrift" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e54bc85fc7faa8bc175c4bab5b92ba8d9a3ce893d0e9f42cc455c8ab16a9e09" +dependencies = [ + "byteorder", + "integer-encoding", + "ordered-float 2.10.1", +] + [[package]] name = "tiff" version = "0.9.0" @@ -15712,6 +16243,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-rustls" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +dependencies = [ + "rustls 0.22.4", + "rustls-pki-types", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.26.0" @@ -15822,9 +16364,9 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.3" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" +checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" dependencies = [ "serde", ] @@ -15853,6 +16395,17 @@ dependencies = [ "winnow", ] +[[package]] +name = "toml_edit" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" +dependencies = [ + "indexmap 2.2.5", + "toml_datetime", + "winnow", +] + [[package]] name = "tonic" version = "0.9.2" @@ -15867,15 +16420,14 
@@ dependencies = [ "flate2", "futures-core", "futures-util", - "h2", - "http", - "http-body", - "hyper", - "hyper-timeout", + "h2 0.3.26", + "http 0.2.11", + "http-body 0.4.6", + "hyper 0.14.28", + "hyper-timeout 0.4.1", "percent-encoding", "pin-project 1.1.3", "prost 0.11.9", - "rustls-native-certs 0.6.3", "rustls-pemfile 1.0.4", "tokio", "tokio-rustls 0.24.1", @@ -15898,11 +16450,11 @@ dependencies = [ "base64 0.22.1", "bytes", "flate2", - "h2", - "http", - "http-body", - "hyper", - "hyper-timeout", + "h2 0.3.26", + "http 0.2.11", + "http-body 0.4.6", + "hyper 0.14.28", + "hyper-timeout 0.4.1", "percent-encoding", "pin-project 1.1.3", "prost 0.12.3", @@ -15920,6 +16472,39 @@ dependencies = [ "zstd", ] +[[package]] +name = "tonic" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38659f4a91aba8598d27821589f5db7dddd94601e7a01b1e485a50e5484c7401" +dependencies = [ + "async-stream", + "async-trait", + "axum 0.7.5", + "base64 0.22.1", + "bytes", + "h2 0.4.5", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.4.1", + "hyper-timeout 0.5.1", + "hyper-util", + "percent-encoding", + "pin-project 1.1.3", + "prost 0.13.1", + "rustls-native-certs 0.7.0", + "rustls-pemfile 2.1.1", + "socket2 0.5.5", + "tokio", + "tokio-rustls 0.26.0", + "tokio-stream", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tonic-reflection" version = "0.11.0" @@ -15964,8 +16549,8 @@ dependencies = [ "bytes", "futures-core", "futures-util", - "http", - "http-body", + "http 0.2.11", + "http-body 0.4.6", "http-range-header", "pin-project-lite", "tower-layer", @@ -15973,25 +16558,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "tower-http" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858" -dependencies = [ - "bitflags 1.3.2", - "bytes", - "futures-core", - "futures-util", - "http", 
- "http-body", - "http-range-header", - "pin-project-lite", - "tower", - "tower-layer", - "tower-service", -] - [[package]] name = "tower-layer" version = "0.3.2" @@ -16193,7 +16759,7 @@ dependencies = [ "byteorder", "bytes", "data-encoding", - "http", + "http 0.2.11", "httparse", "log", "rand 0.8.5", @@ -16203,6 +16769,16 @@ dependencies = [ "utf-8", ] +[[package]] +name = "twox-hash" +version = "1.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" +dependencies = [ + "cfg-if", + "static_assertions", +] + [[package]] name = "typed-arena" version = "2.0.2" @@ -16504,9 +17080,9 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" -version = "1.6.1" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e395fcf16a7a3d8127ec99782007af141946b4795001f876d54fb0d55978560" +checksum = "5de17fd2f7da591098415cff336e12965a28061ddace43b59cb3c430179c9439" dependencies = [ "getrandom 0.2.11", "serde", @@ -16601,13 +17177,13 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "headers", - "http", - "hyper", + "headers 0.3.9", + "http 0.2.11", + "hyper 0.14.28", "log", "mime", "mime_guess", - "multer", + "multer 2.1.0", "percent-encoding", "pin-project 1.1.3", "rustls-pemfile 1.0.4", @@ -16630,9 +17206,9 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "948552bbb7a5fb4ba3169fd09b6c1ab53c1b2fdd82603295df550f7a1ec644c0" dependencies = [ - "hyper", + "hyper 0.14.28", "once_cell", - "reqwest", + "reqwest 0.11.23", "thiserror", "unicase", "warp", @@ -16735,6 +17311,19 @@ dependencies = [ "web-sys", ] +[[package]] +name = "wasm-streams" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b65dc4c90b63b118468cf747d8bf3566c1913ef60be765b5730ead9e0a3ba129" +dependencies = [ + "futures-util", 
+ "js-sys", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "wasm-timer" version = "0.2.5" @@ -17099,6 +17688,16 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "winreg" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" +dependencies = [ + "cfg-if", + "windows-sys 0.48.0", +] + [[package]] name = "wyz" version = "0.2.0" @@ -17166,8 +17765,8 @@ dependencies = [ "async-trait", "base64 0.21.6", "futures", - "http", - "hyper", + "http 0.2.11", + "hyper 0.14.28", "hyper-rustls 0.24.2", "itertools 0.12.1", "log", diff --git a/Cargo.toml b/Cargo.toml index 1372c1480ae57..3e01cd584eeda 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -41,7 +41,6 @@ members = [ "aptos-move/mvhashmap", "aptos-move/package-builder", "aptos-move/vm-genesis", - "aptos-move/writeset-transaction-generator", "aptos-node", "aptos-utils", "config", @@ -118,12 +117,14 @@ members = [ "ecosystem/indexer-grpc/indexer-grpc-cache-worker", "ecosystem/indexer-grpc/indexer-grpc-data-service", "ecosystem/indexer-grpc/indexer-grpc-file-store", + "ecosystem/indexer-grpc/indexer-grpc-file-store-backfiller", "ecosystem/indexer-grpc/indexer-grpc-fullnode", "ecosystem/indexer-grpc/indexer-grpc-in-memory-cache-benchmark", - "ecosystem/indexer-grpc/indexer-grpc-integration-tests", "ecosystem/indexer-grpc/indexer-grpc-server-framework", "ecosystem/indexer-grpc/indexer-grpc-table-info", "ecosystem/indexer-grpc/indexer-grpc-utils", + "ecosystem/indexer-grpc/indexer-test-transactions", + "ecosystem/indexer-grpc/indexer-transaction-generator", "ecosystem/indexer-grpc/transaction-filter", "ecosystem/nft-metadata-crawler-parser", "ecosystem/node-checker", @@ -136,6 +137,7 @@ members = [ "execution/executor-types", "experimental/execution/ptx-executor", "experimental/runtimes", + "experimental/storage/hexy", "experimental/storage/layered-map", "keyless/circuit", 
"keyless/common", @@ -180,9 +182,6 @@ members = [ "storage/schemadb", "storage/scratchpad", "storage/storage-interface", - "testsuite/dos/http_test", - "testsuite/dos/listener", - "testsuite/dos/sender", "testsuite/forge", "testsuite/forge-cli", "testsuite/fuzzer", @@ -334,6 +333,7 @@ aptos-enum-conversion-derive = { path = "crates/aptos-enum-conversion-derive" } aptos-executor-service = { path = "execution/executor-service" } aptos-executor-test-helpers = { path = "execution/executor-test-helpers" } aptos-executor-types = { path = "execution/executor-types" } +aptos-experimental-hexy = { path = "experimental/storage/hexy" } aptos-experimental-layered-map = { path = "experimental/storage/layered-map" } aptos-experimental-ptx-executor = { path = "experimental/execution/ptx-executor" } aptos-experimental-runtimes = { path = "experimental/runtimes" } @@ -360,11 +360,14 @@ aptos-indexer = { path = "crates/indexer" } aptos-indexer-grpc-cache-worker = { path = "ecosystem/indexer-grpc/indexer-grpc-cache-worker" } aptos-indexer-grpc-data-service = { path = "ecosystem/indexer-grpc/indexer-grpc-data-service" } aptos-indexer-grpc-file-store = { path = "ecosystem/indexer-grpc/indexer-grpc-file-store" } +aptos-indexer-grpc-file-store-backfiller = { path = "ecosystem/indexer-grpc/indexer-grpc-file-store-backfiller" } aptos-indexer-grpc-fullnode = { path = "ecosystem/indexer-grpc/indexer-grpc-fullnode" } aptos-indexer-grpc-in-memory-cache-benchmark = { path = "ecosystem/indexer-grpc/indexer-grpc-in-memory-cache-benchmark" } aptos-indexer-grpc-table-info = { path = "ecosystem/indexer-grpc/indexer-grpc-table-info" } +aptos-indexer-test-transactions = { path = "ecosystem/indexer-grpc/indexer-test-transactions" } aptos-indexer-grpc-utils = { path = "ecosystem/indexer-grpc/indexer-grpc-utils" } aptos-indexer-grpc-server-framework = { path = "ecosystem/indexer-grpc/indexer-grpc-server-framework" } +aptos-indexer-transaction-generator = { path = 
"ecosystem/indexer-grpc/indexer-transaction-generator" } aptos-infallible = { path = "crates/aptos-infallible" } aptos-inspection-service = { path = "crates/aptos-inspection-service" } aptos-jellyfish-merkle = { path = "storage/jellyfish-merkle" } @@ -457,7 +460,6 @@ aptos-vm-genesis = { path = "aptos-move/vm-genesis" } aptos-vm-types = { path = "aptos-move/aptos-vm-types" } aptos-vm-validator = { path = "vm-validator" } aptos-warp-webserver = { path = "crates/aptos-warp-webserver" } -aptos-writeset-generator = { path = "aptos-move/writeset-transaction-generator" } aptos-cargo-cli = { path = "devtools/aptos-cargo-cli" } # External crate dependencies. @@ -488,7 +490,7 @@ async-mutex = "1.4.0" async-recursion = "1.0.5" async-stream = "0.3" async-trait = "0.1.53" -axum = "0.5.16" +axum = "0.7.5" base64 = "0.13.0" base64-url = "2.0.1" backoff = { version = "0.4.0", features = ["tokio"] } @@ -514,7 +516,7 @@ cfg_block = "0.1.1" cfg-if = "1.0.0" ciborium = "0.2" claims = "0.7" -clap = { version = "4.3.9", features = ["derive", "env", "unstable-styles"] } +clap = { version = "4.3.9", features = ["derive", "env", "unstable-styles", "wrap_help"] } clap-verbosity-flag = "2.1.1" clap_complete = "4.4.1" cloud-storage = { version = "0.11.1", features = [ @@ -576,6 +578,7 @@ fail = "0.5.0" ff = { version = "0.13", features = ["derive"] } field_count = "0.1.1" file_diff = "1.0.0" +firestore = "0.43.0" fixed = "1.25.1" flate2 = "1.0.24" flexi_logger = "0.27.4" @@ -613,7 +616,7 @@ indoc = "1.0.6" inferno = "0.11.14" internment = { version = "0.5.0", features = ["arc"] } ipnet = "2.5.0" -itertools = "0.12" +itertools = "0.13" jemallocator = { version = "0.5.0", features = [ "profiling", "unprefixed_malloc_on_supported_platforms", @@ -647,7 +650,6 @@ num_cpus = "1.13.1" num-derive = "0.3.3" num-integer = "0.1.42" num-traits = "0.2.15" -number_range = "0.3.2" once_cell = "1.10.0" ordered-float = "3.9.1" ouroboros = "0.15.6" @@ -669,9 +671,10 @@ percent-encoding = "2.1.0" petgraph = 
"0.5.1" pin-project = "1.0.10" plotters = { version = "0.3.5", default-features = false } -poem = { version = "=1.3.59", features = ["anyhow", "rustls"] } -poem-openapi = { version = "=2.0.11", features = ["swagger-ui", "url"] } -poem-openapi-derive = "=2.0.11" +# We're using git deps until https://github.com/poem-web/poem/pull/829 gets formally released. +poem = { git = "https://github.com/poem-web/poem.git", rev = "809b2816d3504beeba140fef3fdfe9432d654c5b", features = ["anyhow", "rustls"] } +poem-openapi = { git = "https://github.com/poem-web/poem.git", rev = "809b2816d3504beeba140fef3fdfe9432d654c5b", features = ["swagger-ui", "url"] } +poem-openapi-derive = { git = "https://github.com/poem-web/poem.git", rev = "809b2816d3504beeba140fef3fdfe9432d654c5b" } poseidon-ark = { git = "https://github.com/arnaucube/poseidon-ark.git", rev = "6d2487aa1308d9d3860a2b724c485d73095c1c68" } pprof = { version = "0.11", features = ["flamegraph", "protobuf-codec"] } pretty = "0.10.0" @@ -705,6 +708,7 @@ reqwest = { version = "0.11.11", features = [ "blocking", "cookies", "json", + "multipart", "stream", ] } reqwest-middleware = "0.2.0" diff --git a/api/Cargo.toml b/api/Cargo.toml index c037526d47d02..fcc5efdca74ec 100644 --- a/api/Cargo.toml +++ b/api/Cargo.toml @@ -28,13 +28,11 @@ aptos-runtimes = { workspace = true } aptos-storage-interface = { workspace = true } aptos-types = { workspace = true } aptos-vm = { workspace = true } -async-trait = { workspace = true } bcs = { workspace = true } bytes = { workspace = true } fail = { workspace = true } futures = { workspace = true } hex = { workspace = true } -hyper = { workspace = true } itertools = { workspace = true } mime = { workspace = true } mini-moka = { workspace = true } diff --git a/api/doc/CHANGELOG.md b/api/doc/CHANGELOG.md index 440c4161df200..53e0caca35421 100644 --- a/api/doc/CHANGELOG.md +++ b/api/doc/CHANGELOG.md @@ -5,7 +5,7 @@ All notable changes to the Supra Node API will be captured in this file. 
This ch **Note**: The Supra Node API does not follow semantic version while we are in active development. Instead, breaking changes will be announced with each devnet cut. Once we launch our mainnet, the API will follow semantic versioning closely. ## Unreleased -N/A +- OpenAPI layout changed slightly in some enum cases, see [#13929](https://github.com/aptos-labs/aptos-core/pull/13929) for more information. ## 1.2.0 (2022-09-29) - **[Breaking Changes]** Following the deprecation notice from the previous release, the following breaking changes have landed in this release. Please see the notes from last release for information on the new endpoints you must migrate to: diff --git a/api/doc/spec.html b/api/doc/spec.html index 99f0632e278ee..6a07046200dd6 100644 --- a/api/doc/spec.html +++ b/api/doc/spec.html @@ -4,7 +4,7 @@ - Aptos REST API + Aptos Node API diff --git a/api/doc/spec.json b/api/doc/spec.json index d36e4a6a0100d..bf944fc8e7000 100644 --- a/api/doc/spec.json +++ b/api/doc/spec.json @@ -13968,6 +13968,9 @@ }, { "$ref": "#/components/schemas/AccountSignature_MultiKeySignature" + }, + { + "$ref": "#/components/schemas/AccountSignature_NoAccountSignature" } ], "discriminator": { @@ -13976,7 +13979,8 @@ "ed25519_signature": "#/components/schemas/AccountSignature_Ed25519Signature", "multi_ed25519_signature": "#/components/schemas/AccountSignature_MultiEd25519Signature", "single_key_signature": "#/components/schemas/AccountSignature_SingleKeySignature", - "multi_key_signature": "#/components/schemas/AccountSignature_MultiKeySignature" + "multi_key_signature": "#/components/schemas/AccountSignature_MultiKeySignature", + "no_account_signature": "#/components/schemas/AccountSignature_NoAccountSignature" } } }, @@ -13990,6 +13994,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "ed25519_signature" + ], "example": "ed25519_signature" } } @@ -14009,6 +14016,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "multi_ed25519_signature" + ], 
"example": "multi_ed25519_signature" } } @@ -14028,6 +14038,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "multi_key_signature" + ], "example": "multi_key_signature" } } @@ -14037,6 +14050,28 @@ } ] }, + "AccountSignature_NoAccountSignature": { + "allOf": [ + { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "no_account_signature" + ], + "example": "no_account_signature" + } + } + }, + { + "$ref": "#/components/schemas/NoAccountSignature" + } + ] + }, "AccountSignature_SingleKeySignature": { "allOf": [ { @@ -14047,6 +14082,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "single_key_signature" + ], "example": "single_key_signature" } } @@ -14743,6 +14781,17 @@ } } }, + "FederatedKeyless": { + "type": "object", + "required": [ + "value" + ], + "properties": { + "value": { + "$ref": "#/components/schemas/HexEncodedBytes" + } + } + }, "FeePayerSignature": { "type": "object", "description": "Fee payer signature for fee payer transactions\n\nThis allows you to have transactions across multiple accounts and with a fee payer", @@ -14842,6 +14891,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "write_set_payload" + ], "example": "write_set_payload" } } @@ -15267,6 +15319,7 @@ "required": [ "name", "is_native", + "is_event", "abilities", "generic_type_params", "fields" @@ -15279,6 +15332,10 @@ "type": "boolean", "description": "Whether the struct is a native struct of Move" }, + "is_event": { + "type": "boolean", + "description": "Whether the struct is marked with the #[event] annotation" + }, "abilities": { "type": "array", "description": "Abilities associated with the struct", @@ -15539,6 +15596,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "entry_function_payload" + ], "example": "entry_function_payload" } } @@ -15548,6 +15608,10 @@ } ] }, + "NoAccountSignature": { + "type": "object", + "description": "A placeholder to represent the absence of 
account signature" + }, "PendingTransaction": { "type": "object", "description": "A transaction waiting in mempool", @@ -15601,6 +15665,9 @@ }, { "$ref": "#/components/schemas/PublicKey_Keyless" + }, + { + "$ref": "#/components/schemas/PublicKey_FederatedKeyless" } ], "discriminator": { @@ -15609,7 +15676,8 @@ "ed25519": "#/components/schemas/PublicKey_Ed25519", "secp256k1_ecdsa": "#/components/schemas/PublicKey_Secp256k1Ecdsa", "secp256r1_ecdsa": "#/components/schemas/PublicKey_Secp256r1Ecdsa", - "keyless": "#/components/schemas/PublicKey_Keyless" + "keyless": "#/components/schemas/PublicKey_Keyless", + "federated_keyless": "#/components/schemas/PublicKey_FederatedKeyless" } } }, @@ -15623,6 +15691,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "ed25519" + ], "example": "ed25519" } } @@ -15632,6 +15703,28 @@ } ] }, + "PublicKey_FederatedKeyless": { + "allOf": [ + { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "federated_keyless" + ], + "example": "federated_keyless" + } + } + }, + { + "$ref": "#/components/schemas/FederatedKeyless" + } + ] + }, "PublicKey_Keyless": { "allOf": [ { @@ -15642,6 +15735,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "keyless" + ], "example": "keyless" } } @@ -15661,6 +15757,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "secp256k1_ecdsa" + ], "example": "secp256k1_ecdsa" } } @@ -15680,6 +15779,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "secp256r1_ecdsa" + ], "example": "secp256r1_ecdsa" } } @@ -15835,6 +15937,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "ed25519" + ], "example": "ed25519" } } @@ -15854,6 +15959,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "keyless" + ], "example": "keyless" } } @@ -15873,6 +15981,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "secp256k1_ecdsa" + ], "example": "secp256k1_ecdsa" } } @@ -15892,6 +16003,9 @@ 
"properties": { "type": { "type": "string", + "enum": [ + "web_authn" + ], "example": "web_authn" } } @@ -16111,6 +16225,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "module_bundle_payload" + ], "example": "module_bundle_payload" } } @@ -16130,6 +16247,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "entry_function_payload" + ], "example": "entry_function_payload" } } @@ -16149,6 +16269,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "multisig_payload" + ], "example": "multisig_payload" } } @@ -16168,6 +16291,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "script_payload" + ], "example": "script_payload" } } @@ -16218,6 +16344,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "single_sender" + ], "example": "single_sender" } } @@ -16237,6 +16366,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "ed25519_signature" + ], "example": "ed25519_signature" } } @@ -16256,6 +16388,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "fee_payer_signature" + ], "example": "fee_payer_signature" } } @@ -16275,6 +16410,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "multi_agent_signature" + ], "example": "multi_agent_signature" } } @@ -16294,6 +16432,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "multi_ed25519_signature" + ], "example": "multi_ed25519_signature" } } @@ -16313,6 +16454,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "block_epilogue_transaction" + ], "example": "block_epilogue_transaction" } } @@ -16332,6 +16476,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "block_metadata_transaction" + ], "example": "block_metadata_transaction" } } @@ -16351,6 +16498,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "genesis_transaction" + ], "example": "genesis_transaction" } } @@ -16370,6 +16520,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "pending_transaction" + ], "example": 
"pending_transaction" } } @@ -16389,6 +16542,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "state_checkpoint_transaction" + ], "example": "state_checkpoint_transaction" } } @@ -16408,6 +16564,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "user_transaction" + ], "example": "user_transaction" } } @@ -16427,6 +16586,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "validator_transaction" + ], "example": "validator_transaction" } } @@ -16632,6 +16794,9 @@ "properties": { "validator_transaction_type": { "type": "string", + "enum": [ + "dkg_result" + ], "example": "dkg_result" } } @@ -16651,6 +16816,9 @@ "properties": { "validator_transaction_type": { "type": "string", + "enum": [ + "observed_jwk_update" + ], "example": "observed_jwk_update" } } @@ -16831,6 +16999,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "delete_module" + ], "example": "delete_module" } } @@ -16850,6 +17021,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "delete_resource" + ], "example": "delete_resource" } } @@ -16869,6 +17043,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "delete_table_item" + ], "example": "delete_table_item" } } @@ -16888,6 +17065,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "write_module" + ], "example": "write_module" } } @@ -16907,6 +17087,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "write_resource" + ], "example": "write_resource" } } @@ -16926,6 +17109,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "write_table_item" + ], "example": "write_table_item" } } @@ -16957,6 +17143,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "direct_write_set" + ], "example": "direct_write_set" } } @@ -16976,6 +17165,9 @@ "properties": { "type": { "type": "string", + "enum": [ + "script_write_set" + ], "example": "script_write_set" } } diff --git a/api/doc/spec.yaml b/api/doc/spec.yaml index 9e22e463b078d..a515d33940b19 100644 --- 
a/api/doc/spec.yaml +++ b/api/doc/spec.yaml @@ -10453,6 +10453,7 @@ components: - $ref: '#/components/schemas/AccountSignature_MultiEd25519Signature' - $ref: '#/components/schemas/AccountSignature_SingleKeySignature' - $ref: '#/components/schemas/AccountSignature_MultiKeySignature' + - $ref: '#/components/schemas/AccountSignature_NoAccountSignature' discriminator: propertyName: type mapping: @@ -10460,6 +10461,7 @@ components: multi_ed25519_signature: '#/components/schemas/AccountSignature_MultiEd25519Signature' single_key_signature: '#/components/schemas/AccountSignature_SingleKeySignature' multi_key_signature: '#/components/schemas/AccountSignature_MultiKeySignature' + no_account_signature: '#/components/schemas/AccountSignature_NoAccountSignature' AccountSignature_Ed25519Signature: allOf: - type: object @@ -10468,6 +10470,8 @@ components: properties: type: type: string + enum: + - ed25519_signature example: ed25519_signature - $ref: '#/components/schemas/Ed25519Signature' AccountSignature_MultiEd25519Signature: @@ -10478,6 +10482,8 @@ components: properties: type: type: string + enum: + - multi_ed25519_signature example: multi_ed25519_signature - $ref: '#/components/schemas/MultiEd25519Signature' AccountSignature_MultiKeySignature: @@ -10488,8 +10494,22 @@ components: properties: type: type: string + enum: + - multi_key_signature example: multi_key_signature - $ref: '#/components/schemas/MultiKeySignature' + AccountSignature_NoAccountSignature: + allOf: + - type: object + required: + - type + properties: + type: + type: string + enum: + - no_account_signature + example: no_account_signature + - $ref: '#/components/schemas/NoAccountSignature' AccountSignature_SingleKeySignature: allOf: - type: object @@ -10498,6 +10518,8 @@ components: properties: type: type: string + enum: + - single_key_signature example: single_key_signature - $ref: '#/components/schemas/SingleKeySignature' Address: @@ -11022,6 +11044,13 @@ components: $ref: 
'#/components/schemas/ExportedProviderJWKs' multi_sig: $ref: '#/components/schemas/ExportedAggregateSignature' + FederatedKeyless: + type: object + required: + - value + properties: + value: + $ref: '#/components/schemas/HexEncodedBytes' FeePayerSignature: type: object description: |- @@ -11090,6 +11119,8 @@ components: properties: type: type: string + enum: + - write_set_payload example: write_set_payload - $ref: '#/components/schemas/WriteSetPayload' GenesisTransaction: @@ -11409,6 +11440,7 @@ components: required: - name - is_native + - is_event - abilities - generic_type_params - fields @@ -11418,6 +11450,9 @@ components: is_native: type: boolean description: Whether the struct is a native struct of Move + is_event: + type: boolean + description: 'Whether the struct is marked with the #[event] annotation' abilities: type: array description: Abilities associated with the struct @@ -11677,8 +11712,13 @@ components: properties: type: type: string + enum: + - entry_function_payload example: entry_function_payload - $ref: '#/components/schemas/EntryFunctionPayload' + NoAccountSignature: + type: object + description: A placeholder to represent the absence of account signature PendingTransaction: type: object description: A transaction waiting in mempool @@ -11714,6 +11754,7 @@ components: - $ref: '#/components/schemas/PublicKey_Secp256k1Ecdsa' - $ref: '#/components/schemas/PublicKey_Secp256r1Ecdsa' - $ref: '#/components/schemas/PublicKey_Keyless' + - $ref: '#/components/schemas/PublicKey_FederatedKeyless' discriminator: propertyName: type mapping: @@ -11721,6 +11762,7 @@ components: secp256k1_ecdsa: '#/components/schemas/PublicKey_Secp256k1Ecdsa' secp256r1_ecdsa: '#/components/schemas/PublicKey_Secp256r1Ecdsa' keyless: '#/components/schemas/PublicKey_Keyless' + federated_keyless: '#/components/schemas/PublicKey_FederatedKeyless' PublicKey_Ed25519: allOf: - type: object @@ -11729,8 +11771,22 @@ components: properties: type: type: string + enum: + - ed25519 example: 
ed25519 - $ref: '#/components/schemas/Ed25519' + PublicKey_FederatedKeyless: + allOf: + - type: object + required: + - type + properties: + type: + type: string + enum: + - federated_keyless + example: federated_keyless + - $ref: '#/components/schemas/FederatedKeyless' PublicKey_Keyless: allOf: - type: object @@ -11739,6 +11795,8 @@ components: properties: type: type: string + enum: + - keyless example: keyless - $ref: '#/components/schemas/Keyless' PublicKey_Secp256k1Ecdsa: @@ -11749,6 +11807,8 @@ components: properties: type: type: string + enum: + - secp256k1_ecdsa example: secp256k1_ecdsa - $ref: '#/components/schemas/Secp256k1Ecdsa' PublicKey_Secp256r1Ecdsa: @@ -11759,6 +11819,8 @@ components: properties: type: type: string + enum: + - secp256r1_ecdsa example: secp256r1_ecdsa - $ref: '#/components/schemas/Secp256r1Ecdsa' RSA_JWK: @@ -11861,6 +11923,8 @@ components: properties: type: type: string + enum: + - ed25519 example: ed25519 - $ref: '#/components/schemas/Ed25519' Signature_Keyless: @@ -11871,6 +11935,8 @@ components: properties: type: type: string + enum: + - keyless example: keyless - $ref: '#/components/schemas/Keyless' Signature_Secp256k1Ecdsa: @@ -11881,6 +11947,8 @@ components: properties: type: type: string + enum: + - secp256k1_ecdsa example: secp256k1_ecdsa - $ref: '#/components/schemas/Secp256k1Ecdsa' Signature_WebAuthn: @@ -11891,6 +11959,8 @@ components: properties: type: type: string + enum: + - web_authn example: web_authn - $ref: '#/components/schemas/WebAuthn' SingleKeySignature: @@ -12038,6 +12108,8 @@ components: properties: type: type: string + enum: + - module_bundle_payload example: module_bundle_payload - $ref: '#/components/schemas/DeprecatedModuleBundlePayload' TransactionPayload_EntryFunctionPayload: @@ -12048,6 +12120,8 @@ components: properties: type: type: string + enum: + - entry_function_payload example: entry_function_payload - $ref: '#/components/schemas/EntryFunctionPayload' TransactionPayload_MultisigPayload: @@ -12058,6 
+12132,8 @@ components: properties: type: type: string + enum: + - multisig_payload example: multisig_payload - $ref: '#/components/schemas/MultisigPayload' TransactionPayload_ScriptPayload: @@ -12068,6 +12144,8 @@ components: properties: type: type: string + enum: + - script_payload example: script_payload - $ref: '#/components/schemas/ScriptPayload' TransactionSignature: @@ -12095,6 +12173,8 @@ components: properties: type: type: string + enum: + - single_sender example: single_sender - $ref: '#/components/schemas/AccountSignature' TransactionSignature_Ed25519Signature: @@ -12105,6 +12185,8 @@ components: properties: type: type: string + enum: + - ed25519_signature example: ed25519_signature - $ref: '#/components/schemas/Ed25519Signature' TransactionSignature_FeePayerSignature: @@ -12115,6 +12197,8 @@ components: properties: type: type: string + enum: + - fee_payer_signature example: fee_payer_signature - $ref: '#/components/schemas/FeePayerSignature' TransactionSignature_MultiAgentSignature: @@ -12125,6 +12209,8 @@ components: properties: type: type: string + enum: + - multi_agent_signature example: multi_agent_signature - $ref: '#/components/schemas/MultiAgentSignature' TransactionSignature_MultiEd25519Signature: @@ -12135,6 +12221,8 @@ components: properties: type: type: string + enum: + - multi_ed25519_signature example: multi_ed25519_signature - $ref: '#/components/schemas/MultiEd25519Signature' Transaction_BlockEpilogueTransaction: @@ -12145,6 +12233,8 @@ components: properties: type: type: string + enum: + - block_epilogue_transaction example: block_epilogue_transaction - $ref: '#/components/schemas/BlockEpilogueTransaction' Transaction_BlockMetadataTransaction: @@ -12155,6 +12245,8 @@ components: properties: type: type: string + enum: + - block_metadata_transaction example: block_metadata_transaction - $ref: '#/components/schemas/BlockMetadataTransaction' Transaction_GenesisTransaction: @@ -12165,6 +12257,8 @@ components: properties: type: type: string + 
enum: + - genesis_transaction example: genesis_transaction - $ref: '#/components/schemas/GenesisTransaction' Transaction_PendingTransaction: @@ -12175,6 +12269,8 @@ components: properties: type: type: string + enum: + - pending_transaction example: pending_transaction - $ref: '#/components/schemas/PendingTransaction' Transaction_StateCheckpointTransaction: @@ -12185,6 +12281,8 @@ components: properties: type: type: string + enum: + - state_checkpoint_transaction example: state_checkpoint_transaction - $ref: '#/components/schemas/StateCheckpointTransaction' Transaction_UserTransaction: @@ -12195,6 +12293,8 @@ components: properties: type: type: string + enum: + - user_transaction example: user_transaction - $ref: '#/components/schemas/UserTransaction' Transaction_ValidatorTransaction: @@ -12205,6 +12305,8 @@ components: properties: type: type: string + enum: + - validator_transaction example: validator_transaction - $ref: '#/components/schemas/ValidatorTransaction' TransactionsBatchSingleSubmissionFailure: @@ -12366,6 +12468,8 @@ components: properties: validator_transaction_type: type: string + enum: + - dkg_result example: dkg_result - $ref: '#/components/schemas/DKGResultTransaction' ValidatorTransaction_JWKUpdateTransaction: @@ -12376,6 +12480,8 @@ components: properties: validator_transaction_type: type: string + enum: + - observed_jwk_update example: observed_jwk_update - $ref: '#/components/schemas/JWKUpdateTransaction' VersionedEvent: @@ -12492,6 +12598,8 @@ components: properties: type: type: string + enum: + - delete_module example: delete_module - $ref: '#/components/schemas/DeleteModule' WriteSetChange_DeleteResource: @@ -12502,6 +12610,8 @@ components: properties: type: type: string + enum: + - delete_resource example: delete_resource - $ref: '#/components/schemas/DeleteResource' WriteSetChange_DeleteTableItem: @@ -12512,6 +12622,8 @@ components: properties: type: type: string + enum: + - delete_table_item example: delete_table_item - $ref: 
'#/components/schemas/DeleteTableItem' WriteSetChange_WriteModule: @@ -12522,6 +12634,8 @@ components: properties: type: type: string + enum: + - write_module example: write_module - $ref: '#/components/schemas/WriteModule' WriteSetChange_WriteResource: @@ -12532,6 +12646,8 @@ components: properties: type: type: string + enum: + - write_resource example: write_resource - $ref: '#/components/schemas/WriteResource' WriteSetChange_WriteTableItem: @@ -12542,6 +12658,8 @@ components: properties: type: type: string + enum: + - write_table_item example: write_table_item - $ref: '#/components/schemas/WriteTableItem' WriteSetPayload: @@ -12560,6 +12678,8 @@ components: properties: type: type: string + enum: + - direct_write_set example: direct_write_set - $ref: '#/components/schemas/DirectWriteSet' WriteSet_ScriptWriteSet: @@ -12570,6 +12690,8 @@ components: properties: type: type: string + enum: + - script_write_set example: script_write_set - $ref: '#/components/schemas/ScriptWriteSet' WriteTableItem: diff --git a/api/goldens/aptos_api__tests__state_test__test_get_account_module.json b/api/goldens/aptos_api__tests__state_test__test_get_account_module.json index d830872f6c3f2..37c8ff52436ac 100644 --- a/api/goldens/aptos_api__tests__state_test__test_get_account_module.json +++ b/api/goldens/aptos_api__tests__state_test__test_get_account_module.json @@ -120,6 +120,7 @@ { "name": "GUID", "is_native": false, + "is_event": false, "abilities": [ "drop", "store" @@ -135,6 +136,7 @@ { "name": "ID", "is_native": false, + "is_event": false, "abilities": [ "copy", "drop", diff --git a/api/goldens/aptos_api__tests__view_function__test_simple_view_invalid.json b/api/goldens/aptos_api__tests__view_function__test_simple_view_invalid.json index 0c0dbfce4633d..7d32adc125baa 100644 --- a/api/goldens/aptos_api__tests__view_function__test_simple_view_invalid.json +++ b/api/goldens/aptos_api__tests__view_function__test_simple_view_invalid.json @@ -1 +1 @@ -{"message":"PartialVMError with 
status INVALID_MAIN_FUNCTION_SIGNATURE and message function not marked as view function","error_code":"invalid_input","vm_error_code":null} \ No newline at end of file +{"message":"PartialVMError with status INVALID_MAIN_FUNCTION_SIGNATURE and message 'function not marked as view function'","error_code":"invalid_input","vm_error_code":null} \ No newline at end of file diff --git a/api/openapi-spec-generator/src/main.rs b/api/openapi-spec-generator/src/main.rs index bfad65df7d813..cc608e6edce62 100644 --- a/api/openapi-spec-generator/src/main.rs +++ b/api/openapi-spec-generator/src/main.rs @@ -4,7 +4,7 @@ mod fake_context; use anyhow::Result; -use aptos_api::get_api_service; +use aptos_api::{get_api_service, spec::get_spec}; use clap::{Parser, ValueEnum}; use fake_context::get_fake_context; use std::{path::PathBuf, sync::Arc}; @@ -49,9 +49,10 @@ pub fn main() -> Result<()> { let api_service = get_api_service(Arc::new(get_fake_context())); let spec = match args.output_args.format { - OutputFormat::Json => api_service.spec(), - OutputFormat::Yaml => api_service.spec_yaml(), + OutputFormat::Json => get_spec(&api_service, false), + OutputFormat::Yaml => get_spec(&api_service, true), }; + args.output_args.write(&spec) } diff --git a/api/src/accept_type.rs b/api/src/accept_type.rs index e4ca0eb085100..1a0d900988405 100644 --- a/api/src/accept_type.rs +++ b/api/src/accept_type.rs @@ -17,7 +17,6 @@ pub enum AcceptType { /// This impl allows us to get the data straight from the arguments to the /// endpoint handler. 
-#[async_trait::async_trait] impl<'a> FromRequest<'a> for AcceptType { async fn from_request(request: &'a Request, _body: &mut RequestBody) -> Result { let accept = Accept::from_request_without_body(request).await?; diff --git a/api/src/accounts.rs b/api/src/accounts.rs index 1f597ce8552e1..d94454f6b34e3 100644 --- a/api/src/accounts.rs +++ b/api/src/accounts.rs @@ -183,7 +183,7 @@ pub struct Account { /// Address of account address: Address, /// Lookup ledger version - ledger_version: u64, + pub ledger_version: u64, /// Where to start for pagination start: Option, /// Max number of items to retrieve @@ -193,8 +193,6 @@ pub struct Account { } impl Account { - /// Creates a new account struct and determines the current ledger info, and determines the - /// ledger version to query pub fn new( context: Arc, address: Address, @@ -202,8 +200,7 @@ impl Account { start: Option, limit: Option, ) -> Result { - // Use the latest ledger version, or the requested associated version - let (latest_ledger_info, requested_ledger_version) = context + let (latest_ledger_info, requested_version) = context .get_latest_ledger_info_and_verify_lookup_version( requested_ledger_version.map(|inner| inner.0), )?; @@ -211,7 +208,7 @@ impl Account { Ok(Self { context, address, - ledger_version: requested_ledger_version, + ledger_version: requested_version, start, limit, latest_ledger_info, @@ -471,7 +468,7 @@ impl Account { })?; // Find the resource and retrieve the struct field - let resource = self.find_resource(&struct_tag)?; + let (_, resource) = self.find_resource(&struct_tag)?; let (_id, value) = resource .into_iter() .find(|(id, _)| id == &field_name) @@ -511,12 +508,19 @@ impl Account { Ok(*event_handle.key()) } - /// Find a resource associated with an account + /// Find a resource associated with an account. If the resource is an enum variant, + /// returns the variant name in the option. 
fn find_resource( &self, resource_type: &StructTag, - ) -> Result, BasicErrorWith404> { - let (ledger_info, ledger_version, state_view) = + ) -> Result< + ( + Option, + Vec<(Identifier, move_core_types::value::MoveValue)>, + ), + BasicErrorWith404, + > { + let (ledger_info, requested_ledger_version, state_view) = self.context.state_view(Some(self.ledger_version))?; let bytes = state_view @@ -534,7 +538,12 @@ impl Account { ) })? .ok_or_else(|| { - resource_not_found(self.address, resource_type, ledger_version, &ledger_info) + resource_not_found( + self.address, + resource_type, + requested_ledger_version, + &ledger_info, + ) })?; state_view diff --git a/api/src/basic.rs b/api/src/basic.rs index 0dc9ff170243d..d6fed2dd70e3f 100644 --- a/api/src/basic.rs +++ b/api/src/basic.rs @@ -87,12 +87,10 @@ impl BasicApi { let ledger_info = api_spawn_blocking(move || context.get_latest_ledger_info()).await?; // If we have a duration, check that it's close to the current time, otherwise it's ok - if let Some(duration) = duration_secs.0 { - let timestamp = ledger_info.timestamp(); - - let timestamp = Duration::from_micros(timestamp); - let expectation = SystemTime::now() - .sub(Duration::from_secs(duration as u64)) + if let Some(max_skew) = duration_secs.0 { + let ledger_timestamp = Duration::from_micros(ledger_info.timestamp()); + let skew_threshold = SystemTime::now() + .sub(Duration::from_secs(max_skew as u64)) .duration_since(UNIX_EPOCH) .context("Failed to determine absolute unix time based on given duration") .map_err(|err| { @@ -103,9 +101,9 @@ impl BasicApi { ) })?; - if timestamp < expectation { + if ledger_timestamp < skew_threshold { return Err(HealthCheckError::service_unavailable_with_code( - "The latest ledger info timestamp is less than the expected timestamp", + format!("The latest ledger info timestamp is {:?}, which is beyond the allowed skew ({}s).", ledger_timestamp, max_skew), AptosErrorCode::HealthCheckFailed, &ledger_info, )); diff --git 
a/api/src/bcs_payload.rs b/api/src/bcs_payload.rs index 52ad783690bba..422037d817781 100644 --- a/api/src/bcs_payload.rs +++ b/api/src/bcs_payload.rs @@ -49,12 +49,11 @@ impl Payload for Bcs { } } -#[poem::async_trait] impl ParsePayload for Bcs { const IS_REQUIRED: bool = true; async fn from_request(request: &Request, body: &mut RequestBody) -> Result { - let data: Vec = FromRequest::from_request(request, body).await?; + let data = Vec::::from_request(request, body).await?; Ok(Self(data)) } } diff --git a/api/src/check_size.rs b/api/src/check_size.rs index 9b3de83b8c145..4f6f1d35e0d19 100644 --- a/api/src/check_size.rs +++ b/api/src/check_size.rs @@ -1,9 +1,9 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use hyper::Method; use poem::{ error::SizedLimitError, + http::Method, web::headers::{self, HeaderMapExt}, Endpoint, Middleware, Request, Result, }; @@ -37,7 +37,6 @@ pub struct PostSizeLimitEndpoint { max_size: u64, } -#[async_trait::async_trait] impl Endpoint for PostSizeLimitEndpoint { type Output = E::Output; diff --git a/api/src/context.rs b/api/src/context.rs index a077845cb0b63..4cea588b9906a 100644 --- a/api/src/context.rs +++ b/api/src/context.rs @@ -16,20 +16,19 @@ use aptos_api_types::{ AptosErrorCode, AsConverter, BcsBlock, GasEstimation, LedgerInfo, ResourceGroup, TransactionOnChainData, }; -use aptos_config::config::{NodeConfig, RoleType}; +use aptos_config::config::{GasEstimationConfig, NodeConfig, RoleType}; use aptos_crypto::HashValue; use aptos_gas_schedule::{AptosGasParameters, FromOnChainGasSchedule}; use aptos_logger::{error, info, Schema}; use aptos_mempool::{MempoolClientRequest, MempoolClientSender, SubmissionStatus}; use aptos_storage_interface::{ state_view::{DbStateView, DbStateViewAtVersion, LatestDbStateCheckpointView}, - DbReader, Order, MAX_REQUEST_LIMIT, + AptosDbError, DbReader, Order, MAX_REQUEST_LIMIT, }; use aptos_types::{ access_path::{AccessPath, Path}, account_address::AccountAddress, 
account_config::{AccountResource, NewBlockEvent}, - block_executor::config::BlockExecutorConfigFromOnchain, chain_id::ChainId, contract_event::EventWithVersion, event::EventKey, @@ -42,7 +41,9 @@ use aptos_types::{ TStateView, }, transaction::{ - block_epilogue::BlockEndInfo, SignedTransaction, Transaction, TransactionWithProof, Version, + block_epilogue::BlockEndInfo, + use_case::{UseCaseAwareTransaction, UseCaseKey}, + SignedTransaction, Transaction, TransactionWithProof, Version, }, }; use futures::{channel::oneshot, SinkExt}; @@ -124,8 +125,7 @@ impl Context { })), gas_limit_cache: Arc::new(RwLock::new(GasLimitCache { last_updated_epoch: None, - block_executor_onchain_config: OnChainExecutionConfig::default_if_missing() - .block_executor_onchain_config(), + execution_onchain_config: OnChainExecutionConfig::default_if_missing(), })), view_function_stats, simulate_txn_stats, @@ -221,20 +221,26 @@ impl Context { .map_err(|e| e.into()) } - pub fn get_latest_ledger_info(&self) -> Result { + pub fn get_oldest_version_and_block_height( + &self, + ) -> Result<(Version, u64), E> { + self.db + .get_first_viable_block() + .context("Failed to retrieve oldest block information") + .map_err(|e| E::service_unavailable_with_code_no_info(e, AptosErrorCode::InternalError)) + } + + pub fn get_latest_storage_ledger_info( + &self, + ) -> Result { let ledger_info = self .get_latest_ledger_info_with_signatures() .context("Failed to retrieve latest ledger info") .map_err(|e| { E::service_unavailable_with_code_no_info(e, AptosErrorCode::InternalError) })?; - let (oldest_version, oldest_block_height) = self - .db - .get_first_viable_block() - .context("Failed to retrieve oldest block information") - .map_err(|e| { - E::service_unavailable_with_code_no_info(e, AptosErrorCode::InternalError) - })?; + + let (oldest_version, oldest_block_height) = self.get_oldest_version_and_block_height()?; let (_, _, newest_block_event) = self .db 
.get_block_info_by_version(ledger_info.ledger_info().version()) @@ -252,6 +258,15 @@ impl Context { )) } + pub fn get_latest_ledger_info(&self) -> Result { + if let Some(indexer_reader) = self.indexer_reader.as_ref() { + if indexer_reader.is_internal_indexer_enabled() { + return self.get_latest_internal_indexer_ledger_info(); + } + } + self.get_latest_storage_ledger_info() + } + pub fn get_latest_ledger_info_and_verify_lookup_version( &self, requested_ledger_version: Option, @@ -277,6 +292,45 @@ impl Context { Ok((latest_ledger_info, requested_ledger_version)) } + pub fn get_latest_internal_indexer_ledger_info( + &self, + ) -> Result { + if let Some(indexer_reader) = self.indexer_reader.as_ref() { + if let Some(latest_version) = indexer_reader + .get_latest_internal_indexer_ledger_version() + .map_err(|err| { + E::service_unavailable_with_code_no_info(err, AptosErrorCode::InternalError) + })? + { + let (_, _, new_block_event) = self + .db + .get_block_info_by_version(latest_version) + .map_err(|_| { + E::service_unavailable_with_code_no_info( + "Failed to get block", + AptosErrorCode::InternalError, + ) + })?; + let (oldest_version, oldest_block_height) = + self.get_oldest_version_and_block_height()?; + return Ok(LedgerInfo::new_ledger_info( + &self.chain_id(), + new_block_event.epoch(), + latest_version, + oldest_version, + oldest_block_height, + new_block_event.height(), + new_block_event.proposed_time(), + )); + } + } + + Err(E::service_unavailable_with_code_no_info( + "Indexer reader doesn't exist, or doesn't have data.", + AptosErrorCode::InternalError, + )) + } + pub fn get_latest_ledger_info_with_signatures(&self) -> Result { Ok(self.db.get_latest_ledger_info()?) 
} @@ -350,19 +404,26 @@ impl Context { address: AccountAddress, version: u64, ) -> Result> { - let mut iter = self.db.get_prefixed_state_value_iterator( - &StateKeyPrefix::from(address), - None, - version, - )?; + let mut iter = if !db_sharding_enabled(&self.node_config) { + Box::new( + self.db + .get_prefixed_state_value_iterator( + &StateKeyPrefix::from(address), + None, + version, + )? + .map(|item| item.map_err(|err| anyhow!(err.to_string()))), + ) + } else { + self.indexer_reader + .as_ref() + .ok_or_else(|| format_err!("Indexer reader doesn't exist"))? + .get_prefixed_state_value_iterator(&StateKeyPrefix::from(address), None, version)? + }; let kvs = iter .by_ref() .take(MAX_REQUEST_LIMIT as usize) - .map(|res| match res { - Ok((k, v)) => Ok((k, v)), - Err(res) => Err(anyhow::Error::from(res)), - }) .collect::>()?; if iter.next().transpose()?.is_some() { bail!("Too many state items under account ({:?}).", address); @@ -377,11 +438,26 @@ impl Context { version: u64, limit: u64, ) -> Result<(Vec<(StructTag, Vec)>, Option)> { - let account_iter = self.db.get_prefixed_state_value_iterator( - &StateKeyPrefix::from(address), - prev_state_key, - version, - )?; + let account_iter = if !db_sharding_enabled(&self.node_config) { + Box::new( + self.db + .get_prefixed_state_value_iterator( + &StateKeyPrefix::from(address), + prev_state_key, + version, + )? + .map(|item| item.map_err(|err| anyhow!(err.to_string()))), + ) + } else { + self.indexer_reader + .as_ref() + .ok_or_else(|| format_err!("Indexer reader doesn't exist"))? + .get_prefixed_state_value_iterator( + &StateKeyPrefix::from(address), + prev_state_key, + version, + )? 
+ }; // TODO: Consider rewriting this to consider resource groups: // * If a resource group is found, expand // * Return Option)>> @@ -408,7 +484,7 @@ impl Context { Some(Err(format_err!( "storage prefix scan return inconsistent key ({:?})", k ))) } }, - Err(e) => Some(Err(e.into())), + Err(e) => Some(Err(e)), }) .take(limit as usize + 1); let kvs = resource_iter @@ -453,11 +529,26 @@ impl Context { version: u64, limit: u64, ) -> Result<(Vec<(ModuleId, Vec)>, Option)> { - let account_iter = self.db.get_prefixed_state_value_iterator( - &StateKeyPrefix::from(address), - prev_state_key, - version, - )?; + let account_iter = if !db_sharding_enabled(&self.node_config) { + Box::new( + self.db + .get_prefixed_state_value_iterator( + &StateKeyPrefix::from(address), + prev_state_key, + version, + )? + .map(|item| item.map_err(|err| anyhow!(err.to_string()))), + ) + } else { + self.indexer_reader + .as_ref() + .ok_or_else(|| format_err!("Indexer reader doesn't exist"))? + .get_prefixed_state_value_iterator( + &StateKeyPrefix::from(address), + prev_state_key, + version, + )? + }; let mut module_iter = account_iter .filter_map(|res| match res { Ok((k, v)) => match k.inner() { @@ -473,7 +564,7 @@ impl Context { Some(Err(format_err!( "storage prefix scan return inconsistent key ({:?})", k ))) } }, - Err(e) => Some(Err(e.into())), + Err(e) => Some(Err(e)), }) .take(limit as usize + 1); let kvs = module_iter @@ -730,15 +821,31 @@ impl Context { .saturating_sub(limit as u64) }; - let txns = self - .db - .get_account_transactions( + let txns_res = if !db_sharding_enabled(&self.node_config) { + self.db.get_account_transactions( address, start_seq_number, limit as u64, true, ledger_version, ) + } else { + self.indexer_reader + .as_ref() + .ok_or(anyhow!("Indexer reader is None")) + .map_err(|err| { + E::internal_with_code(err, AptosErrorCode::InternalError, ledger_info) + })? 
+ .get_account_transactions( + address, + start_seq_number, + limit as u64, + true, + ledger_version, + ) + .map_err(|e| AptosDbError::Other(e.to_string())) + }; + let txns = txns_res .context("Failed to retrieve account transactions") .map_err(|err| { E::internal_with_code(err, AptosErrorCode::InternalError, ledger_info) @@ -813,28 +920,25 @@ impl Context { limit: u16, ledger_version: u64, ) -> Result> { - if let Some(start) = start { - Ok(self.db.get_events( - event_key, - start, - Order::Ascending, - limit as u64, - ledger_version, - )?) + let (start, order) = if let Some(start) = start { + (start, Order::Ascending) } else { - Ok(self - .db - .get_events( - event_key, - u64::MAX, - Order::Descending, - limit as u64, - ledger_version, - ) - .map(|mut result| { - result.reverse(); - result - })?) + (u64::MAX, Order::Descending) + }; + let mut res = if !db_sharding_enabled(&self.node_config) { + self.db + .get_events(event_key, start, order, limit as u64, ledger_version)? + } else { + self.indexer_reader + .as_ref() + .ok_or(anyhow!("Internal indexer reader doesn't exist"))? + .get_events(event_key, start, order, limit as u64, ledger_version)? 
+ }; + if order == Order::Descending { + res.reverse(); + Ok(res) + } else { + Ok(res) } } @@ -894,9 +998,10 @@ impl Context { start_version: Version, limit: u64, ledger_version: Version, - ) -> Result<(Vec<(u64, u64)>, Vec)> { + count_majority_use_case: bool, + ) -> Result<(Vec<(u64, u64)>, Vec, Option)> { if start_version > ledger_version || limit == 0 { - return Ok((vec![], vec![])); + return Ok((vec![], vec![], None)); } // This is just an estimation, so we can just skip over errors @@ -908,11 +1013,16 @@ impl Context { let mut gas_prices = Vec::new(); let mut block_end_infos = Vec::new(); + let mut count_by_use_case = HashMap::new(); for (txn, info) in txns.zip(infos) { match txn.as_ref() { Ok(Transaction::UserTransaction(txn)) => { if let Ok(info) = info.as_ref() { gas_prices.push((txn.gas_unit_price(), info.gas_used())); + if count_majority_use_case { + let use_case_key = txn.parse_use_case(); + *count_by_use_case.entry(use_case_key).or_insert(0) += 1; + } } }, Ok(Transaction::BlockEpilogue(txn)) => { @@ -924,7 +1034,80 @@ impl Context { } } - Ok((gas_prices, block_end_infos)) + let majority_use_case_fraction = if count_majority_use_case { + count_by_use_case.iter().max_by_key(|(_, v)| *v).and_then( + |(max_use_case, max_value)| { + if let UseCaseKey::ContractAddress(_) = max_use_case { + Some(*max_value as f32 / count_by_use_case.values().sum::() as f32) + } else { + None + } + }, + ) + } else { + None + }; + Ok((gas_prices, block_end_infos, majority_use_case_fraction)) + } + + fn block_min_inclusion_price( + &self, + ledger_info: &LedgerInfo, + first: Version, + last: Version, + gas_estimation_config: &GasEstimationConfig, + execution_config: &OnChainExecutionConfig, + ) -> Option { + let user_use_case_spread_factor = if gas_estimation_config.incorporate_reordering_effects { + execution_config + .transaction_shuffler_type() + .user_use_case_spread_factor() + } else { + None + }; + + match self.get_gas_prices_and_used( + first, + last - first, + 
ledger_info.ledger_version.0, + user_use_case_spread_factor.is_some(), + ) { + Ok((prices_and_used, block_end_infos, majority_use_case_fraction)) => { + let is_full_block = + if majority_use_case_fraction.map_or(false, |fraction| fraction > 0.5) { + // If majority use case is above half of transactions, UseCaseAware block reordering + // will allow other transactions to get in the block (AIP-68) + false + } else if prices_and_used.len() >= gas_estimation_config.full_block_txns { + true + } else if !block_end_infos.is_empty() { + assert_eq!(1, block_end_infos.len()); + block_end_infos.first().unwrap().limit_reached() + } else if let Some(block_gas_limit) = + execution_config.block_gas_limit_type().block_gas_limit() + { + let gas_used = prices_and_used.iter().map(|(_, used)| *used).sum::(); + gas_used >= block_gas_limit + } else { + false + }; + + if is_full_block { + Some( + self.next_bucket( + prices_and_used + .iter() + .map(|(price, _)| *price) + .min() + .unwrap(), + ), + ) + } else { + None + } + }, + Err(_) => None, + } } pub fn estimate_gas_price( @@ -933,7 +1116,7 @@ impl Context { ) -> Result { let config = &self.node_config.api.gas_estimation; let min_gas_unit_price = self.min_gas_unit_price(ledger_info)?; - let block_config = self.block_executor_onchain_config(ledger_info)?; + let execution_config = self.execution_onchain_config(ledger_info)?; if !config.enabled { return Ok(self.default_gas_estimation(min_gas_unit_price)); } @@ -1014,40 +1197,9 @@ impl Context { let mut min_inclusion_prices = vec![]; // TODO: if multiple calls to db is a perf issue, combine into a single call and then split for (first, last) in blocks { - let min_inclusion_price = match self.get_gas_prices_and_used( - first, - last - first, - ledger_info.ledger_version.0, - ) { - Ok((prices_and_used, block_end_infos)) => { - let is_full_block = if prices_and_used.len() >= config.full_block_txns { - true - } else if !block_end_infos.is_empty() { - assert_eq!(1, block_end_infos.len()); - 
block_end_infos.first().unwrap().limit_reached() - } else if let Some(block_gas_limit) = - block_config.block_gas_limit_type.block_gas_limit() - { - let gas_used = prices_and_used.iter().map(|(_, used)| *used).sum::(); - gas_used >= block_gas_limit - } else { - false - }; - - if is_full_block { - self.next_bucket( - prices_and_used - .iter() - .map(|(price, _)| *price) - .min() - .unwrap(), - ) - } else { - min_gas_unit_price - } - }, - Err(_) => min_gas_unit_price, - }; + let min_inclusion_price = self + .block_min_inclusion_price(ledger_info, first, last, config, &execution_config) + .unwrap_or(min_gas_unit_price); min_inclusion_prices.push(min_inclusion_price); cache .min_inclusion_prices @@ -1215,16 +1367,16 @@ impl Context { } } - pub fn block_executor_onchain_config( + pub fn execution_onchain_config( &self, ledger_info: &LedgerInfo, - ) -> Result { + ) -> Result { // If it's the same epoch, use the cached results { let cache = self.gas_limit_cache.read().unwrap(); if let Some(ref last_updated_epoch) = cache.last_updated_epoch { if *last_updated_epoch == ledger_info.epoch.0 { - return Ok(cache.block_executor_onchain_config.clone()); + return Ok(cache.execution_onchain_config.clone()); } } } @@ -1235,7 +1387,7 @@ impl Context { // If a different thread updated the cache, we can exit early if let Some(ref last_updated_epoch) = cache.last_updated_epoch { if *last_updated_epoch == ledger_info.epoch.0 { - return Ok(cache.block_executor_onchain_config.clone()); + return Ok(cache.execution_onchain_config.clone()); } } @@ -1247,14 +1399,13 @@ impl Context { E::internal_with_code(e, AptosErrorCode::InternalError, ledger_info) })?; - let block_executor_onchain_config = OnChainExecutionConfig::fetch_config(&state_view) - .unwrap_or_else(OnChainExecutionConfig::default_if_missing) - .block_executor_onchain_config(); + let execution_onchain_config = OnChainExecutionConfig::fetch_config(&state_view) + .unwrap_or_else(OnChainExecutionConfig::default_if_missing); // Update 
the cache - cache.block_executor_onchain_config = block_executor_onchain_config.clone(); + cache.execution_onchain_config = execution_onchain_config.clone(); cache.last_updated_epoch = Some(ledger_info.epoch.0); - Ok(block_executor_onchain_config) + Ok(execution_onchain_config) } } @@ -1314,7 +1465,7 @@ pub struct GasEstimationCache { pub struct GasLimitCache { last_updated_epoch: Option, - block_executor_onchain_config: BlockExecutorConfigFromOnchain, + execution_onchain_config: OnChainExecutionConfig, } /// This function just calls tokio::task::spawn_blocking with the given closure and in @@ -1444,3 +1595,7 @@ impl FunctionStats { } } } + +fn db_sharding_enabled(node_config: &NodeConfig) -> bool { + node_config.storage.rocksdb_configs.enable_storage_sharding +} diff --git a/api/src/index.rs b/api/src/index.rs index 94b5289636413..ba91cbb34c342 100644 --- a/api/src/index.rs +++ b/api/src/index.rs @@ -33,7 +33,6 @@ impl IndexApi { self.context .check_api_output_enabled("Get ledger info", &accept_type)?; let ledger_info = self.context.get_latest_ledger_info()?; - let node_role = self.context.node_role(); api_spawn_blocking(move || match accept_type { diff --git a/api/src/lib.rs b/api/src/lib.rs index 838394268d667..fa72f894e47e4 100644 --- a/api/src/lib.rs +++ b/api/src/lib.rs @@ -21,6 +21,7 @@ mod page; mod response; mod runtime; mod set_failpoints; +pub mod spec; mod state; #[cfg(test)] pub mod tests; diff --git a/api/src/log.rs b/api/src/log.rs index f305815176d00..b9ee7a2ab4d52 100644 --- a/api/src/log.rs +++ b/api/src/log.rs @@ -9,9 +9,11 @@ use aptos_logger::{ prelude::{sample, SampleRate}, warn, Schema, }; -use hyper::Method; use once_cell::sync::Lazy; -use poem::{http::header, Endpoint, Request, Response, Result}; +use poem::{ + http::{header, Method}, + Endpoint, Request, Response, Result, +}; use poem_openapi::OperationId; use regex::Regex; use std::time::Duration; diff --git a/api/src/runtime.rs b/api/src/runtime.rs index cf666d1f959df..219036a90b5d7 
100644 --- a/api/src/runtime.rs +++ b/api/src/runtime.rs @@ -3,9 +3,19 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - accounts::AccountsApi, basic::BasicApi, blocks::BlocksApi, check_size::PostSizeLimit, - context::Context, error_converter::convert_error, events::EventsApi, index::IndexApi, - log::middleware_log, set_failpoints, state::StateApi, transactions::TransactionsApi, + accounts::AccountsApi, + basic::BasicApi, + blocks::BlocksApi, + check_size::PostSizeLimit, + context::Context, + error_converter::convert_error, + events::EventsApi, + index::IndexApi, + log::middleware_log, + set_failpoints, + spec::{spec_endpoint_json, spec_endpoint_yaml}, + state::StateApi, + transactions::TransactionsApi, view_function::ViewFunctionApi, }; use anyhow::Context as AnyhowContext; @@ -164,8 +174,8 @@ pub fn attach_poem_to_runtime( let api_service = get_api_service(context.clone()); - let spec_json = api_service.spec_endpoint(); - let spec_yaml = api_service.spec_endpoint_yaml(); + let spec_json = spec_endpoint_json(&api_service); + let spec_yaml = spec_endpoint_yaml(&api_service); let mut address = config.api.address; diff --git a/api/src/spec.rs b/api/src/spec.rs new file mode 100644 index 0000000000000..10293cb71e097 --- /dev/null +++ b/api/src/spec.rs @@ -0,0 +1,58 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use poem::{ + endpoint::{make_sync, Endpoint}, + Response, +}; +use poem_openapi::{OpenApi, OpenApiService, Webhook}; + +/// Get the spec as JSON. We implement our own function because poem-openapi versions +/// greater than 2.0.11 add this charset thing to the content type. This causes issues +/// with our current Accept logic and messes up some code generators and our spec page, +/// so we remove it. 
+pub fn get_spec(service: &OpenApiService, yaml: bool) -> String +where + T: OpenApi, + W: Webhook, +{ + let spec = if yaml { + service.spec_yaml() + } else { + service.spec() + }; + spec.replace("; charset=utf-8", "") +} + +/// Create an endpoint to serve the OpenAPI specification as json. We define this +/// ourselves because we need to use our custom `get_spec` function that changes the +/// spec to remove charset from the content type. +pub fn spec_endpoint_json(service: &OpenApiService) -> impl Endpoint +where + T: OpenApi, + W: Webhook, +{ + let spec = get_spec(service, false); + make_sync(move |_| { + Response::builder() + .content_type("application/json") + .body(spec.clone()) + }) +} + +/// Create an endpoint to serve the OpenAPI specification as yaml. We define this +/// ourselves because we need to use our custom `get_spec` function that changes the +/// spec to remove charset from the content type. +pub fn spec_endpoint_yaml(service: &OpenApiService) -> impl Endpoint +where + T: OpenApi, + W: Webhook, +{ + let spec = get_spec(service, true); + make_sync(move |_| { + Response::builder() + .content_type("application/x-yaml") + .header("Content-Disposition", "inline; filename=\"spec.yaml\"") + .body(spec.clone()) + }) +} diff --git a/api/src/tests/accounts_test.rs b/api/src/tests/accounts_test.rs index da00aeff218e7..a5199cf61c452 100644 --- a/api/src/tests/accounts_test.rs +++ b/api/src/tests/accounts_test.rs @@ -3,7 +3,8 @@ // SPDX-License-Identifier: Apache-2.0 use super::new_test_context; -use aptos_api_test_context::{current_function_name, find_value}; +use crate::tests::new_test_context_with_db_sharding_and_internal_indexer; +use aptos_api_test_context::{current_function_name, find_value, TestContext}; use aptos_api_types::{MoveModuleBytecode, MoveResource, MoveStructTag, StateKeyWrapper}; use aptos_cached_packages::aptos_stdlib; use serde_json::json; @@ -36,9 +37,21 @@ async fn test_get_account_resources_by_address_0x0() { async fn 
test_get_account_resources_by_valid_account_address() { let context = new_test_context(current_function_name!()); let addresses = vec!["0x1", "0x00000000000000000000000000000001"]; + let mut res = vec![]; for address in &addresses { - context.get(&account_resources(address)).await; + let resp = context.get(&account_resources(address)).await; + res.push(resp); } + + let shard_context = + new_test_context_with_db_sharding_and_internal_indexer(current_function_name!()); + let mut shard_res = vec![]; + for address in &addresses { + let resp = shard_context.get(&account_resources(address)).await; + shard_res.push(resp); + } + + assert_eq!(res, shard_res); } // Unstable due to framework changes @@ -96,13 +109,15 @@ async fn test_account_modules_structs() { context.check_golden_output(resp); } -#[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn test_get_account_resources_by_ledger_version() { - let mut context = new_test_context(current_function_name!()); +async fn test_account_resources_by_ledger_version_with_context(mut context: TestContext) { let account = context.gen_account(); let txn = context.create_user_account(&account).await; context.commit_block(&vec![txn.clone()]).await; + if let Some(indexer_reader) = context.context.indexer_reader.as_ref() { + indexer_reader.wait_for_internal_indexer(2).unwrap(); + } + let ledger_version_1_resources = context .get(&account_resources( &context.root_account().await.address().to_hex_literal(), @@ -125,13 +140,23 @@ async fn test_get_account_resources_by_ledger_version() { assert_eq!(root_account["data"]["sequence_number"], "0"); } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_get_account_resources_by_ledger_version() { + let context = new_test_context(current_function_name!()); + test_account_resources_by_ledger_version_with_context(context).await; + let shard_context = + new_test_context_with_db_sharding_and_internal_indexer(current_function_name!()); + 
test_account_resources_by_ledger_version_with_context(shard_context).await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_get_account_resources_by_too_large_ledger_version() { let mut context = new_test_context(current_function_name!()); + let account = context.root_account().await; let resp = context .expect_status_code(404) .get(&account_resources_with_ledger_version( - &context.root_account().await.address().to_hex_literal(), + &account.address().to_hex_literal(), 1000000000000000000, )) .await; @@ -151,9 +176,7 @@ async fn test_get_account_resources_by_invalid_ledger_version() { context.check_golden_output(resp); } -#[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn test_get_account_modules_by_ledger_version() { - let mut context = new_test_context(current_function_name!()); +async fn test_get_account_modules_by_ledger_version_with_context(mut context: TestContext) { let payload = aptos_stdlib::publish_module_source("test_module", "module 0xa550c18::test_module {}"); @@ -162,6 +185,10 @@ async fn test_get_account_modules_by_ledger_version() { root_account.sign_with_transaction_builder(context.transaction_factory().payload(payload)); context.commit_block(&vec![txn.clone()]).await; + if let Some(indexer_reader) = context.context.indexer_reader.as_ref() { + indexer_reader.wait_for_internal_indexer(2).unwrap(); + } + let modules = context .get(&account_modules( &context.root_account().await.address().to_hex_literal(), @@ -178,6 +205,15 @@ async fn test_get_account_modules_by_ledger_version() { assert_eq!(modules, json!([])); } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_get_account_modules_by_ledger_version() { + let context = new_test_context(current_function_name!()); + test_get_account_modules_by_ledger_version_with_context(context).await; + let shard_context = + new_test_context_with_db_sharding_and_internal_indexer(current_function_name!()); + 
test_get_account_modules_by_ledger_version_with_context(shard_context).await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_get_core_account_data() { let mut context = new_test_context(current_function_name!()); diff --git a/api/src/tests/events_test.rs b/api/src/tests/events_test.rs index acf517b61a04c..1c1f69830cc06 100644 --- a/api/src/tests/events_test.rs +++ b/api/src/tests/events_test.rs @@ -3,6 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 use super::new_test_context; +use crate::tests::new_test_context_with_db_sharding_and_internal_indexer; use aptos_api_test_context::{current_function_name, TestContext}; use percent_encoding::{utf8_percent_encode, NON_ALPHANUMERIC}; use serde_json::json; @@ -35,7 +36,21 @@ async fn test_get_events_filter_by_start_sequence_number() { .as_str(), ) .await; - context.check_golden_output(resp); + context.check_golden_output(resp.clone()); + + // assert the same resp after db sharding migration with internal indexer turned on + let shard_context = + new_test_context_with_db_sharding_and_internal_indexer(current_function_name!()); + let new_resp = shard_context + .get( + format!( + "/accounts/{}/events/{}?start=1", + ACCOUNT_ADDRESS, CREATION_NUMBER + ) + .as_str(), + ) + .await; + assert_eq!(resp, new_resp); } // turn it back until we have multiple events in genesis @@ -84,7 +99,14 @@ async fn test_get_events_by_account_event_handle() { let resp = context .get("/accounts/0x1/events/0x1::reconfiguration::Configuration/events") .await; - context.check_golden_output(resp); + context.check_golden_output(resp.clone()); + + let shard_context = + new_test_context_with_db_sharding_and_internal_indexer(current_function_name!()); + let new_resp = shard_context + .get("/accounts/0x1/events/0x1::reconfiguration::Configuration/events") + .await; + assert_eq!(resp, new_resp); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] diff --git a/api/src/tests/mod.rs b/api/src/tests/mod.rs index 
e581acdbad02a..e7978f66a126e 100644 --- a/api/src/tests/mod.rs +++ b/api/src/tests/mod.rs @@ -22,7 +22,7 @@ mod view_function; mod webauthn_secp256r1_ecdsa; use aptos_api_test_context::{new_test_context as super_new_test_context, TestContext}; -use aptos_config::config::NodeConfig; +use aptos_config::config::{internal_indexer_db_config::InternalIndexerDBConfig, NodeConfig}; fn new_test_context(test_name: String) -> TestContext { new_test_context_with_config(test_name, NodeConfig::default()) @@ -31,3 +31,10 @@ fn new_test_context(test_name: String) -> TestContext { fn new_test_context_with_config(test_name: String, node_config: NodeConfig) -> TestContext { super_new_test_context(test_name, node_config, false) } + +fn new_test_context_with_db_sharding_and_internal_indexer(test_name: String) -> TestContext { + let mut node_config = NodeConfig::default(); + node_config.storage.rocksdb_configs.enable_storage_sharding = true; + node_config.indexer_db_config = InternalIndexerDBConfig::new(true, true, true, 10_000); + super_new_test_context(test_name, node_config, true) +} diff --git a/api/src/tests/modules.rs b/api/src/tests/modules.rs index e451ff9718585..66131a72f0de9 100644 --- a/api/src/tests/modules.rs +++ b/api/src/tests/modules.rs @@ -64,4 +64,23 @@ async fn test_abi() { assert_eq!(function["is_view"], false); } + + // Confirm that MyEvent is considered an event. + let structs = modules.as_array().unwrap()[0]["abi"]["structs"] + .as_array() + .unwrap(); + let my_event = structs + .iter() + .find(|s| s["name"].as_str().unwrap() == "MyEvent") + .unwrap(); + + assert_eq!(my_event["is_event"], true); + + // Confirm that State is not considered an event. 
+ let my_struct = structs + .iter() + .find(|s| s["name"].as_str().unwrap() == "State") + .unwrap(); + + assert_eq!(my_struct["is_event"], false); } diff --git a/api/src/tests/move/pack_abi/sources/test.move b/api/src/tests/move/pack_abi/sources/test.move index 83b95de8f87bc..5bf35cbe4b2bb 100644 --- a/api/src/tests/move/pack_abi/sources/test.move +++ b/api/src/tests/move/pack_abi/sources/test.move @@ -4,6 +4,11 @@ module abi::test { value: u64 } + #[event] + struct MyEvent has store { + value: u64 + } + public fun public_function(s: &signer, state: State) { move_to(s, state) } @@ -11,6 +16,7 @@ module abi::test { public entry fun public_entry_function(s1: &signer, s2: &signer, value: u64) { move_to(s1, State { value }); move_to(s2, State { value }); + } entry fun private_entry_function(s: &signer, value: u64) { diff --git a/api/src/tests/multisig_transactions_test.rs b/api/src/tests/multisig_transactions_test.rs index a01ccf1c9f547..b4f4cdddb6886 100644 --- a/api/src/tests/multisig_transactions_test.rs +++ b/api/src/tests/multisig_transactions_test.rs @@ -415,6 +415,54 @@ async fn test_multisig_transaction_with_mismatching_payload() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_multisig_transaction_simulation() { + let mut context = new_test_context(current_function_name!()); + let owner_account_1 = &mut context.create_account().await; + let owner_account_2 = &mut context.create_account().await; + let owner_account_3 = &mut context.create_account().await; + let multisig_account = context + .create_multisig_account( + owner_account_1, + vec![owner_account_2.address(), owner_account_3.address()], + 1, /* 1-of-3 */ + 1000, /* initial balance */ + ) + .await; + + let multisig_payload = construct_multisig_txn_transfer_payload(owner_account_1.address(), 1000); + context + .create_multisig_transaction(owner_account_1, multisig_account, multisig_payload.clone()) + .await; + + // Simulate the multisig tx + let simulation_resp = context + 
.simulate_multisig_transaction( + owner_account_1, + multisig_account, + "0x1::aptos_account::transfer", + &[], + &[&owner_account_1.address().to_hex_literal(), "1000"], + 200, + ) + .await; + // Validate that the simulation did successfully execute a transfer of 1000 coins from the + // multisig account. + let simulation_resp = &simulation_resp.as_array().unwrap()[0]; + assert!(simulation_resp["success"].as_bool().unwrap()); + let withdraw_event = &simulation_resp["events"].as_array().unwrap()[0]; + assert_eq!( + withdraw_event["type"].as_str().unwrap(), + "0x1::coin::CoinWithdraw" + ); + let withdraw_from_account = + AccountAddress::from_hex_literal(withdraw_event["data"]["account"].as_str().unwrap()) + .unwrap(); + let withdrawn_amount = withdraw_event["data"]["amount"].as_str().unwrap(); + assert_eq!(withdraw_from_account, multisig_account); + assert_eq!(withdrawn_amount, "1000"); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_multisig_transaction_simulation_2_of_3() { let mut context = new_test_context(current_function_name!()); let owner_account_1 = &mut context.create_account().await; let owner_account_2 = &mut context.create_account().await; @@ -429,8 +477,16 @@ async fn test_multisig_transaction_simulation() { ) .await; - // Should be able to simulate the multisig tx without having enough approvals or the transaction - // created. 
+ let multisig_payload = construct_multisig_txn_transfer_payload(owner_account_1.address(), 1000); + context + .create_multisig_transaction(owner_account_1, multisig_account, multisig_payload.clone()) + .await; + + context + .approve_multisig_transaction(owner_account_2, multisig_account, 1) + .await; + + // Simulate the multisig transaction let simulation_resp = context .simulate_multisig_transaction( owner_account_1, @@ -456,6 +512,27 @@ async fn test_multisig_transaction_simulation() { let withdrawn_amount = withdraw_event["data"]["amount"].as_str().unwrap(); assert_eq!(withdraw_from_account, multisig_account); assert_eq!(withdrawn_amount, "1000"); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_multisig_transaction_simulation_fail() { + let mut context = new_test_context(current_function_name!()); + let owner_account_1 = &mut context.create_account().await; + let owner_account_2 = &mut context.create_account().await; + let owner_account_3 = &mut context.create_account().await; + let multisig_account = context + .create_multisig_account( + owner_account_1, + vec![owner_account_2.address(), owner_account_3.address()], + 1, /* 1-of-3 */ + 1000, /* initial balance */ + ) + .await; + + let multisig_payload = construct_multisig_txn_transfer_payload(owner_account_1.address(), 2000); + context + .create_multisig_transaction(owner_account_1, multisig_account, multisig_payload.clone()) + .await; // Simulating transferring more than what the multisig account has should fail. 
let simulation_resp = context @@ -469,7 +546,56 @@ ) .await; let simulation_resp = &simulation_resp.as_array().unwrap()[0]; + let transaction_failed = &simulation_resp["events"] + .as_array() + .unwrap() + .iter() + .any(|event| { + event["type"] + .as_str() + .unwrap() + .contains("TransactionExecutionFailed") + }); + assert!(transaction_failed); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_multisig_transaction_simulation_fail_2_of_3_insufficient_approvals() { + let mut context = new_test_context(current_function_name!()); + let owner_account_1 = &mut context.create_account().await; + let owner_account_2 = &mut context.create_account().await; + let owner_account_3 = &mut context.create_account().await; + let multisig_account = context + .create_multisig_account( + owner_account_1, + vec![owner_account_2.address(), owner_account_3.address()], + 2, /* 2-of-3 */ + 1000, /* initial balance */ + ) + .await; + + let multisig_payload = construct_multisig_txn_transfer_payload(owner_account_1.address(), 2000); + context + .create_multisig_transaction(owner_account_1, multisig_account, multisig_payload.clone()) + .await; + + // Simulating without sufficient approvals should fail. 
+ let simulation_resp = context + .simulate_multisig_transaction( + owner_account_1, + multisig_account, + "0x1::aptos_account::transfer", + &[], + &[&owner_account_1.address().to_hex_literal(), "1000"], + 200, + ) + .await; + let simulation_resp = &simulation_resp.as_array().unwrap()[0]; assert!(!simulation_resp["success"].as_bool().unwrap()); + assert!(simulation_resp["vm_status"] + .as_str() + .unwrap() + .contains("MULTISIG_TRANSACTION_INSUFFICIENT_APPROVALS")); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] @@ -487,6 +613,11 @@ async fn test_simulate_multisig_transaction_should_charge_gas_against_sender() { .await; assert_eq!(10, context.get_apt_balance(multisig_account).await); + let multisig_payload = construct_multisig_txn_transfer_payload(owner_account.address(), 10); + context + .create_multisig_transaction(owner_account, multisig_account, multisig_payload.clone()) + .await; + // This simulation should succeed because gas should be paid out of the sender account (owner), // not the multisig account itself. 
let simulation_resp = context diff --git a/api/src/tests/simulation_test.rs b/api/src/tests/simulation_test.rs index 3e60ecb9af937..7f2623e1f028e 100644 --- a/api/src/tests/simulation_test.rs +++ b/api/src/tests/simulation_test.rs @@ -2,10 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 use super::new_test_context; -use aptos_api_test_context::{current_function_name, TestContext}; +use aptos_api_test_context::{current_function_name, pretty, TestContext}; use aptos_crypto::ed25519::Ed25519Signature; -use aptos_types::transaction::{ - authenticator::TransactionAuthenticator, EntryFunction, TransactionPayload, +use aptos_types::{ + account_address::AccountAddress, + transaction::{ + authenticator::{AccountAuthenticator, TransactionAuthenticator}, + EntryFunction, RawTransaction, SignedTransaction, TransactionPayload, + }, }; use move_core_types::{ident_str, language_storage::ModuleId}; use serde_json::json; @@ -153,3 +157,272 @@ async fn test_simulate_txn_with_aggregator() { unreachable!("Simulation uses Ed25519 authenticator."); } } + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_bcs_simulate_simple() { + let transfer_amount: u64 = SMALL_TRANSFER_AMOUNT; + + let mut context = new_test_context(current_function_name!()); + let alice = &mut context.gen_account(); + let bob = &mut context.gen_account(); + let txn = context.mint_user_account(alice).await; + context.commit_block(&vec![txn]).await; + + let txn = context.account_transfer_to(alice, bob.address(), transfer_amount); + let body = bcs::to_bytes(&txn).unwrap(); + + // expected to fail due to using a valid signature. 
+ let _resp = context + .expect_status_code(400) + .post_bcs_txn("/transactions/simulate", body) + .await; + + if let TransactionAuthenticator::Ed25519 { + public_key, + signature: _, + } = txn.authenticator_ref() + { + let txn = SignedTransaction::new_signed_transaction( + txn.clone().into_raw_transaction(), + TransactionAuthenticator::Ed25519 { + public_key: public_key.clone(), + signature: Ed25519Signature::dummy_signature(), + }, + ); + + let body = bcs::to_bytes(&txn).unwrap(); + + // expected to succeed + let resp = context + .expect_status_code(200) + .post_bcs_txn("/transactions/simulate", body) + .await; + + assert!(resp[0]["success"].as_bool().unwrap(), "{}", pretty(&resp)); + } else { + unreachable!("Simulation uses Ed25519 authenticator."); + } +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_bcs_simulate_without_auth_key_check() { + let transfer_amount: u64 = SMALL_TRANSFER_AMOUNT; + + let mut context = new_test_context(current_function_name!()); + let alice = &mut context.gen_account(); + let bob = &mut context.gen_account(); + let txn = context.mint_user_account(alice).await; + context.commit_block(&vec![txn]).await; + + // Construct a signed transaction. + let txn = context.account_transfer_to(alice, bob.address(), transfer_amount); + // Replace the authenticator with a NoAccountAuthenticator in the transaction. 
+ let txn = SignedTransaction::new_signed_transaction( + txn.clone().into_raw_transaction(), + TransactionAuthenticator::SingleSender { + sender: AccountAuthenticator::NoAccountAuthenticator, + }, + ); + + let body = bcs::to_bytes(&txn).unwrap(); + + // expected to succeed + let resp = context + .expect_status_code(200) + .post_bcs_txn("/transactions/simulate", body) + .await; + assert!(resp[0]["success"].as_bool().unwrap(), "{}", pretty(&resp)); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_bcs_simulate_fee_payer_transaction_without_gas_fee_check() { + let mut context = new_test_context(current_function_name!()); + let alice = &mut context.gen_account(); + let bob = &mut context.gen_account(); + let txn = context.mint_user_account(alice).await; + context.commit_block(&vec![txn]).await; + + let transfer_amount: u64 = SMALL_TRANSFER_AMOUNT; + let txn = context.account_transfer_to(alice, bob.address(), transfer_amount); + let raw_txn = RawTransaction::new( + txn.sender(), + txn.sequence_number(), + txn.payload().clone(), + txn.max_gas_amount(), + 100, + txn.expiration_timestamp_secs(), + txn.chain_id(), + ); + let txn = SignedTransaction::new_signed_transaction( + raw_txn.clone(), + TransactionAuthenticator::FeePayer { + sender: AccountAuthenticator::NoAccountAuthenticator, + secondary_signer_addresses: vec![], + secondary_signers: vec![], + fee_payer_address: AccountAddress::ONE, + fee_payer_signer: AccountAuthenticator::NoAccountAuthenticator, + }, + ); + let body = bcs::to_bytes(&txn).unwrap(); + let resp = context + .expect_status_code(200) + .post_bcs_txn("/transactions/simulate", body) + .await; + assert!(!resp[0]["success"].as_bool().unwrap(), "{}", pretty(&resp)); + assert!( + resp[0]["vm_status"] + .as_str() + .unwrap() + .contains("INSUFFICIENT_BALANCE_FOR_TRANSACTION_FEE"), + "{}", + pretty(&resp) + ); + + let txn = SignedTransaction::new_signed_transaction( + raw_txn.clone(), + TransactionAuthenticator::FeePayer { + 
sender: AccountAuthenticator::NoAccountAuthenticator, + secondary_signer_addresses: vec![], + secondary_signers: vec![], + fee_payer_address: AccountAddress::ZERO, + fee_payer_signer: AccountAuthenticator::NoAccountAuthenticator, + }, + ); + let body = bcs::to_bytes(&txn).unwrap(); + let resp = context + .expect_status_code(200) + .post_bcs_txn("/transactions/simulate", body) + .await; + assert!(resp[0]["success"].as_bool().unwrap(), "{}", pretty(&resp)); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_bcs_simulate_automated_account_creation() { + let mut context = new_test_context(current_function_name!()); + let alice = &mut context.gen_account(); + let bob = &mut context.gen_account(); + + let transfer_amount: u64 = 0; + let txn = context.account_transfer_to(alice, bob.address(), transfer_amount); + let raw_txn = RawTransaction::new( + txn.sender(), + txn.sequence_number(), + txn.payload().clone(), + txn.max_gas_amount(), + 100, + txn.expiration_timestamp_secs(), + txn.chain_id(), + ); + // Replace the authenticator with a NoAccountAuthenticator in the transaction. 
+ let txn = SignedTransaction::new_signed_transaction( + raw_txn.clone(), + TransactionAuthenticator::SingleSender { + sender: AccountAuthenticator::NoAccountAuthenticator, + }, + ); + + let body = bcs::to_bytes(&txn).unwrap(); + + let resp = context + .expect_status_code(200) + .post_bcs_txn("/transactions/simulate", body) + .await; + assert!(!resp[0]["success"].as_bool().unwrap(), "{}", pretty(&resp)); + assert!( + resp[0]["vm_status"] + .as_str() + .unwrap() + .contains("SENDING_ACCOUNT_DOES_NOT_EXIST"), + "{}", + pretty(&resp) + ); + + let txn = + SignedTransaction::new_signed_transaction(raw_txn, TransactionAuthenticator::FeePayer { + sender: AccountAuthenticator::NoAccountAuthenticator, + secondary_signer_addresses: vec![], + secondary_signers: vec![], + fee_payer_address: AccountAddress::ZERO, + fee_payer_signer: AccountAuthenticator::NoAccountAuthenticator, + }); + let body = bcs::to_bytes(&txn).unwrap(); + let resp = context + .expect_status_code(200) + .post_bcs_txn("/transactions/simulate", body) + .await; + assert!(resp[0]["success"].as_bool().unwrap(), "{}", pretty(&resp)); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_bcs_execute_simple_no_authenticator_fail() { + let transfer_amount: u64 = SMALL_TRANSFER_AMOUNT; + + let mut context = new_test_context(current_function_name!()); + let alice = &mut context.gen_account(); + let bob = &mut context.gen_account(); + let txn = context.mint_user_account(alice).await; + context.commit_block(&vec![txn]).await; + + // Construct a signed transaction. + let txn = context.account_transfer_to(alice, bob.address(), transfer_amount); + // Replace the authenticator with a NoAccountAuthenticator in the transaction. 
+ let txn = SignedTransaction::new_signed_transaction( + txn.clone().into_raw_transaction(), + TransactionAuthenticator::SingleSender { + sender: AccountAuthenticator::NoAccountAuthenticator, + }, + ); + + let body = bcs::to_bytes(&txn).unwrap(); + + // expected to fail due to the use of NoAccountAuthenticator in an actual execution + let resp = context + .expect_status_code(400) + .post_bcs_txn("/transactions", body) + .await; + assert!(resp["message"] + .as_str() + .unwrap() + .contains("INVALID_SIGNATURE")); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_bcs_execute_fee_payer_transaction_no_authenticator_fail() { + let mut context = new_test_context(current_function_name!()); + let alice = &mut context.gen_account(); + let bob = &mut context.gen_account(); + let txn = context.mint_user_account(alice).await; + context.commit_block(&vec![txn]).await; + + let transfer_amount: u64 = SMALL_TRANSFER_AMOUNT; + let txn = context.account_transfer_to(alice, bob.address(), transfer_amount); + let raw_txn = RawTransaction::new( + txn.sender(), + txn.sequence_number(), + txn.payload().clone(), + txn.max_gas_amount(), + 100, + txn.expiration_timestamp_secs(), + txn.chain_id(), + ); + + let txn = SignedTransaction::new_signed_transaction( + raw_txn.clone(), + TransactionAuthenticator::FeePayer { + sender: AccountAuthenticator::NoAccountAuthenticator, + secondary_signer_addresses: vec![], + secondary_signers: vec![], + fee_payer_address: AccountAddress::ZERO, + fee_payer_signer: AccountAuthenticator::NoAccountAuthenticator, + }, + ); + let body = bcs::to_bytes(&txn).unwrap(); + let resp = context + .expect_status_code(400) + .post_bcs_txn("/transactions", body) + .await; + assert!(resp["message"] + .as_str() + .unwrap() + .contains("INVALID_SIGNATURE")); +} diff --git a/api/src/tests/state_test.rs b/api/src/tests/state_test.rs index 48ba32bc23444..e1ace9ca255e9 100644 --- a/api/src/tests/state_test.rs +++ b/api/src/tests/state_test.rs @@ -171,7 
+171,7 @@ async fn test_merkle_leaves_with_nft_transfer() { let num_leaves_at_beginning = ctx .db - .get_state_leaf_count(ctx.db.get_latest_ledger_info_version().unwrap()) + .get_state_item_count(ctx.db.get_latest_ledger_info_version().unwrap()) .unwrap(); let transfer_to_owner_txn = creator.sign_multi_agent_with_transaction_builder( @@ -188,7 +188,7 @@ async fn test_merkle_leaves_with_nft_transfer() { ctx.commit_block(&vec![transfer_to_owner_txn]).await; let num_leaves_after_transfer_nft = ctx .db - .get_state_leaf_count(ctx.db.get_latest_ledger_info_version().unwrap()) + .get_state_item_count(ctx.db.get_latest_ledger_info_version().unwrap()) .unwrap(); assert_eq!( num_leaves_after_transfer_nft, @@ -209,7 +209,7 @@ async fn test_merkle_leaves_with_nft_transfer() { ctx.commit_block(&vec![transfer_to_creator_txn]).await; let num_leaves_after_return_nft = ctx .db - .get_state_leaf_count(ctx.db.get_latest_ledger_info_version().unwrap()) + .get_state_item_count(ctx.db.get_latest_ledger_info_version().unwrap()) .unwrap(); assert_eq!( diff --git a/api/src/tests/transactions_test.rs b/api/src/tests/transactions_test.rs index 0c5ef97e0db06..a05da2c713e49 100644 --- a/api/src/tests/transactions_test.rs +++ b/api/src/tests/transactions_test.rs @@ -3,7 +3,9 @@ // SPDX-License-Identifier: Apache-2.0 use super::new_test_context; -use crate::tests::new_test_context_with_config; +use crate::tests::{ + new_test_context_with_config, new_test_context_with_db_sharding_and_internal_indexer, +}; use aptos_api_test_context::{assert_json, current_function_name, pretty, TestContext}; use aptos_config::config::{GasEstimationStaticOverride, NodeConfig}; use aptos_crypto::{ @@ -19,7 +21,7 @@ use aptos_types::{ authenticator::{AuthenticationKey, TransactionAuthenticator}, EntryFunction, Script, SignedTransaction, }, - utility_coin::SUPRA_COIN_TYPE, + utility_coin::{SupraCoinType, CoinType}, }; use move_core_types::{ identifier::Identifier, @@ -750,13 +752,15 @@ async fn 
test_signing_message_with_payload( assert_eq!(ledger["ledger_version"].as_str().unwrap(), "3"); // metadata + user txn + state checkpoint } -#[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn test_get_account_transactions() { - let mut context = new_test_context(current_function_name!()); +async fn test_account_transaction_with_context(mut context: TestContext) { let account = context.gen_account(); let txn = context.create_user_account(&account).await; context.commit_block(&vec![txn]).await; + if let Some(indexer_reader) = context.context.indexer_reader.as_ref() { + indexer_reader.wait_for_internal_indexer(2).unwrap(); + } + let txns = context .get( format!( @@ -771,6 +775,15 @@ async fn test_get_account_transactions() { assert_json(txns, expected_txns); } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_get_account_transactions() { + let context = new_test_context(current_function_name!()); + test_account_transaction_with_context(context).await; + let shard_context = + new_test_context_with_db_sharding_and_internal_indexer(current_function_name!()); + test_account_transaction_with_context(shard_context).await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_get_account_transactions_filter_transactions_by_start_sequence_number() { let mut context = new_test_context(current_function_name!()); @@ -866,7 +879,7 @@ async fn test_get_txn_execute_failed_by_invalid_entry_function_address() { "0x1222", "Coin", "transfer", - vec![SUPRA_COIN_TYPE.clone()], + vec![SupraCoinType::type_tag()], vec![ bcs::to_bytes(&AccountAddress::from_hex_literal("0xdd").unwrap()).unwrap(), bcs::to_bytes(&1u64).unwrap(), @@ -885,7 +898,7 @@ async fn test_get_txn_execute_failed_by_invalid_entry_function_module_name() { "0x1", "CoinInvalid", "transfer", - vec![SUPRA_COIN_TYPE.clone()], + vec![SupraCoinType::type_tag()], vec![ bcs::to_bytes(&AccountAddress::from_hex_literal("0xdd").unwrap()).unwrap(), 
bcs::to_bytes(&1u64).unwrap(), @@ -904,7 +917,7 @@ async fn test_get_txn_execute_failed_by_invalid_entry_function_name() { "0x1", "Coin", "transfer_invalid", - vec![SUPRA_COIN_TYPE.clone()], + vec![SupraCoinType::type_tag()], vec![ bcs::to_bytes(&AccountAddress::from_hex_literal("0xdd").unwrap()).unwrap(), bcs::to_bytes(&1u64).unwrap(), @@ -923,7 +936,7 @@ async fn test_get_txn_execute_failed_by_invalid_entry_function_arguments() { "0x1", "Coin", "transfer", - vec![SUPRA_COIN_TYPE.clone()], + vec![SupraCoinType::type_tag()], vec![ bcs::to_bytes(&AccountAddress::from_hex_literal("0xdd").unwrap()).unwrap(), bcs::to_bytes(&1u8).unwrap(), // invalid type @@ -942,7 +955,7 @@ async fn test_get_txn_execute_failed_by_missing_entry_function_arguments() { "0x1", "Coin", "transfer", - vec![SUPRA_COIN_TYPE.clone()], + vec![SupraCoinType::type_tag()], vec![ bcs::to_bytes(&AccountAddress::from_hex_literal("0xdd").unwrap()).unwrap(), // missing arguments @@ -965,7 +978,7 @@ async fn test_get_txn_execute_failed_by_entry_function_validation() { "0x1", "Coin", "transfer", - vec![SUPRA_COIN_TYPE.clone()], + vec![SupraCoinType::type_tag()], vec![ bcs::to_bytes(&AccountAddress::from_hex_literal("0xdd").unwrap()).unwrap(), bcs::to_bytes(&123u64).unwrap(), // exceed limit, account balance is 0. @@ -988,7 +1001,7 @@ async fn test_get_txn_execute_failed_by_entry_function_invalid_module_name() { "0x1", "coin", "transfer::what::what", - vec![SUPRA_COIN_TYPE.clone()], + vec![SupraCoinType::type_tag()], vec![ bcs::to_bytes(&AccountAddress::from_hex_literal("0xdd").unwrap()).unwrap(), bcs::to_bytes(&123u64).unwrap(), // exceed limit, account balance is 0. 
@@ -1011,7 +1024,7 @@ async fn test_get_txn_execute_failed_by_entry_function_invalid_function_name() { "0x1", "coin::coin", "transfer", - vec![SUPRA_COIN_TYPE.clone()], + vec![SupraCoinType::type_tag()], vec![ bcs::to_bytes(&AccountAddress::from_hex_literal("0xdd").unwrap()).unwrap(), bcs::to_bytes(&123u64).unwrap(), // exceed limit, account balance is 0. @@ -1552,7 +1565,7 @@ async fn test_simulation_failure_with_detail_error() { Identifier::new("MemeCoin").unwrap(), ), Identifier::new("transfer").unwrap(), - vec![SUPRA_COIN_TYPE.clone()], + vec![SupraCoinType::type_tag()], vec![ bcs::to_bytes(&AccountAddress::from_hex_literal("0xdd").unwrap()).unwrap(), bcs::to_bytes(&1u64).unwrap(), diff --git a/api/src/transactions.rs b/api/src/transactions.rs index 76e78c8bea425..4d972b17161e4 100644 --- a/api/src/transactions.rs +++ b/api/src/transactions.rs @@ -37,7 +37,7 @@ use aptos_types::{ RawTransactionWithData, SignedTransaction, TransactionPayload, }, vm_status::StatusCode, - SUPRA_COIN_TYPE, + SupraCoinType, CoinType, }; use aptos_vm::{AptosSimulationVM, AptosVM}; use move_core_types::{ident_str, language_storage::ModuleId, vm_status::VMStatus}; @@ -568,7 +568,7 @@ impl TransactionsApi { &state_view, ModuleId::new(AccountAddress::ONE, ident_str!("coin").into()), ident_str!("balance").into(), - vec![SUPRA_COIN_TYPE.clone()], + vec![SupraCoinType::type_tag()], vec![signed_transaction.sender().to_vec()], context.node_config.api.max_gas_view_function, ); @@ -995,7 +995,7 @@ impl TransactionsApi { address.into(), page.start_option(), page.limit(&latest_ledger_info)?, - latest_ledger_info.version(), + account.ledger_version, &latest_ledger_info, )?; match accept_type { @@ -1435,8 +1435,7 @@ impl TransactionsApi { let mut user_transactions = Vec::new(); for transaction in transactions.into_iter() { match transaction { - Transaction::UserTransaction(user_txn) => { - let mut txn = *user_txn; + Transaction::UserTransaction(mut user_txn) => { match &vm_status { VMStatus::Error 
{ message: Some(msg), .. @@ -1444,13 +1443,13 @@ impl TransactionsApi { | VMStatus::ExecutionFailure { message: Some(msg), .. } => { - txn.info.vm_status += + user_txn.info.vm_status += format!("\nExecution failed with message: {}", msg) .as_str(); }, _ => (), } - user_transactions.push(txn); + user_transactions.push(user_txn); }, _ => { return Err(SubmitTransactionError::internal_with_code( diff --git a/api/test-context/Cargo.toml b/api/test-context/Cargo.toml index 9c4a9e61a8858..db9be88d6dee1 100644 --- a/api/test-context/Cargo.toml +++ b/api/test-context/Cargo.toml @@ -23,6 +23,7 @@ aptos-executor = { workspace = true } aptos-executor-types = { workspace = true } aptos-framework = { workspace = true } aptos-genesis = { workspace = true } +aptos-indexer-grpc-table-info = { workspace = true } aptos-mempool = { workspace = true, features = ["fuzzing"] } aptos-mempool-notifications = { workspace = true } aptos-sdk = { workspace = true } diff --git a/api/test-context/src/test_context.rs b/api/test-context/src/test_context.rs index 609019db0d513..4f17097f7c271 100644 --- a/api/test-context/src/test_context.rs +++ b/api/test-context/src/test_context.rs @@ -10,7 +10,7 @@ use aptos_api_types::{ use aptos_cached_packages::aptos_stdlib; use aptos_config::{ config::{ - NodeConfig, RocksdbConfigs, StorageDirPaths, BUFFERED_STATE_TARGET_ITEMS, + NodeConfig, RocksdbConfigs, StorageDirPaths, BUFFERED_STATE_TARGET_ITEMS_FOR_TEST, DEFAULT_MAX_NUM_NODES_PER_LRU_CACHE_SHARD, NO_OP_STORAGE_PRUNER_CONFIG, }, keys::ConfigKey, @@ -20,6 +20,7 @@ use aptos_db::AptosDB; use aptos_executor::{block_executor::BlockExecutor, db_bootstrapper}; use aptos_executor_types::BlockExecutorTrait; use aptos_framework::BuiltPackage; +use aptos_indexer_grpc_table_info::internal_indexer_db_service::MockInternalIndexerDBService; use aptos_mempool::mocks::MockSharedMempool; use aptos_mempool_notifications::MempoolNotificationSender; use aptos_sdk::{ @@ -94,7 +95,7 @@ impl ApiSpecificConfig { pub fn 
new_test_context( test_name: String, - node_config: NodeConfig, + mut node_config: NodeConfig, use_db_with_indexer: bool, ) -> TestContext { // Speculative logging uses a global variable and when many instances use it together, they @@ -119,17 +120,27 @@ pub fn new_test_context( let validator_owner = validator_identity.account_address.unwrap(); let (db, db_rw) = if use_db_with_indexer { - DbReaderWriter::wrap(AptosDB::new_for_test_with_indexer(&tmp_dir)) + DbReaderWriter::wrap(AptosDB::new_for_test_with_indexer( + &tmp_dir, + node_config.storage.rocksdb_configs.enable_storage_sharding, + )) } else { DbReaderWriter::wrap( AptosDB::open( StorageDirPaths::from_path(&tmp_dir), false, /* readonly */ NO_OP_STORAGE_PRUNER_CONFIG, /* pruner */ - RocksdbConfigs::default(), + RocksdbConfigs { + enable_storage_sharding: node_config + .storage + .rocksdb_configs + .enable_storage_sharding, + ..Default::default() + }, false, /* indexer */ - BUFFERED_STATE_TARGET_ITEMS, + BUFFERED_STATE_TARGET_ITEMS_FOR_TEST, DEFAULT_MAX_NUM_NODES_PER_LRU_CACHE_SHARD, + None, ) .unwrap(), ) @@ -140,12 +151,18 @@ pub fn new_test_context( let mempool = MockSharedMempool::new_in_runtime(&db_rw, VMValidator::new(db.clone())); + node_config + .storage + .set_data_dir(tmp_dir.path().to_path_buf()); + let mock_indexer_service = + MockInternalIndexerDBService::new_for_test(db_rw.reader.clone(), &node_config); + let context = Context::new( ChainId::test(), db.clone(), mempool.ac_client.clone(), node_config.clone(), - None, /* table info reader */ + mock_indexer_service.get_indexer_reader(), ); // Configure the testing depending on which API version we're testing. 
diff --git a/api/types/src/bytecode.rs b/api/types/src/bytecode.rs index 83e6ab0b8d89f..0f9a40e243805 100644 --- a/api/types/src/bytecode.rs +++ b/api/types/src/bytecode.rs @@ -46,6 +46,16 @@ pub trait Bytecode { fn function_is_view(&self, name: &IdentStr) -> bool; + fn struct_is_event(&self, name: &IdentStr) -> bool { + match self.metadata() { + Some(m) => match m.struct_attributes.get(name.as_str()) { + Some(attrs) => attrs.iter().any(|attr| attr.is_event()), + None => false, + }, + None => false, + } + } + fn new_move_struct_field(&self, def: &FieldDefinition) -> MoveStructField { MoveStructField { name: self.identifier_at(def.name).to_owned().into(), @@ -109,8 +119,13 @@ pub trait Bytecode { .map(|f| self.new_move_struct_field(f)) .collect(), ), + StructFieldInformation::DeclaredVariants(..) => { + // TODO(#13806): implement for enums. Currently we pretend they don't have fields + (false, vec![]) + }, }; let name = self.identifier_at(handle.name).to_owned(); + let is_event = self.struct_is_event(&name); let abilities = handle .abilities .into_iter() @@ -124,6 +139,7 @@ pub trait Bytecode { MoveStruct { name: name.into(), is_native, + is_event, abilities, generic_type_params, fields, diff --git a/api/types/src/convert.rs b/api/types/src/convert.rs index d7d160fb1d585..c5101f72d7d2e 100644 --- a/api/types/src/convert.rs +++ b/api/types/src/convert.rs @@ -45,6 +45,7 @@ use move_core_types::{ ident_str, identifier::{IdentStr, Identifier}, language_storage::{ModuleId, StructTag, TypeTag}, + transaction_argument::convert_txn_args, value::{MoveStructLayout, MoveTypeLayout}, }; use serde_json::Value; @@ -154,7 +155,10 @@ impl<'a, S: StateView> MoveConverter<'a, S> { &self, typ: &StructTag, bytes: &'_ [u8], - ) -> Result> { + ) -> Result<( + Option, + Vec<(Identifier, move_core_types::value::MoveValue)>, + )> { self.inner.view_struct_fields(typ, bytes) } @@ -272,7 +276,26 @@ impl<'a, S: StateView> MoveConverter<'a, S> { ) -> Result { use 
aptos_types::transaction::TransactionPayload::*; let ret = match payload { - Script(s) => TransactionPayload::ScriptPayload(s.try_into()?), + Script(s) => { + let (code, ty_args, args) = s.into_inner(); + let script_args = self.inner.view_script_arguments(&code, &args, &ty_args); + + let json_args = match script_args { + Ok(values) => values + .into_iter() + .map(|v| MoveValue::try_from(v)?.json()) + .collect::>()?, + Err(_e) => convert_txn_args(&args) + .into_iter() + .map(|arg| HexEncodedBytes::from(arg).json()) + .collect::>()?, + }; + TransactionPayload::ScriptPayload(ScriptPayload { + code: MoveScriptBytecode::new(code).try_parse_abi(), + type_arguments: ty_args.into_iter().map(|arg| arg.into()).collect(), + arguments: json_args, + }) + }, EntryFunction(fun) => { let (module, function, ty_args, args) = fun.into_inner(); let func_args = self @@ -988,14 +1011,9 @@ impl<'a, S: StateView> MoveConverter<'a, S> { fn get_table_info(&self, handle: TableHandle) -> Result> { if let Some(indexer_reader) = self.indexer_reader.as_ref() { - // Attempt to get table_info from the indexer_reader if it exists - Ok(indexer_reader.get_table_info(handle)?) 
- } else if self.db.indexer_enabled() { - // Attempt to get table_info from the db if indexer is enabled - Ok(Some(self.db.get_table_info(handle)?)) - } else { - Ok(None) + return Ok(indexer_reader.get_table_info(handle).unwrap_or(None)); } + Ok(None) } pub fn explain_vm_status( @@ -1003,7 +1021,13 @@ impl<'a, S: StateView> MoveConverter<'a, S> { status: &ExecutionStatus, txn_aux_data: Option, ) -> String { - match status { + let mut status = status.to_owned(); + status = if let Some(aux_data) = txn_aux_data { + ExecutionStatus::aug_with_aux_data(status, &aux_data) + } else { + status + }; + match &status { ExecutionStatus::MoveAbort { location, code, @@ -1055,17 +1079,10 @@ impl<'a, S: StateView> MoveConverter<'a, S> { ) }, ExecutionStatus::MiscellaneousError(code) => { - if txn_aux_data.is_none() && code.is_none() { + if code.is_none() { "Execution failed with miscellaneous error and no status code".to_owned() - } else if code.is_some() { - format!("{:#?}", code.unwrap()) } else { - let aux_data = txn_aux_data.unwrap(); - let vm_details = aux_data.get_detail_error_message(); - vm_details.map_or( - "Execution failed with miscellaneous error and no status code".to_owned(), - |e| format!("{:#?}", e.status_code()), - ) + format!("{:#?}", code.unwrap()) } }, } diff --git a/api/types/src/error.rs b/api/types/src/error.rs index cb0d61bc67ffb..3f7454f50c9b8 100644 --- a/api/types/src/error.rs +++ b/api/types/src/error.rs @@ -53,7 +53,7 @@ impl AptosError { /// These codes provide more granular error information beyond just the HTTP /// status code of the response. 
-#[derive(Copy, Clone, Debug, Serialize, Deserialize, Enum)] +#[derive(Copy, Clone, Debug, PartialEq, Serialize, Deserialize, Enum)] #[oai(rename_all = "snake_case")] #[serde(rename_all = "snake_case")] #[repr(u32)] diff --git a/api/types/src/ledger_info.rs b/api/types/src/ledger_info.rs index ef912190c94c9..97438ae104013 100644 --- a/api/types/src/ledger_info.rs +++ b/api/types/src/ledger_info.rs @@ -40,6 +40,26 @@ impl LedgerInfo { } } + pub fn new_ledger_info( + chain_id: &ChainId, + epoch: u64, + ledger_version: u64, + oldest_ledger_version: u64, + oldest_block_height: u64, + block_height: u64, + ledger_timestamp: u64, + ) -> Self { + Self { + chain_id: chain_id.id(), + epoch: epoch.into(), + ledger_version: ledger_version.into(), + oldest_ledger_version: oldest_ledger_version.into(), + block_height: block_height.into(), + oldest_block_height: oldest_block_height.into(), + ledger_timestamp: ledger_timestamp.into(), + } + } + pub fn epoch(&self) -> u64 { self.epoch.into() } diff --git a/api/types/src/lib.rs b/api/types/src/lib.rs index ccb1da50d0a8c..e7ab206cde38a 100644 --- a/api/types/src/lib.rs +++ b/api/types/src/lib.rs @@ -49,9 +49,9 @@ pub use transaction::{ DirectWriteSet, Ed25519Signature, EncodeSubmissionRequest, EntryFunctionPayload, Event, FeePayerSignature, GasEstimation, GasEstimationBcs, GenesisPayload, GenesisTransaction, MultiAgentSignature, MultiEd25519Signature, MultiKeySignature, MultisigPayload, - MultisigTransactionPayload, PendingTransaction, PublicKey, ScriptPayload, ScriptWriteSet, - Signature, SingleKeySignature, SubmitTransactionRequest, Transaction, TransactionData, - TransactionId, TransactionInfo, TransactionOnChainData, TransactionPayload, + MultisigTransactionPayload, NoAccountSignature, PendingTransaction, PublicKey, ScriptPayload, + ScriptWriteSet, Signature, SingleKeySignature, SubmitTransactionRequest, Transaction, + TransactionData, TransactionId, TransactionInfo, TransactionOnChainData, TransactionPayload, 
TransactionSignature, TransactionSigningMessage, TransactionsBatchSingleSubmissionFailure, TransactionsBatchSubmissionResult, UserCreateSigningMessageRequest, UserTransaction, UserTransactionRequest, VersionedEvent, WriteModule, WriteResource, WriteSet, WriteSetChange, diff --git a/api/types/src/move_types.rs b/api/types/src/move_types.rs index 38aaadbf82c7f..885b01e39b3cd 100644 --- a/api/types/src/move_types.rs +++ b/api/types/src/move_types.rs @@ -225,6 +225,12 @@ impl TryFrom for MoveStructValue { fn try_from(s: AnnotatedMoveStruct) -> anyhow::Result { let mut map = BTreeMap::new(); + if let Some((_, name)) = s.variant_info { + map.insert( + IdentifierWrapper::from_str("__variant__")?, + MoveValue::String(name.to_string()).json()?, + ); + } for (id, val) in s.value { map.insert(id.into(), MoveValue::try_from(val)?.json()?); } @@ -324,6 +330,7 @@ impl From for MoveValue { TransactionArgument::Bool(v) => MoveValue::Bool(v), TransactionArgument::Address(v) => MoveValue::Address(v.into()), TransactionArgument::U8Vector(bytes) => MoveValue::Bytes(HexEncodedBytes(bytes)), + TransactionArgument::Serialized(bytes) => MoveValue::Bytes(HexEncodedBytes(bytes)), } } } @@ -853,6 +860,8 @@ pub struct MoveStruct { pub name: IdentifierWrapper, /// Whether the struct is a native struct of Move pub is_native: bool, + /// Whether the struct is marked with the #[event] annotation + pub is_event: bool, /// Abilities associated with the struct pub abilities: Vec, /// Generic types associated with the struct @@ -1506,6 +1515,7 @@ mod tests { AnnotatedMoveStruct { abilities: AbilitySet::EMPTY, ty_tag: type_struct(typ), + variant_info: None, value: values, } } diff --git a/api/types/src/transaction.rs b/api/types/src/transaction.rs index 602d40607c2b3..4af7dae0b843e 100755 --- a/api/types/src/transaction.rs +++ b/api/types/src/transaction.rs @@ -176,7 +176,7 @@ impl #[oai(one_of, discriminator_name = "type", rename_all = "snake_case")] pub enum Transaction { 
PendingTransaction(PendingTransaction), - UserTransaction(Box), + UserTransaction(UserTransaction), GenesisTransaction(GenesisTransaction), BlockMetadataTransaction(BlockMetadataTransaction), StateCheckpointTransaction(StateCheckpointTransaction), @@ -292,12 +292,12 @@ impl u64, ), ) -> Self { - Transaction::UserTransaction(Box::new(UserTransaction { + Transaction::UserTransaction(UserTransaction { info, request: (txn, payload).into(), events, timestamp: timestamp.into(), - })) + }) } } @@ -1412,7 +1412,12 @@ impl VerifyInput for KeylessSignature { fn verify(&self) -> anyhow::Result<()> { let public_key_len = self.public_key.inner().len(); let signature_len = self.signature.inner().len(); - if public_key_len > keyless::KeylessPublicKey::MAX_LEN { + if public_key_len + > std::cmp::max( + keyless::KeylessPublicKey::MAX_LEN, + keyless::FederatedKeylessPublicKey::MAX_LEN, + ) + { bail!( "Keyless public key length is greater than the maximum number of {} bytes: found {} bytes", keyless::KeylessPublicKey::MAX_LEN, public_key_len @@ -1492,6 +1497,17 @@ impl Keyless { } } +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Object)] +pub struct FederatedKeyless { + pub value: HexEncodedBytes, +} + +impl FederatedKeyless { + pub fn new(value: HexEncodedBytes) -> Self { + Self { value } + } +} + impl TryFrom for AnySignature { type Error = anyhow::Error; @@ -1534,6 +1550,7 @@ pub enum PublicKey { Secp256k1Ecdsa(Secp256k1Ecdsa), Secp256r1Ecdsa(Secp256r1Ecdsa), Keyless(Keyless), + FederatedKeyless(FederatedKeyless), } impl TryFrom for AnyPublicKey { @@ -1549,6 +1566,9 @@ impl TryFrom for AnyPublicKey { AnyPublicKey::secp256r1_ecdsa(p.value.inner().try_into()?) }, PublicKey::Keyless(p) => AnyPublicKey::keyless(p.value.inner().try_into()?), + PublicKey::FederatedKeyless(p) => { + AnyPublicKey::federated_keyless(p.value.inner().try_into()?) 
+ }, }) } } @@ -1568,6 +1588,9 @@ impl From for PublicKey { AnyPublicKey::Keyless { public_key } => { PublicKey::Keyless(Keyless::new(public_key.to_bytes().into())) }, + AnyPublicKey::FederatedKeyless { public_key } => { + PublicKey::FederatedKeyless(FederatedKeyless::new(public_key.to_bytes().into())) + }, } } } @@ -1604,6 +1627,11 @@ impl VerifyInput for SingleKeySignature { signature: s.value.clone(), } .verify(), + (PublicKey::FederatedKeyless(p), Signature::Keyless(s)) => KeylessSignature { + public_key: p.value.clone(), + signature: s.value.clone(), + } + .verify(), _ => bail!("Invalid public key, signature match."), } } @@ -1649,6 +1677,12 @@ impl TryFrom for AccountAuthenticator { )?; AnyPublicKey::keyless(key) }, + PublicKey::FederatedKeyless(p) => { + let key = p.value.inner().try_into().context( + "Failed to parse given public_key bytes as AnyPublicKey::FederatedKeyless", + )?; + AnyPublicKey::keyless(key) + }, }; let signature = match value.signature { @@ -1751,6 +1785,12 @@ impl TryFrom for AccountAuthenticator { )?; AnyPublicKey::keyless(key) }, + PublicKey::FederatedKeyless(p) => { + let key = p.value.inner().try_into().context( + "Failed to parse given public_key bytes as AnyPublicKey::FederatedKeyless", + )?; + AnyPublicKey::federated_keyless(key) + }, }; public_keys.push(key); } @@ -1794,6 +1834,33 @@ impl TryFrom for AccountAuthenticator { } } +/// A placeholder to represent the absence of account signature +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Object)] +pub struct NoAccountSignature; + +impl VerifyInput for NoAccountSignature { + fn verify(&self) -> anyhow::Result<()> { + Ok(()) + } +} + +impl TryFrom for TransactionAuthenticator { + type Error = anyhow::Error; + + fn try_from(signature: NoAccountSignature) -> Result { + let account_auth = signature.try_into()?; + Ok(TransactionAuthenticator::single_sender(account_auth)) + } +} + +impl TryFrom for AccountAuthenticator { + type Error = anyhow::Error; + + fn 
try_from(_value: NoAccountSignature) -> Result { + Ok(AccountAuthenticator::NoAccountAuthenticator) + } +} + /// Account signature scheme /// /// The account signature scheme allows you to have two types of accounts: @@ -1809,6 +1876,7 @@ pub enum AccountSignature { MultiEd25519Signature(MultiEd25519Signature), SingleKeySignature(SingleKeySignature), MultiKeySignature(MultiKeySignature), + NoAccountSignature(NoAccountSignature), } impl VerifyInput for AccountSignature { @@ -1818,6 +1886,7 @@ impl VerifyInput for AccountSignature { AccountSignature::MultiEd25519Signature(inner) => inner.verify(), AccountSignature::SingleKeySignature(inner) => inner.verify(), AccountSignature::MultiKeySignature(inner) => inner.verify(), + AccountSignature::NoAccountSignature(inner) => inner.verify(), } } } @@ -1831,6 +1900,7 @@ impl TryFrom for AccountAuthenticator { AccountSignature::MultiEd25519Signature(s) => s.try_into()?, AccountSignature::SingleKeySignature(s) => s.try_into()?, AccountSignature::MultiKeySignature(s) => s.try_into()?, + AccountSignature::NoAccountSignature(s) => s.try_into()?, }) } } @@ -1991,6 +2061,7 @@ impl From<&AccountAuthenticator> for AccountSignature { signatures_required: public_keys.signatures_required(), }) }, + NoAccountAuthenticator => AccountSignature::NoAccountSignature(NoAccountSignature), } } } diff --git a/aptos-move/aptos-abstract-gas-usage/Cargo.toml b/aptos-move/aptos-abstract-gas-usage/Cargo.toml index b1af513ea2e2e..d1a5026aea75f 100644 --- a/aptos-move/aptos-abstract-gas-usage/Cargo.toml +++ b/aptos-move/aptos-abstract-gas-usage/Cargo.toml @@ -2,7 +2,7 @@ name = "aptos-abstract-gas-usage" version = "0.1.0" edition = "2021" -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +license = { workspace = true } [dependencies] anyhow = { workspace = true } diff --git a/aptos-move/aptos-debugger/Cargo.toml b/aptos-move/aptos-debugger/Cargo.toml index 208fabb573c27..896f868ddcfc3 100644 --- 
a/aptos-move/aptos-debugger/Cargo.toml +++ b/aptos-move/aptos-debugger/Cargo.toml @@ -14,6 +14,7 @@ rust-version = { workspace = true } [dependencies] anyhow = { workspace = true } +aptos-block-executor = { workspace = true } aptos-consensus = { workspace = true } aptos-crypto = { workspace = true } aptos-gas-profiling = { workspace = true } @@ -26,6 +27,7 @@ aptos-vm-logging = { workspace = true } aptos-vm-types = { workspace = true } bcs = { workspace = true } clap = { workspace = true } +itertools = { workspace = true } regex = { workspace = true } reqwest = { workspace = true } tokio = { workspace = true } diff --git a/aptos-move/aptos-debugger/src/aptos_debugger.rs b/aptos-move/aptos-debugger/src/aptos_debugger.rs index 1fc70796f6cbc..8a8c1409dec54 100644 --- a/aptos-move/aptos-debugger/src/aptos_debugger.rs +++ b/aptos-move/aptos-debugger/src/aptos_debugger.rs @@ -2,24 +2,34 @@ // SPDX-License-Identifier: Apache-2.0 use anyhow::{bail, format_err, Result}; +use aptos_block_executor::txn_commit_hook::NoOpTransactionCommitHook; use aptos_gas_profiling::{GasProfiler, TransactionGasLog}; use aptos_rest_client::Client; use aptos_types::{ account_address::AccountAddress, + block_executor::config::{ + BlockExecutorConfig, BlockExecutorConfigFromOnchain, BlockExecutorLocalConfig, + }, state_store::TStateView, transaction::{ - signature_verified_transaction::SignatureVerifiedTransaction, SignedTransaction, - Transaction, TransactionInfo, TransactionOutput, TransactionPayload, Version, + signature_verified_transaction::SignatureVerifiedTransaction, BlockOutput, + SignedTransaction, Transaction, TransactionInfo, TransactionOutput, TransactionPayload, + Version, }, vm_status::VMStatus, }; use aptos_validator_interface::{ AptosValidatorInterface, DBDebuggerInterface, DebuggerStateView, RestDebuggerInterface, }; -use aptos_vm::{data_cache::AsMoveResolver, AptosVM, VMExecutor}; +use aptos_vm::{ + block_executor::{AptosTransactionOutput, BlockAptosVM}, + 
data_cache::AsMoveResolver, + AptosVM, +}; use aptos_vm_logging::log_schema::AdapterLogSchema; use aptos_vm_types::output::VMOutput; -use std::{path::Path, sync::Arc}; +use itertools::Itertools; +use std::{path::Path, sync::Arc, time::Instant}; pub struct AptosDebugger { debugger: Arc, @@ -45,31 +55,50 @@ impl AptosDebugger { version: Version, txns: Vec, repeat_execution_times: u64, + concurrency_levels: &[usize], ) -> Result> { let sig_verified_txns: Vec = txns.into_iter().map(|x| x.into()).collect::>(); let state_view = DebuggerStateView::new(self.debugger.clone(), version); - let result = AptosVM::execute_block_no_limit(&sig_verified_txns, &state_view) - .map_err(|err| format_err!("Unexpected VM Error: {:?}", err))?; + print_transaction_stats(&sig_verified_txns, version); - for i in 1..repeat_execution_times { - let repeat_result = AptosVM::execute_block_no_limit(&sig_verified_txns, &state_view) - .map_err(|err| format_err!("Unexpected VM Error: {:?}", err))?; - println!( - "Finished execution round {}/{} with {} transactions", - i, - repeat_execution_times, - sig_verified_txns.len() - ); - if !Self::ensure_output_matches(&repeat_result, &result, version) { - bail!( - "Execution result mismatched in round {}/{}", - i, - repeat_execution_times + let mut result = None; + + for concurrency_level in concurrency_levels { + for i in 0..repeat_execution_times { + let start_time = Instant::now(); + let cur_result = + execute_block_no_limit(&sig_verified_txns, &state_view, *concurrency_level) + .map_err(|err| format_err!("Unexpected VM Error: {:?}", err))?; + + println!( + "[{} txns from {}] Finished execution round {}/{} with concurrency_level={} in {}ms", + sig_verified_txns.len(), + version, + i + 1, + repeat_execution_times, + concurrency_level, + start_time.elapsed().as_millis(), ); + + match &result { + None => result = Some(cur_result), + Some(prev_result) => { + if !Self::ensure_output_matches(&cur_result, prev_result, version) { + bail!( + "Execution result 
mismatched in round {}/{}", + i, + repeat_execution_times + ); + } + }, + } } } + + let result = result.unwrap(); + assert_eq!(sig_verified_txns.len(), result.len()); Ok(result) } @@ -121,33 +150,39 @@ impl AptosDebugger { pub async fn execute_past_transactions( &self, - mut begin: Version, - mut limit: u64, + begin: Version, + limit: u64, + use_same_block_boundaries: bool, repeat_execution_times: u64, + concurrency_levels: &[usize], ) -> Result> { - let (mut txns, mut txn_infos) = self + let (txns, txn_infos) = self .debugger .get_committed_transactions(begin, limit) .await?; - let mut ret = vec![]; - while limit != 0 { - println!( - "Starting epoch execution at {:?}, {:?} transactions remaining", - begin, limit - ); - let mut epoch_result = self - .execute_transactions_by_epoch(begin, txns.clone(), repeat_execution_times) - .await?; - begin += epoch_result.len() as u64; - limit -= epoch_result.len() as u64; - txns = txns.split_off(epoch_result.len()); - let epoch_txn_infos = txn_infos.drain(0..epoch_result.len()).collect::>(); - Self::print_mismatches(&epoch_result, &epoch_txn_infos, begin); - - ret.append(&mut epoch_result); + if use_same_block_boundaries { + // when going block by block, no need to worry about epoch boundaries + // as new epoch is always a new block. + Ok(self + .execute_transactions_by_block( + begin, + txns.clone(), + repeat_execution_times, + concurrency_levels, + ) + .await?) 
+ } else { + self.execute_transactions_by_epoch( + limit, + begin, + txns, + repeat_execution_times, + concurrency_levels, + txn_infos, + ) + .await } - Ok(ret) } fn print_mismatches( @@ -186,13 +221,19 @@ impl AptosDebugger { all_match } - pub async fn execute_transactions_by_epoch( + async fn execute_transactions_until_epoch_end( &self, begin: Version, txns: Vec, repeat_execution_times: u64, + concurrency_levels: &[usize], ) -> Result> { - let results = self.execute_transactions_at_version(begin, txns, repeat_execution_times)?; + let results = self.execute_transactions_at_version( + begin, + txns, + repeat_execution_times, + concurrency_levels, + )?; let mut ret = vec![]; let mut is_reconfig = false; @@ -208,6 +249,78 @@ impl AptosDebugger { Ok(ret) } + async fn execute_transactions_by_epoch( + &self, + mut limit: u64, + mut begin: u64, + mut txns: Vec, + repeat_execution_times: u64, + concurrency_levels: &[usize], + mut txn_infos: Vec, + ) -> Result> { + let mut ret = vec![]; + while limit != 0 { + println!( + "Starting epoch execution at {:?}, {:?} transactions remaining", + begin, limit + ); + + let mut epoch_result = self + .execute_transactions_until_epoch_end( + begin, + txns.clone(), + repeat_execution_times, + concurrency_levels, + ) + .await?; + begin += epoch_result.len() as u64; + limit -= epoch_result.len() as u64; + txns = txns.split_off(epoch_result.len()); + let epoch_txn_infos = txn_infos.drain(0..epoch_result.len()).collect::>(); + Self::print_mismatches(&epoch_result, &epoch_txn_infos, begin); + + ret.append(&mut epoch_result); + } + Ok(ret) + } + + async fn execute_transactions_by_block( + &self, + begin: Version, + txns: Vec, + repeat_execution_times: u64, + concurrency_levels: &[usize], + ) -> Result> { + let mut ret = vec![]; + let mut cur = vec![]; + let mut cur_version = begin; + for txn in txns { + if txn.is_block_start() && !cur.is_empty() { + let to_execute = std::mem::take(&mut cur); + let results = 
self.execute_transactions_at_version( + cur_version, + to_execute, + repeat_execution_times, + concurrency_levels, + )?; + cur_version += results.len() as u64; + ret.extend(results); + } + cur.push(txn); + } + if !cur.is_empty() { + let results = self.execute_transactions_at_version( + cur_version, + cur, + repeat_execution_times, + concurrency_levels, + )?; + ret.extend(results); + } + + Ok(ret) + } + pub async fn get_version_by_account_sequence( &self, account: AccountAddress, @@ -237,6 +350,67 @@ impl AptosDebugger { } } +fn print_transaction_stats(sig_verified_txns: &[SignatureVerifiedTransaction], version: u64) { + let transaction_types = sig_verified_txns + .iter() + .map(|txn| txn.expect_valid().type_name().to_string()) + // conflate same consecutive elements into one with count + .chunk_by(|k| k.clone()) + .into_iter() + .map(|(k, r)| { + let num = r.count(); + if num > 1 { + format!("{} {}s", num, k) + } else { + k + } + }) + .collect::>(); + let entry_functions = sig_verified_txns + .iter() + .filter_map(|txn| { + txn.expect_valid() + .try_as_signed_user_txn() + .map(|txn| match &txn.payload() { + TransactionPayload::EntryFunction(txn) => format!( + "entry: {:?}::{:?}", + txn.module().name.as_str(), + txn.function().as_str() + ), + TransactionPayload::Script(_) => "script".to_string(), + TransactionPayload::ModuleBundle(_) => panic!("deprecated module bundle"), + TransactionPayload::Multisig(_) => "multisig".to_string(), + }) + }) + // Count number of instances for each (irrsepsecitve of order) + .sorted() + .chunk_by(|k| k.clone()) + .into_iter() + .map(|(k, r)| (r.count(), k)) + .sorted_by_key(|(num, _k)| *num) + .rev() + .map(|(num, k)| { + if num > 1 { + format!("{} {}s", num, k) + } else { + k + } + }) + .collect::>(); + println!( + "[{} txns from {}] Transaction types: {:?}", + sig_verified_txns.len(), + version, + transaction_types + ); + println!( + "[{} txns from {}] Entry Functions {:?}", + sig_verified_txns.len(), + version, + entry_functions + 
); +} + fn is_reconfiguration(vm_output: &TransactionOutput) -> bool { let new_epoch_event_key = aptos_types::on_chain_config::new_epoch_event_key(); vm_output @@ -244,3 +418,24 @@ fn is_reconfiguration(vm_output: &TransactionOutput) -> bool { .iter() .any(|event| event.event_key() == Some(&new_epoch_event_key)) } + +fn execute_block_no_limit( + sig_verified_txns: &[SignatureVerifiedTransaction], + state_view: &DebuggerStateView, + concurrency_level: usize, +) -> Result, VMStatus> { + BlockAptosVM::execute_block::<_, NoOpTransactionCommitHook>( + sig_verified_txns, + state_view, + BlockExecutorConfig { + local: BlockExecutorLocalConfig { + concurrency_level, + allow_fallback: true, + discard_failed_blocks: false, + }, + onchain: BlockExecutorConfigFromOnchain::new_no_block_limit(), + }, + None, + ) + .map(BlockOutput::into_transaction_outputs_forced) +} diff --git a/aptos-move/aptos-debugger/src/bcs_txn_decoder.rs b/aptos-move/aptos-debugger/src/bcs_txn_decoder.rs index b444aa2311ff1..204b04621f2b0 100644 --- a/aptos-move/aptos-debugger/src/bcs_txn_decoder.rs +++ b/aptos-move/aptos-debugger/src/bcs_txn_decoder.rs @@ -5,7 +5,6 @@ use crate::aptos_debugger::AptosDebugger; use anyhow::Result; use aptos_rest_client::Client; use aptos_types::transaction::SignedTransaction; -use aptos_vm::AptosVM; use clap::Parser; use regex::Regex; use std::io; @@ -65,14 +64,15 @@ impl Command { ); if self.execute { - AptosVM::set_concurrency_level_once(self.concurrency_level); println!(); println!("==============================="); println!("Transaction re-execution result"); println!("==============================="); println!( "{:#?}", - debugger.execute_past_transactions(version, 1, 1).await? + debugger + .execute_past_transactions(version, 1, false, 1, &[self.concurrency_level]) + .await? 
); } diff --git a/aptos-move/aptos-debugger/src/common.rs b/aptos-move/aptos-debugger/src/common.rs index 6b250b8108b42..584a040fb59fa 100644 --- a/aptos-move/aptos-debugger/src/common.rs +++ b/aptos-move/aptos-debugger/src/common.rs @@ -27,8 +27,8 @@ pub struct Opts { #[clap(flatten)] pub(crate) target: Target, - #[clap(long, default_value_t = 1)] - pub(crate) concurrency_level: usize, + #[clap(long, num_args = 0..)] + pub(crate) concurrency_level: Vec, } #[derive(Parser)] diff --git a/aptos-move/aptos-debugger/src/execute_past_transactions.rs b/aptos-move/aptos-debugger/src/execute_past_transactions.rs index 9c949b2a877da..0403e3f51e6db 100644 --- a/aptos-move/aptos-debugger/src/execute_past_transactions.rs +++ b/aptos-move/aptos-debugger/src/execute_past_transactions.rs @@ -4,7 +4,6 @@ use crate::{aptos_debugger::AptosDebugger, common::Opts}; use anyhow::Result; use aptos_rest_client::Client; -use aptos_vm::AptosVM; use clap::Parser; use url::Url; @@ -24,12 +23,13 @@ pub struct Command { #[clap(long)] repeat_execution_times: Option, + + #[clap(long)] + use_same_block_boundaries: bool, } impl Command { pub async fn run(self) -> Result<()> { - AptosVM::set_concurrency_level_once(self.opts.concurrency_level); - let debugger = if let Some(rest_endpoint) = self.opts.target.rest_endpoint { AptosDebugger::rest_client(Client::new(Url::parse(&rest_endpoint)?))? 
} else if let Some(db_path) = self.opts.target.db_path { @@ -42,7 +42,9 @@ impl Command { .execute_past_transactions( self.begin_version, self.limit, + self.use_same_block_boundaries, self.repeat_execution_times.unwrap_or(1), + &self.opts.concurrency_level, ) .await?; diff --git a/aptos-move/aptos-debugger/src/execute_pending_block.rs b/aptos-move/aptos-debugger/src/execute_pending_block.rs index 1f7b6dabc9506..7235dff6e9b01 100644 --- a/aptos-move/aptos-debugger/src/execute_pending_block.rs +++ b/aptos-move/aptos-debugger/src/execute_pending_block.rs @@ -6,7 +6,6 @@ use anyhow::Result; use aptos_crypto::HashValue; use aptos_logger::info; use aptos_rest_client::Client; -use aptos_vm::AptosVM; use clap::Parser; use std::path::PathBuf; use url::Url; @@ -42,8 +41,6 @@ pub struct Command { impl Command { pub async fn run(self) -> Result<()> { - AptosVM::set_concurrency_level_once(self.opts.concurrency_level); - let debugger = if let Some(rest_endpoint) = self.opts.target.rest_endpoint { AptosDebugger::rest_client(Client::new(Url::parse(&rest_endpoint)?))? 
} else if let Some(db_path) = self.opts.target.db_path { @@ -91,6 +88,7 @@ impl Command { self.begin_version, block, self.repeat_execution_times.unwrap_or(1), + &self.opts.concurrency_level, )?; println!("{txn_outputs:#?}"); diff --git a/aptos-move/aptos-e2e-comparison-testing/Cargo.toml b/aptos-move/aptos-e2e-comparison-testing/Cargo.toml index e28cf89d1a5d4..30c851670b4df 100644 --- a/aptos-move/aptos-e2e-comparison-testing/Cargo.toml +++ b/aptos-move/aptos-e2e-comparison-testing/Cargo.toml @@ -22,6 +22,7 @@ bcs = { workspace = true } clap = { workspace = true } futures = { workspace = true } itertools = { workspace = true } +move-binary-format = { workspace = true } move-compiler = { workspace = true } move-core-types = { workspace = true } move-model = { workspace = true } @@ -31,4 +32,3 @@ serde = { workspace = true } tempfile = { workspace = true } tokio = { workspace = true } url = { workspace = true } - diff --git a/aptos-move/aptos-e2e-comparison-testing/src/data_state_view.rs b/aptos-move/aptos-e2e-comparison-testing/src/data_state_view.rs index f5c81a5dc56e3..be1938618686f 100644 --- a/aptos-move/aptos-e2e-comparison-testing/src/data_state_view.rs +++ b/aptos-move/aptos-e2e-comparison-testing/src/data_state_view.rs @@ -12,6 +12,7 @@ use aptos_types::{ use aptos_validator_interface::{AptosValidatorInterface, DebuggerStateView}; use std::{ collections::HashMap, + ops::DerefMut, sync::{Arc, Mutex}, }; @@ -20,7 +21,6 @@ pub struct DataStateView { code_data: Option, data_read_state_keys: Option>>>, } -use std::ops::DerefMut; impl DataStateView { pub fn new( diff --git a/aptos-move/aptos-e2e-comparison-testing/src/execution.rs b/aptos-move/aptos-e2e-comparison-testing/src/execution.rs index 9e85437fce474..4a803878916de 100644 --- a/aptos-move/aptos-e2e-comparison-testing/src/execution.rs +++ b/aptos-move/aptos-e2e-comparison-testing/src/execution.rs @@ -12,20 +12,20 @@ use aptos_language_e2e_tests::{data_store::FakeDataStore, executor::FakeExecutor use 
aptos_types::{ contract_event::ContractEvent, on_chain_config::{FeatureFlag, Features, OnChainConfig}, - transaction::{Transaction, TransactionPayload, Version}, + transaction::{Transaction, Version}, vm_status::VMStatus, write_set::WriteSet, }; use aptos_validator_interface::AptosValidatorInterface; -use aptos_vm::data_cache::AsMoveResolver; use clap::ValueEnum; use itertools::Itertools; +use move_binary_format::file_format_common::VERSION_6; use move_core_types::{account_address::AccountAddress, language_storage::ModuleId}; use move_model::metadata::CompilerVersion; use std::{cmp, collections::HashMap, path::PathBuf, sync::Arc}; -fn load_packages_to_executor( - executor: &mut FakeExecutor, +fn add_packages_to_data_store( + data_store: &mut FakeDataStore, package_info: &PackageInfo, compiled_package_cache: &HashMap>>, ) { @@ -34,12 +34,12 @@ fn load_packages_to_executor( } let compiled_package = compiled_package_cache.get(package_info).unwrap(); for (module_id, module_blob) in compiled_package { - executor.add_module(module_id, module_blob.clone()); + data_store.add_module(module_id, module_blob.clone()); } } -fn load_aptos_packages_to_executor( - executor: &mut FakeExecutor, +fn add_aptos_packages_to_data_store( + data_store: &mut FakeDataStore, compiled_package_map: &HashMap>>, ) { for package in APTOS_PACKAGES { @@ -48,7 +48,7 @@ fn load_aptos_packages_to_executor( package_name: package.to_string(), upgrade_number: None, }; - load_packages_to_executor(executor, &package_info, compiled_package_map); + add_packages_to_data_store(data_store, &package_info, compiled_package_map); } } @@ -102,7 +102,7 @@ impl Execution { Self { input_path, execution_mode, - bytecode_version: 6, + bytecode_version: VERSION_6, } } @@ -270,9 +270,8 @@ impl Execution { return compiled_result; } } - // read the state data; + // read the state data let state = data_manager.get_state(cur_version); - // execute and compare self.execute_and_compare( cur_version, state, @@ -301,7 +300,7 @@ 
impl Execution { package_cache_main = compiled_package_cache_v2; v2_flag = true; } - let res_main_opt = self.execute_code( + let res_main = self.execute_code( cur_version, state.clone(), &txn_idx.package_info, @@ -311,7 +310,7 @@ impl Execution { v2_flag, ); if self.execution_mode.is_compare() { - let res_other_opt = self.execute_code( + let res_other = self.execute_code( cur_version, state, &txn_idx.package_info, @@ -320,20 +319,21 @@ impl Execution { debugger.clone(), true, ); - self.print_mismatches(cur_version, &res_main_opt.unwrap(), &res_other_opt.unwrap()); + self.print_mismatches(cur_version, &res_main, &res_other); } else { - let res = res_main_opt.unwrap(); - if let Ok(res_ok) = res { - self.output_result_str(format!( - "version:{}\nwrite set:{:?}\n events:{:?}\n", - cur_version, res_ok.0, res_ok.1 - )); - } else { - self.output_result_str(format!( - "execution error {} at version: {}, error", - res.unwrap_err(), - cur_version - )); + match res_main { + Ok((write_set, events)) => { + self.output_result_str(format!( + "version:{}\nwrite set:{:?}\n events:{:?}\n", + cur_version, write_set, events + )); + }, + Err(vm_status) => { + self.output_result_str(format!( + "execution error {} at version: {}, error", + vm_status, cur_version + )); + }, } } } @@ -341,74 +341,44 @@ impl Execution { fn execute_code( &self, version: Version, - state: FakeDataStore, + mut state: FakeDataStore, package_info: &PackageInfo, txn: &Transaction, compiled_package_cache: &HashMap>>, debugger_opt: Option>, v2_flag: bool, - ) -> Option), VMStatus>> { - let executor = FakeExecutor::no_genesis(); - let mut executor = executor.set_not_parallel(); - *executor.data_store_mut() = state; - if let Transaction::UserTransaction(signed_trans) = txn { - let sender = signed_trans.sender(); - let payload = signed_trans.payload(); - if let TransactionPayload::EntryFunction(entry_function) = payload { - // Always load 0x1 modules - load_aptos_packages_to_executor(&mut executor, 
compiled_package_cache); - // Load modules - if package_info.is_compilable() { - load_packages_to_executor(&mut executor, package_info, compiled_package_cache); - } - let mut senders = vec![sender]; - senders.extend(signed_trans.authenticator().secondary_signer_addresses()); - let enable_v7 = |features: &mut Features| { - if v2_flag { - features.enable(FeatureFlag::VM_BINARY_FORMAT_V7); - } else { - features.enable(FeatureFlag::VM_BINARY_FORMAT_V6); - } - }; - if let Some(debugger) = debugger_opt { - let data_view = - DataStateView::new(debugger, version, executor.data_store().clone()); - let mut features = - Features::fetch_config(&data_view.as_move_resolver()).unwrap_or_default(); - enable_v7(&mut features); - return Some(executor.try_exec_entry_with_state_view( - senders, - entry_function, - &data_view.as_move_resolver(), - features, - )); - } else { - let mut features = - Features::fetch_config(&executor.data_store().clone().as_move_resolver()) - .unwrap_or_default(); - enable_v7(&mut features); - return Some(executor.try_exec_entry_with_state_view( - senders, - entry_function, - &executor.data_store().clone().as_move_resolver(), - features, - )); - } - } + ) -> Result<(WriteSet, Vec), VMStatus> { + // Always add Aptos (0x1) packages. + add_aptos_packages_to_data_store(&mut state, compiled_package_cache); + + // Add other modules. + if package_info.is_compilable() { + add_packages_to_data_store(&mut state, package_info, compiled_package_cache); } + + // Update features if needed to the correct binary format used by V2 compiler. + let mut features = Features::fetch_config(&state).unwrap_or_default(); + if v2_flag { + features.enable(FeatureFlag::VM_BINARY_FORMAT_V7); + } else { + features.enable(FeatureFlag::VM_BINARY_FORMAT_V6); + } + state.set_features(features); + + // We use executor only to get access to block executor and avoid some of + // the initializations, but ignore its internal state, i.e., FakeDataStore. 
+ let executor = FakeExecutor::no_genesis(); + let txns = vec![txn.clone()]; + if let Some(debugger) = debugger_opt { - let data_view = DataStateView::new(debugger, version, executor.data_store().clone()); - Some( - executor - .execute_transaction_block_with_state_view([txn.clone()].to_vec(), &data_view) - .map(|res| res[0].clone().into()), - ) + let data_view = DataStateView::new(debugger, version, state); + executor + .execute_transaction_block_with_state_view(txns, &data_view) + .map(|mut res| res.pop().unwrap().into()) } else { - Some( - executor - .execute_transaction_block(vec![txn.clone()]) - .map(|res| res[0].clone().into()), - ) + executor + .execute_transaction_block_with_state_view(txns, &state) + .map(|mut res| res.pop().unwrap().into()) } } diff --git a/aptos-move/aptos-gas-calibration/Cargo.toml b/aptos-move/aptos-gas-calibration/Cargo.toml index c4ba044ef4430..b2375eaccdeb7 100644 --- a/aptos-move/aptos-gas-calibration/Cargo.toml +++ b/aptos-move/aptos-gas-calibration/Cargo.toml @@ -2,7 +2,7 @@ name = "aptos-gas-calibration" version = "0.1.0" edition = "2021" -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +license = { workspace = true } [dependencies] anyhow = { workspace = true } diff --git a/aptos-move/aptos-gas-meter/src/meter.rs b/aptos-move/aptos-gas-meter/src/meter.rs index 1bbcf85a26191..f17c266e09fef 100644 --- a/aptos-move/aptos-gas-meter/src/meter.rs +++ b/aptos-move/aptos-gas-meter/src/meter.rs @@ -101,6 +101,13 @@ where MutBorrowField => MUT_BORROW_FIELD, ImmBorrowFieldGeneric => IMM_BORROW_FIELD_GENERIC, MutBorrowFieldGeneric => MUT_BORROW_FIELD_GENERIC, + ImmBorrowVariantField => IMM_BORROW_VARIANT_FIELD, + MutBorrowVariantField => MUT_BORROW_VARIANT_FIELD, + ImmBorrowVariantFieldGeneric => IMM_BORROW_VARIANT_FIELD_GENERIC, + MutBorrowVariantFieldGeneric => MUT_BORROW_VARIANT_FIELD_GENERIC, + TestVariant => TEST_VARIANT, + TestVariantGeneric => TEST_VARIANT_GENERIC, + FreezeRef => 
FREEZE_REF, CastU8 => CAST_U8, diff --git a/aptos-move/aptos-gas-meter/src/traits.rs b/aptos-move/aptos-gas-meter/src/traits.rs index fd2f2b3c367e0..92eed86690800 100644 --- a/aptos-move/aptos-gas-meter/src/traits.rs +++ b/aptos-move/aptos-gas-meter/src/traits.rs @@ -7,7 +7,7 @@ use aptos_types::{ contract_event::ContractEvent, state_store::state_key::StateKey, write_set::WriteOpSize, }; use aptos_vm_types::{ - change_set::VMChangeSet, + change_set::ChangeSetInterface, resolver::ExecutorView, storage::{ io_pricing::IoPricing, @@ -140,7 +140,7 @@ pub trait AptosGasMeter: MoveGasMeter { /// unless you are doing something special, such as injecting additional logging logic. fn process_storage_fee_for_all( &mut self, - change_set: &mut VMChangeSet, + change_set: &mut impl ChangeSetInterface, txn_size: NumBytes, gas_unit_price: FeePerGasUnit, executor_view: &dyn ExecutorView, @@ -173,12 +173,9 @@ pub trait AptosGasMeter: MoveGasMeter { } // Events (no event fee in v2) - let event_fee = change_set - .events() - .iter() - .fold(Fee::new(0), |acc, (event, _)| { - acc + pricing.legacy_storage_fee_per_event(params, event) - }); + let event_fee = change_set.events_iter().fold(Fee::new(0), |acc, event| { + acc + pricing.legacy_storage_fee_per_event(params, event) + }); let event_discount = pricing.legacy_storage_discount_for_events(params, event_fee); let event_net_fee = event_fee .checked_sub(event_discount) diff --git a/aptos-move/aptos-gas-profiling/src/aggregate.rs b/aptos-move/aptos-gas-profiling/src/aggregate.rs index ee1aab3891f8b..0095dad224670 100644 --- a/aptos-move/aptos-gas-profiling/src/aggregate.rs +++ b/aptos-move/aptos-gas-profiling/src/aggregate.rs @@ -11,6 +11,7 @@ use std::collections::{btree_map, BTreeMap}; /// Represents an aggregation of execution gas events, including the count and total gas costs for each type of event. /// /// The events are sorted by the amount of gas used, from high to low. 
+#[derive(Debug)] pub struct AggregatedExecutionGasEvents { /// The gas scaling factor. /// This is included so to make this struct self-contained, suitable for displaying in (external) gas units. diff --git a/aptos-move/aptos-gas-profiling/src/profiler.rs b/aptos-move/aptos-gas-profiling/src/profiler.rs index 89ba8fa72e0b5..01b7c8076a350 100644 --- a/aptos-move/aptos-gas-profiling/src/profiler.rs +++ b/aptos-move/aptos-gas-profiling/src/profiler.rs @@ -11,7 +11,7 @@ use aptos_types::{ contract_event::ContractEvent, state_store::state_key::StateKey, write_set::WriteOpSize, }; use aptos_vm_types::{ - change_set::VMChangeSet, resolver::ExecutorView, storage::space_pricing::ChargeAndRefund, + change_set::ChangeSetInterface, resolver::ExecutorView, storage::space_pricing::ChargeAndRefund, }; use move_binary_format::{ errors::{Location, PartialVMResult, VMResult}, @@ -568,7 +568,7 @@ where fn process_storage_fee_for_all( &mut self, - change_set: &mut VMChangeSet, + change_set: &mut impl ChangeSetInterface, txn_size: NumBytes, gas_unit_price: FeePerGasUnit, executor_view: &dyn ExecutorView, @@ -612,7 +612,7 @@ where // Events (no event fee in v2) let mut event_fee = Fee::new(0); let mut event_fees = vec![]; - for (event, _) in change_set.events().iter() { + for event in change_set.events_iter() { let fee = pricing.legacy_storage_fee_per_event(params, event); event_fees.push(EventStorage { ty: event.type_tag().clone(), diff --git a/aptos-move/aptos-gas-schedule/src/gas_schedule/instr.rs b/aptos-move/aptos-gas-schedule/src/gas_schedule/instr.rs index e965f8d918b55..1e7f1a4860103 100644 --- a/aptos-move/aptos-gas-schedule/src/gas_schedule/instr.rs +++ b/aptos-move/aptos-gas-schedule/src/gas_schedule/instr.rs @@ -3,7 +3,7 @@ //! This module defines the gas parameters for all Move instructions. 
-use crate::gas_schedule::VMGasParameters; +use crate::{gas_feature_versions::RELEASE_V1_18, gas_schedule::VMGasParameters}; use aptos_gas_algebra::{ InternalGas, InternalGasPerAbstractValueUnit, InternalGasPerArg, InternalGasPerByte, InternalGasPerTypeNode, @@ -43,8 +43,23 @@ crate::gas_schedule::macros::define_gas_parameters!( [mut_borrow_loc: InternalGas, "mut_borrow_loc", 220], [imm_borrow_field: InternalGas, "imm_borrow_field", 735], [mut_borrow_field: InternalGas, "mut_borrow_field", 735], - [imm_borrow_field_generic: InternalGas, "imm_borrow_field_generic", 735], + [imm_borrow_field_generic: InternalGas, "imm_borrow_field_generic" , 735], [mut_borrow_field_generic: InternalGas, "mut_borrow_field_generic", 735], + [imm_borrow_variant_field: InternalGas, + { RELEASE_V1_18.. => "imm_borrow_variant_field" }, 835], + [mut_borrow_variant_field: InternalGas, + { RELEASE_V1_18.. => "mut_borrow_variant_field" }, 835], + [imm_borrow_variant_field_generic: InternalGas, + { RELEASE_V1_18.. => "imm_borrow_variant_field_generic" }, 835], + [mut_borrow_variant_field_generic: InternalGas, + { RELEASE_V1_18.. => "mut_borrow_variant_field_generic" }, 835], + + // variant testing + [test_variant: InternalGas, + { RELEASE_V1_18.. => "test_variant" }, 535], + [test_variant_generic: InternalGas, + { RELEASE_V1_18.. => "test_variant_generic" }, 535], + // locals [copy_loc_base: InternalGas, "copy_loc.base", 294], [copy_loc_per_abs_val_unit: InternalGasPerAbstractValueUnit, "copy_loc.per_abs_val_unit", 14], diff --git a/aptos-move/aptos-gas-schedule/src/gas_schedule/move_stdlib.rs b/aptos-move/aptos-gas-schedule/src/gas_schedule/move_stdlib.rs index aa93df0d93859..c25afc6e515ba 100644 --- a/aptos-move/aptos-gas-schedule/src/gas_schedule/move_stdlib.rs +++ b/aptos-move/aptos-gas-schedule/src/gas_schedule/move_stdlib.rs @@ -3,7 +3,7 @@ //! This module defines the gas parameters for Move Stdlib. 
-use crate::gas_schedule::NativeGasParameters; +use crate::{gas_feature_versions::RELEASE_V1_18, gas_schedule::NativeGasParameters}; use aptos_gas_algebra::{InternalGas, InternalGasPerByte}; crate::gas_schedule::macros::define_gas_parameters!( @@ -31,5 +31,10 @@ crate::gas_schedule::macros::define_gas_parameters!( [string_index_of_base: InternalGas, "string.index_of.base", 1470], [string_index_of_per_byte_pattern: InternalGasPerByte, "string.index_of.per_byte_pattern", 73], [string_index_of_per_byte_searched: InternalGasPerByte, "string.index_of.per_byte_searched", 36], + + // Note(Gas): these initial values are guesswork. + [bcs_serialized_size_base: InternalGas, { RELEASE_V1_18.. => "bcs.serialized_size.base" }, 735], + [bcs_serialized_size_per_byte_serialized: InternalGasPerByte, { RELEASE_V1_18.. => "bcs.serialized_size.per_byte_serialized" }, 36], + [bcs_serialized_size_failure: InternalGas, { RELEASE_V1_18.. => "bcs.serialized_size.failure" }, 3676], ] ); diff --git a/aptos-move/aptos-gas-schedule/src/ver.rs b/aptos-move/aptos-gas-schedule/src/ver.rs index 3951385af5187..d424c330b6884 100644 --- a/aptos-move/aptos-gas-schedule/src/ver.rs +++ b/aptos-move/aptos-gas-schedule/src/ver.rs @@ -8,6 +8,9 @@ /// - Changing how gas is calculated in any way /// /// Change log: +/// - V22 +/// - Gas parameters for enums +/// - Gas parameters for new native function `bcs::serialized_size` /// - V21 /// - Fix type to type tag conversion in MoveVM /// - V20 @@ -66,7 +69,7 @@ /// global operations. 
/// - V1 /// - TBA -pub const LATEST_GAS_FEATURE_VERSION: u64 = gas_feature_versions::RELEASE_V1_16; +pub const LATEST_GAS_FEATURE_VERSION: u64 = gas_feature_versions::RELEASE_V1_20; pub mod gas_feature_versions { pub const RELEASE_V1_8: u64 = 11; @@ -79,4 +82,8 @@ pub mod gas_feature_versions { pub const RELEASE_V1_14: u64 = 19; pub const RELEASE_V1_15: u64 = 20; pub const RELEASE_V1_16: u64 = 21; + pub const RELEASE_V1_18: u64 = 22; + pub const RELEASE_V1_19: u64 = 23; + pub const RELEASE_V1_20: u64 = 24; + pub const RELEASE_V1_21: u64 = 25; } diff --git a/aptos-move/aptos-release-builder/Cargo.toml b/aptos-move/aptos-release-builder/Cargo.toml index a42cff88383d2..5b57f024147fc 100644 --- a/aptos-move/aptos-release-builder/Cargo.toml +++ b/aptos-move/aptos-release-builder/Cargo.toml @@ -24,18 +24,28 @@ aptos-gas-schedule-updator = { workspace = true } aptos-genesis = { workspace = true } aptos-infallible = { workspace = true } aptos-keygen = { workspace = true } +aptos-language-e2e-tests = { workspace = true } +aptos-move-debugger = { workspace = true } aptos-rest-client = { workspace = true } aptos-temppath = { workspace = true } aptos-types = { workspace = true } +aptos-vm = { workspace = true } +aptos-vm-logging = { workspace = true } +aptos-vm-types = { workspace = true } bcs = { workspace = true } clap = { workspace = true } futures = { workspace = true } git2 = { workspace = true } handlebars = { workspace = true } hex = { workspace = true } +move-binary-format = { workspace = true } +move-bytecode-verifier = { workspace = true } move-core-types = { workspace = true } move-model = { workspace = true } +move-vm-runtime = { workspace = true } +move-vm-types = { workspace = true } once_cell = { workspace = true } +parking_lot = { workspace = true } reqwest = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } @@ -45,6 +55,7 @@ strum = { workspace = true } strum_macros = { workspace = true } tokio = { workspace = true } url = { 
workspace = true } +walkdir = { workspace = true } [[bin]] name = "aptos-release-builder" diff --git a/aptos-move/aptos-release-builder/data/release.yaml b/aptos-move/aptos-release-builder/data/release.yaml index bb9643f6fdcca..a12d498463731 100644 --- a/aptos-move/aptos-release-builder/data/release.yaml +++ b/aptos-move/aptos-release-builder/data/release.yaml @@ -1,16 +1,19 @@ --- -remote_endpoint: ~ -name: "v1.16" +remote_endpoint: https://fullnode.mainnet.aptoslabs.com +name: "TBD" proposals: - - name: step_1_upgrade_framework + - name: proposal_1_upgrade_framework metadata: - title: "Multi-step proposal to upgrade mainnet framework to v1.16" - description: "This includes changes in https://github.com/aptos-labs/aptos-core/commits/aptos-release-v1.16" + title: "Multi-step proposal to upgrade mainnet framework, version TBD" + description: "This includes changes in (TBA: URL to changes)" execution_mode: MultiStep update_sequence: - Gas: - old: https://raw.githubusercontent.com/aptos-labs/aptos-networks/main/gas/v1.15.2.json - new: https://raw.githubusercontent.com/aptos-labs/aptos-networks/main/gas/v1.16.1-rc.json + new: current - Framework: bytecode_version: 6 git_hash: ~ + - FeatureFlag: + enabled: + - allow_serialized_script_args + diff --git a/aptos-move/aptos-release-builder/src/components/consensus_config.rs b/aptos-move/aptos-release-builder/src/components/consensus_config.rs index 84411598933f4..9eeac9ab67549 100644 --- a/aptos-move/aptos-release-builder/src/components/consensus_config.rs +++ b/aptos-move/aptos-release-builder/src/components/consensus_config.rs @@ -3,13 +3,16 @@ use crate::{components::get_signer_arg, utils::*}; use anyhow::Result; +use aptos_crypto::HashValue; +use aptos_framework::generate_blob_as_hex_string; use aptos_types::on_chain_config::OnChainConsensusConfig; use move_model::{code_writer::CodeWriter, emit, emitln, model::Loc}; pub fn generate_consensus_upgrade_proposal( consensus_config: &OnChainConsensusConfig, is_testnet: bool, 
- next_execution_hash: Vec, + next_execution_hash: Option, + is_multi_step: bool, ) -> Result> { let signer_arg = get_signer_arg(is_testnet, &next_execution_hash); let mut result = vec![]; @@ -23,7 +26,8 @@ pub fn generate_consensus_upgrade_proposal( let proposal = generate_governance_proposal( &writer, is_testnet, - next_execution_hash.clone(), + next_execution_hash, + is_multi_step, &["aptos_framework::consensus_config"], |writer| { let consensus_config_blob = bcs::to_bytes(consensus_config).unwrap(); diff --git a/aptos-move/aptos-release-builder/src/components/execution_config.rs b/aptos-move/aptos-release-builder/src/components/execution_config.rs index 27e27138711d5..554d163141c4f 100644 --- a/aptos-move/aptos-release-builder/src/components/execution_config.rs +++ b/aptos-move/aptos-release-builder/src/components/execution_config.rs @@ -3,13 +3,16 @@ use crate::{components::get_signer_arg, utils::*}; use anyhow::Result; +use aptos_crypto::HashValue; +use aptos_framework::generate_blob_as_hex_string; use aptos_types::on_chain_config::OnChainExecutionConfig; use move_model::{code_writer::CodeWriter, emit, emitln, model::Loc}; pub fn generate_execution_config_upgrade_proposal( execution_config: &OnChainExecutionConfig, is_testnet: bool, - next_execution_hash: Vec, + next_execution_hash: Option, + is_multi_step: bool, ) -> Result> { let signer_arg = get_signer_arg(is_testnet, &next_execution_hash); let mut result = vec![]; @@ -23,7 +26,8 @@ pub fn generate_execution_config_upgrade_proposal( let proposal = generate_governance_proposal( &writer, is_testnet, - next_execution_hash.clone(), + next_execution_hash, + is_multi_step, &["aptos_framework::execution_config"], |writer| { let execution_config_blob = bcs::to_bytes(execution_config).unwrap(); diff --git a/aptos-move/aptos-release-builder/src/components/feature_flags.rs b/aptos-move/aptos-release-builder/src/components/feature_flags.rs index 2d6edd728308e..6a1cbabc37f95 100644 --- 
a/aptos-move/aptos-release-builder/src/components/feature_flags.rs +++ b/aptos-move/aptos-release-builder/src/components/feature_flags.rs @@ -3,6 +3,7 @@ use crate::{components::get_signer_arg, utils::*}; use anyhow::Result; +use aptos_crypto::HashValue; use aptos_types::on_chain_config::{FeatureFlag as AptosFeatureFlag, Features as AptosFeatures}; use move_model::{code_writer::CodeWriter, emit, emitln, model::Loc}; use serde::{Deserialize, Serialize}; @@ -121,6 +122,15 @@ pub enum FeatureFlag { DefaultToConcurrentFungibleBalance, LimitVMTypeSize, AbortIfMultisigPayloadMismatch, + DisallowUserNative, + AllowSerializedScriptArgs, + UseCompatibilityCheckerV2, + EnableEnumTypes, + EnableResourceAccessControl, + RejectUnstableBytecodeForScript, + FederatedKeyless, + TransactionSimulationEnhancement, + CollectionOwner, } fn generate_features_blob(writer: &CodeWriter, data: &[u64]) { @@ -144,7 +154,8 @@ fn generate_features_blob(writer: &CodeWriter, data: &[u64]) { pub fn generate_feature_upgrade_proposal( features: &Features, is_testnet: bool, - next_execution_hash: Vec, + next_execution_hash: Option, + is_multi_step: bool, ) -> Result> { let signer_arg = get_signer_arg(is_testnet, &next_execution_hash); let mut result = vec![]; @@ -173,7 +184,8 @@ pub fn generate_feature_upgrade_proposal( let proposal = generate_governance_proposal( &writer, is_testnet, - next_execution_hash.clone(), + next_execution_hash, + is_multi_step, &["std::features"], |writer| { emit!(writer, "let enabled_blob: vector = "); @@ -316,6 +328,25 @@ impl From for AptosFeatureFlag { FeatureFlag::AbortIfMultisigPayloadMismatch => { AptosFeatureFlag::ABORT_IF_MULTISIG_PAYLOAD_MISMATCH }, + FeatureFlag::DisallowUserNative => AptosFeatureFlag::DISALLOW_USER_NATIVES, + FeatureFlag::AllowSerializedScriptArgs => { + AptosFeatureFlag::ALLOW_SERIALIZED_SCRIPT_ARGS + }, + FeatureFlag::UseCompatibilityCheckerV2 => { + AptosFeatureFlag::USE_COMPATIBILITY_CHECKER_V2 + }, + FeatureFlag::EnableEnumTypes => 
AptosFeatureFlag::ENABLE_ENUM_TYPES, + FeatureFlag::EnableResourceAccessControl => { + AptosFeatureFlag::ENABLE_RESOURCE_ACCESS_CONTROL + }, + FeatureFlag::RejectUnstableBytecodeForScript => { + AptosFeatureFlag::REJECT_UNSTABLE_BYTECODE_FOR_SCRIPT + }, + FeatureFlag::FederatedKeyless => AptosFeatureFlag::FEDERATED_KEYLESS, + FeatureFlag::TransactionSimulationEnhancement => { + AptosFeatureFlag::TRANSACTION_SIMULATION_ENHANCEMENT + }, + FeatureFlag::CollectionOwner => AptosFeatureFlag::COLLECTION_OWNER, } } } @@ -440,6 +471,25 @@ impl From for FeatureFlag { AptosFeatureFlag::ABORT_IF_MULTISIG_PAYLOAD_MISMATCH => { FeatureFlag::AbortIfMultisigPayloadMismatch }, + AptosFeatureFlag::DISALLOW_USER_NATIVES => FeatureFlag::DisallowUserNative, + AptosFeatureFlag::ALLOW_SERIALIZED_SCRIPT_ARGS => { + FeatureFlag::AllowSerializedScriptArgs + }, + AptosFeatureFlag::USE_COMPATIBILITY_CHECKER_V2 => { + FeatureFlag::UseCompatibilityCheckerV2 + }, + AptosFeatureFlag::ENABLE_ENUM_TYPES => FeatureFlag::EnableEnumTypes, + AptosFeatureFlag::ENABLE_RESOURCE_ACCESS_CONTROL => { + FeatureFlag::EnableResourceAccessControl + }, + AptosFeatureFlag::REJECT_UNSTABLE_BYTECODE_FOR_SCRIPT => { + FeatureFlag::RejectUnstableBytecodeForScript + }, + AptosFeatureFlag::FEDERATED_KEYLESS => FeatureFlag::FederatedKeyless, + AptosFeatureFlag::TRANSACTION_SIMULATION_ENHANCEMENT => { + FeatureFlag::TransactionSimulationEnhancement + }, + AptosFeatureFlag::COLLECTION_OWNER => FeatureFlag::CollectionOwner, } } } diff --git a/aptos-move/aptos-release-builder/src/components/framework.rs b/aptos-move/aptos-release-builder/src/components/framework.rs index d95c585d8e4fc..df8509cfe8934 100644 --- a/aptos-move/aptos-release-builder/src/components/framework.rs +++ b/aptos-move/aptos-release-builder/src/components/framework.rs @@ -3,6 +3,7 @@ use crate::{aptos_core_path, components::get_execution_hash}; use anyhow::Result; +use aptos_crypto::HashValue; use aptos_framework::{BuildOptions, BuiltPackage, 
ReleasePackage}; use aptos_temppath::TempPath; use aptos_types::account_address::AccountAddress; @@ -21,8 +22,14 @@ pub struct FrameworkReleaseConfig { pub fn generate_upgrade_proposals( config: &FrameworkReleaseConfig, is_testnet: bool, - next_execution_hash: Vec, + next_execution_hash: Option, + is_multi_step: bool, ) -> Result> { + assert!( + is_multi_step || next_execution_hash.is_none(), + "only multi-step proposals can have a next execution hash" + ); + const APTOS_GIT_PATH: &str = "https://github.com/aptos-labs/aptos-core.git"; let mut package_path_list = [ @@ -54,7 +61,7 @@ pub fn generate_upgrade_proposals( // For generating multi-step proposal files, we need to generate them in the reverse order since // we need the hash of the next script. // We will reverse the order back when writing the files into a directory. - if !next_execution_hash.is_empty() { + if is_multi_step { package_path_list.reverse(); } @@ -102,16 +109,10 @@ pub fn generate_upgrade_proposals( let package = BuiltPackage::build(package_path, options)?; let release = ReleasePackage::new(package)?; - // If we're generating a single-step proposal on testnet - if is_testnet && next_execution_hash.is_empty() { - release.generate_script_proposal_testnet(account, move_script_path.clone())?; - // If we're generating a single-step proposal on mainnet - } else if next_execution_hash.is_empty() { - release.generate_script_proposal(account, move_script_path.clone())?; + if is_multi_step { // If we're generating a multi-step proposal - } else { let next_execution_hash_bytes = if result.is_empty() { - next_execution_hash.clone() + next_execution_hash } else { get_execution_hash(&result) }; @@ -120,7 +121,13 @@ pub fn generate_upgrade_proposals( move_script_path.clone(), next_execution_hash_bytes, )?; - }; + } else if is_testnet { + // If we're generating a single-step proposal on testnet + release.generate_script_proposal_testnet(account, move_script_path.clone())?; + } else { + // If we're generating a 
single-step proposal on mainnet + release.generate_script_proposal(account, move_script_path.clone())?; + } let mut script = format!( "// Framework commit hash: {}\n// Builder commit hash: {}\n", diff --git a/aptos-move/aptos-release-builder/src/components/gas.rs b/aptos-move/aptos-release-builder/src/components/gas.rs index 7e247e8a402a1..9534e6960da05 100644 --- a/aptos-move/aptos-release-builder/src/components/gas.rs +++ b/aptos-move/aptos-release-builder/src/components/gas.rs @@ -3,6 +3,8 @@ use crate::{components::get_signer_arg, utils::*}; use anyhow::Result; +use aptos_crypto::HashValue; +use aptos_framework::generate_blob_as_hex_string; use aptos_types::on_chain_config::{DiffItem, GasScheduleV2}; use move_model::{code_writer::CodeWriter, emit, emitln, model::Loc}; use sha3::{Digest, Sha3_512}; @@ -79,7 +81,8 @@ pub fn generate_gas_upgrade_proposal( old_gas_schedule: Option<&GasScheduleV2>, new_gas_schedule: &GasScheduleV2, is_testnet: bool, - next_execution_hash: Vec, + next_execution_hash: Option, + is_multi_step: bool, ) -> Result> { let signer_arg = get_signer_arg(is_testnet, &next_execution_hash); let mut result = vec![]; @@ -114,7 +117,8 @@ pub fn generate_gas_upgrade_proposal( let proposal = generate_governance_proposal( &writer, is_testnet, - next_execution_hash.clone(), + next_execution_hash, + is_multi_step, &["aptos_framework::gas_schedule"], |writer| { let gas_schedule_blob = bcs::to_bytes(new_gas_schedule).unwrap(); diff --git a/aptos-move/aptos-release-builder/src/components/jwk_consensus_config.rs b/aptos-move/aptos-release-builder/src/components/jwk_consensus_config.rs index fbe08d117fd82..930ac77028eab 100644 --- a/aptos-move/aptos-release-builder/src/components/jwk_consensus_config.rs +++ b/aptos-move/aptos-release-builder/src/components/jwk_consensus_config.rs @@ -2,13 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{components::get_signer_arg, utils::generate_governance_proposal}; +use aptos_crypto::HashValue; use 
aptos_types::on_chain_config::OnChainJWKConsensusConfig; use move_model::{code_writer::CodeWriter, emitln, model::Loc}; pub fn generate_jwk_consensus_config_update_proposal( config: &OnChainJWKConsensusConfig, is_testnet: bool, - next_execution_hash: Vec, + next_execution_hash: Option, + is_multi_step: bool, ) -> anyhow::Result> { let signer_arg = get_signer_arg(is_testnet, &next_execution_hash); let mut result = vec![]; @@ -18,7 +20,8 @@ pub fn generate_jwk_consensus_config_update_proposal( let proposal = generate_governance_proposal( &writer, is_testnet, - next_execution_hash.clone(), + next_execution_hash, + is_multi_step, &["aptos_framework::jwk_consensus_config", "std::string::utf8"], |writer| { match config { diff --git a/aptos-move/aptos-release-builder/src/components/mod.rs b/aptos-move/aptos-release-builder/src/components/mod.rs index 378404cbb6227..35b4e05e4de3a 100644 --- a/aptos-move/aptos-release-builder/src/components/mod.rs +++ b/aptos-move/aptos-release-builder/src/components/mod.rs @@ -11,6 +11,7 @@ use crate::{ }; use anyhow::{anyhow, bail, Context, Result}; use aptos::governance::GenerateExecutionHash; +use aptos_crypto::HashValue; use aptos_gas_schedule::LATEST_GAS_FEATURE_VERSION; use aptos_infallible::duration_since_epoch; use aptos_rest_client::Client; @@ -25,6 +26,7 @@ use aptos_types::{ }; use futures::executor::block_on; use handlebars::Handlebars; +use move_binary_format::file_format_common::VERSION_6; use once_cell::sync::Lazy; use serde::{de::Visitor, Deserialize, Deserializer, Serialize, Serializer}; use std::{ @@ -232,8 +234,9 @@ impl ReleaseEntry { if is_multi_step { get_execution_hash(result) } else { - "".to_owned().into_bytes() + None }, + is_multi_step, ) .unwrap(), ); @@ -251,7 +254,12 @@ impl ReleaseEntry { ), None => { match client { - Some(client) => Some(fetch_config::(client)?), + Some(_client) => { + // We could return `Some(fetch_config::(client)?)`, + // but this makes certain test scenarios flaky, so just return + // 
None here + None + }, None => { println!("!!! WARNING !!!"); println!("Generating gas schedule upgrade without a base for comparison."); @@ -275,8 +283,9 @@ impl ReleaseEntry { if is_multi_step { get_execution_hash(result) } else { - "".to_owned().into_bytes() + None }, + is_multi_step, )?); } }, @@ -288,8 +297,9 @@ impl ReleaseEntry { if is_multi_step { get_execution_hash(result) } else { - "".to_owned().into_bytes() + None }, + is_multi_step, )?); } }, @@ -316,8 +326,9 @@ impl ReleaseEntry { if is_multi_step { get_execution_hash(result) } else { - "".to_owned().into_bytes() + None }, + is_multi_step, )?); } }, @@ -329,8 +340,9 @@ impl ReleaseEntry { if is_multi_step { get_execution_hash(result) } else { - "".to_owned().into_bytes() + None }, + is_multi_step, )?); } }, @@ -343,8 +355,9 @@ impl ReleaseEntry { if is_multi_step { get_execution_hash(result) } else { - "".to_owned().into_bytes() + None }, + is_multi_step, )?, ); } @@ -356,8 +369,9 @@ impl ReleaseEntry { if is_multi_step { get_execution_hash(result) } else { - "".to_owned().into_bytes() + None }, + is_multi_step, )?); }, ReleaseEntry::RawScript(script_path) => { @@ -409,8 +423,9 @@ impl ReleaseEntry { if is_multi_step { get_execution_hash(result) } else { - "".to_owned().into_bytes() + None }, + is_multi_step, )?, ); }, @@ -422,8 +437,9 @@ impl ReleaseEntry { if is_multi_step { get_execution_hash(result) } else { - "".to_owned().into_bytes() + None }, + is_multi_step, )?, ); }, @@ -717,7 +733,7 @@ impl Default for ReleaseConfig { metadata: ProposalMetadata::default(), name: "framework".to_string(), update_sequence: vec![ReleaseEntry::Framework(FrameworkReleaseConfig { - bytecode_version: 6, // TODO: remove explicit bytecode version from sources + bytecode_version: VERSION_6, git_hash: None, })], }, @@ -747,9 +763,9 @@ impl Default for ReleaseConfig { transaction_shuffler_type: TransactionShufflerType::DeprecatedSenderAwareV1(32), })), - ReleaseEntry::RawScript(PathBuf::from( - 
"data/proposals/empty_multi_step.move", - )), + //ReleaseEntry::RawScript(PathBuf::from( + // "data/proposals/empty_multi_step.move", + //)), ], }, ], @@ -757,9 +773,9 @@ impl Default for ReleaseConfig { } } -pub fn get_execution_hash(result: &[(String, String)]) -> Vec { +pub fn get_execution_hash(result: &[(String, String)]) -> Option { if result.is_empty() { - "vector::empty()".to_owned().into_bytes() + None } else { let temp_script_path = TempPath::new(); temp_script_path.create_as_file().unwrap(); @@ -780,7 +796,7 @@ pub fn get_execution_hash(result: &[(String, String)]) -> Vec { } .generate_hash() .unwrap(); - hash.to_vec() + Some(hash) } } @@ -821,8 +837,8 @@ impl Default for ProposalMetadata { } } -fn get_signer_arg(is_testnet: bool, next_execution_hash: &[u8]) -> &str { - if is_testnet && next_execution_hash.is_empty() { +fn get_signer_arg(is_testnet: bool, next_execution_hash: &Option) -> &str { + if is_testnet && next_execution_hash.is_none() { "framework_signer" } else { "&framework_signer" diff --git a/aptos-move/aptos-release-builder/src/components/oidc_providers.rs b/aptos-move/aptos-release-builder/src/components/oidc_providers.rs index 1da209d3e2c86..4baa424ad116a 100644 --- a/aptos-move/aptos-release-builder/src/components/oidc_providers.rs +++ b/aptos-move/aptos-release-builder/src/components/oidc_providers.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{components::get_signer_arg, utils::generate_governance_proposal}; +use aptos_crypto::HashValue; use move_model::{code_writer::CodeWriter, emitln, model::Loc}; use serde::{Deserialize, Serialize}; @@ -20,7 +21,8 @@ pub enum OidcProviderOp { pub fn generate_oidc_provider_ops_proposal( ops: &[OidcProviderOp], is_testnet: bool, - next_execution_hash: Vec, + next_execution_hash: Option, + is_multi_step: bool, ) -> anyhow::Result> { let signer_arg = get_signer_arg(is_testnet, &next_execution_hash); let mut result = vec![]; @@ -30,7 +32,8 @@ pub fn 
generate_oidc_provider_ops_proposal( let proposal = generate_governance_proposal( &writer, is_testnet, - next_execution_hash.clone(), + next_execution_hash, + is_multi_step, &["aptos_framework::jwks"], |writer| { for op in ops { diff --git a/aptos-move/aptos-release-builder/src/components/randomness_config.rs b/aptos-move/aptos-release-builder/src/components/randomness_config.rs index f3a59713874db..3f01c08376e2b 100644 --- a/aptos-move/aptos-release-builder/src/components/randomness_config.rs +++ b/aptos-move/aptos-release-builder/src/components/randomness_config.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{components::get_signer_arg, utils::generate_governance_proposal}; +use aptos_crypto::HashValue; use aptos_types::on_chain_config::OnChainRandomnessConfig; use move_model::{code_writer::CodeWriter, emitln, model::Loc}; use serde::{Deserialize, Serialize}; @@ -47,7 +48,8 @@ impl From for OnChainRandomnessConfig { pub fn generate_randomness_config_update_proposal( config: &ReleaseFriendlyRandomnessConfig, is_testnet: bool, - next_execution_hash: Vec, + next_execution_hash: Option, + is_multi_step: bool, ) -> anyhow::Result> { let signer_arg = get_signer_arg(is_testnet, &next_execution_hash); let mut result = vec![]; @@ -57,7 +59,8 @@ pub fn generate_randomness_config_update_proposal( let proposal = generate_governance_proposal( &writer, is_testnet, - next_execution_hash.clone(), + next_execution_hash, + is_multi_step, &[ "aptos_framework::randomness_config", "aptos_std::fixed_point64", diff --git a/aptos-move/aptos-release-builder/src/components/transaction_fee.rs b/aptos-move/aptos-release-builder/src/components/transaction_fee.rs index 9b4b17dce06c7..a9f72e416bd5e 100644 --- a/aptos-move/aptos-release-builder/src/components/transaction_fee.rs +++ b/aptos-move/aptos-release-builder/src/components/transaction_fee.rs @@ -3,13 +3,15 @@ use crate::utils::*; use anyhow::Result; +use aptos_crypto::HashValue; use 
move_model::{code_writer::CodeWriter, emitln, model::Loc}; pub fn generate_fee_distribution_proposal( function_name: String, burn_percentage: u8, is_testnet: bool, - next_execution_hash: Vec, + next_execution_hash: Option, + is_multi_step: bool, ) -> Result> { let mut result = vec![]; @@ -19,6 +21,7 @@ pub fn generate_fee_distribution_proposal( &writer, is_testnet, next_execution_hash, + is_multi_step, &["aptos_framework::transaction_fee"], |writer| { emitln!( @@ -37,25 +40,29 @@ pub fn generate_fee_distribution_proposal( pub fn generate_proposal_to_initialize_fee_collection_and_distribution( burn_percentage: u8, is_testnet: bool, - next_execution_hash: Vec, + next_execution_hash: Option, + is_multi_step: bool, ) -> Result> { generate_fee_distribution_proposal( "initialize_fee_collection_and_distribution".to_string(), burn_percentage, is_testnet, next_execution_hash, + is_multi_step, ) } pub fn generate_proposal_to_upgrade_burn_percentage( burn_percentage: u8, is_testnet: bool, - next_execution_hash: Vec, + next_execution_hash: Option, + is_multi_step: bool, ) -> Result> { generate_fee_distribution_proposal( "upgrade_burn_percentage".to_string(), burn_percentage, is_testnet, next_execution_hash, + is_multi_step, ) } diff --git a/aptos-move/aptos-release-builder/src/components/version.rs b/aptos-move/aptos-release-builder/src/components/version.rs index 50cbd6987527f..3d385f118ad9a 100644 --- a/aptos-move/aptos-release-builder/src/components/version.rs +++ b/aptos-move/aptos-release-builder/src/components/version.rs @@ -3,13 +3,15 @@ use crate::{components::get_signer_arg, utils::*}; use anyhow::Result; +use aptos_crypto::HashValue; use aptos_types::on_chain_config::AptosVersion; use move_model::{code_writer::CodeWriter, emitln, model::Loc}; pub fn generate_version_upgrade_proposal( version: &AptosVersion, is_testnet: bool, - next_execution_hash: Vec, + next_execution_hash: Option, + is_multi_step: bool, ) -> Result> { let signer_arg = get_signer_arg(is_testnet, 
&next_execution_hash); let mut result = vec![]; @@ -19,7 +21,8 @@ pub fn generate_version_upgrade_proposal( let proposal = generate_governance_proposal( &writer, is_testnet, - next_execution_hash.clone(), + next_execution_hash, + is_multi_step, &["aptos_framework::version"], |writer| { emitln!( diff --git a/aptos-move/aptos-release-builder/src/lib.rs b/aptos-move/aptos-release-builder/src/lib.rs index 913af0d23002d..1a74516d9827e 100644 --- a/aptos-move/aptos-release-builder/src/lib.rs +++ b/aptos-move/aptos-release-builder/src/lib.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 pub mod components; +pub mod simulate; mod utils; pub mod validate; diff --git a/aptos-move/aptos-release-builder/src/main.rs b/aptos-move/aptos-release-builder/src/main.rs index 205e8d8224621..8352800e81cd7 100644 --- a/aptos-move/aptos-release-builder/src/main.rs +++ b/aptos-move/aptos-release-builder/src/main.rs @@ -8,6 +8,7 @@ use aptos_gas_schedule::LATEST_GAS_FEATURE_VERSION; use aptos_release_builder::{ components::fetch_config, initialize_aptos_core_path, + simulate::simulate_all_proposals, validate::{DEFAULT_RESOLUTION_TIME, FAST_RESOLUTION_TIME}, }; use aptos_types::{ @@ -17,6 +18,7 @@ use aptos_types::{ }; use clap::{Parser, Subcommand}; use std::{path::PathBuf, str::FromStr}; +use url::Url; #[derive(Parser)] pub struct Argument { @@ -26,6 +28,43 @@ pub struct Argument { aptos_core_path: Option, } +// TODO(vgao1996): unify with `ReplayNetworkSelection` in the `aptos` crate. 
+#[derive(Clone, Debug)] +pub enum NetworkSelection { + Mainnet, + Testnet, + Devnet, + RestEndpoint(String), +} + +impl FromStr for NetworkSelection { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + Ok(match s { + "mainnet" => Self::Mainnet, + "testnet" => Self::Testnet, + "devnet" => Self::Devnet, + _ => Self::RestEndpoint(s.to_owned()), + }) + } +} + +impl NetworkSelection { + fn to_url(&self) -> anyhow::Result { + use NetworkSelection::*; + + let s = match &self { + Mainnet => "https://fullnode.mainnet.aptoslabs.com", + Testnet => "https://fullnode.testnet.aptoslabs.com", + Devnet => "https://fullnode.devnet.aptoslabs.com", + RestEndpoint(url) => url, + }; + + Ok(Url::parse(s)?) + } +} + #[derive(Subcommand, Debug)] pub enum Commands { /// Generate sets of governance proposals based on the release_config file passed in @@ -34,6 +73,24 @@ pub enum Commands { release_config: PathBuf, #[clap(short, long)] output_dir: PathBuf, + + #[clap(long)] + simulate: Option, + }, + /// Simulate a multi-step proposal on the specified network, using its current states. + /// The simulation will execute the governance scripts, as if the proposal is already + /// approved. + Simulate { + /// Directory that may contain one or more proposals at any level + /// within its sub-directory hierarchy. + #[clap(short, long)] + path: PathBuf, + + /// The network to simulate on. + /// + /// Possible values: devnet, testnet, mainnet, + #[clap(long)] + network: NetworkSelection, }, /// Generate sets of governance proposals with default release config. WriteDefault { @@ -126,12 +183,23 @@ async fn main() -> anyhow::Result<()> { Commands::GenerateProposals { release_config, output_dir, + simulate, } => { aptos_release_builder::ReleaseConfig::load_config(release_config.as_path()) .with_context(|| "Failed to load release config".to_string())? 
.generate_release_proposal_scripts(output_dir.as_path()) .await .with_context(|| "Failed to generate release proposal scripts".to_string())?; + + if let Some(network) = simulate { + let remote_endpoint = network.to_url()?; + simulate_all_proposals(remote_endpoint, output_dir.as_path()).await?; + } + + Ok(()) + }, + Commands::Simulate { network, path } => { + simulate_all_proposals(network.to_url()?, &path).await?; Ok(()) }, Commands::WriteDefault { output_path } => { diff --git a/aptos-move/aptos-release-builder/src/simulate.rs b/aptos-move/aptos-release-builder/src/simulate.rs new file mode 100644 index 0000000000000..0909e62d6df8d --- /dev/null +++ b/aptos-move/aptos-release-builder/src/simulate.rs @@ -0,0 +1,731 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +//! This module implements the simulation of governance proposals. +//! Currently, it supports only multi-step proposals. +//! +//! It utilizes the remote debugger infrastructure to fetch real chain states +//! for local simulation, but adds another in-memory database to store the new side effects +//! generated by the governance scripts. +//! +//! Normally, governance scripts needs to be approved through on-chain governance +//! before they could be executed. This process involves setting up various states +//! (e.g., staking pool, delegated voter), which can be quite complex. +//! +//! This simulation bypasses these challenges by patching specific Move functions +//! with mock versions, most notably `fun resolve_multi_step_proposal`, thus allowing +//! the governance process to be skipped altogether. +//! +//! In other words, this simulation is intended for checking whether a governance +//! proposal will execute successfully, assuming it gets approved, not whether the +//! governance framework itself is working as intended. 
+ +use crate::aptos_framework_path; +use anyhow::{anyhow, bail, Context, Result}; +use aptos::{ + common::types::PromptOptions, governance::compile_in_temp_dir, move_tool::FrameworkPackageArgs, +}; +use aptos_crypto::HashValue; +use aptos_gas_schedule::{AptosGasParameters, FromOnChainGasSchedule}; +use aptos_language_e2e_tests::account::AccountData; +use aptos_move_debugger::aptos_debugger::AptosDebugger; +use aptos_rest_client::Client; +use aptos_types::{ + account_address::AccountAddress, + account_config::ChainIdResource, + on_chain_config::{ApprovedExecutionHashes, Features, GasScheduleV2, OnChainConfig}, + state_store::{ + in_memory_state_view::InMemoryStateView, state_key::StateKey, + state_storage_usage::StateStorageUsage, state_value::StateValue, + Result as StateStoreResult, StateView, TStateView, + }, + transaction::{ExecutionStatus, Script, TransactionArgument, TransactionStatus}, + vm::configs::aptos_prod_deserializer_config, + write_set::{TransactionWrite, WriteSet}, +}; +use aptos_vm::{ + data_cache::AsMoveResolver, + move_vm_ext::{flush_warm_vm_cache, SessionId}, + AptosVM, +}; +use aptos_vm_logging::log_schema::AdapterLogSchema; +use aptos_vm_types::storage::change_set_configs::ChangeSetConfigs; +use clap::Parser; +use move_binary_format::{ + access::ModuleAccess, + deserializer::DeserializerConfig, + file_format::{ + AddressIdentifierIndex, Bytecode, FunctionDefinition, FunctionHandle, FunctionHandleIndex, + IdentifierIndex, ModuleHandle, ModuleHandleIndex, Signature, SignatureIndex, + SignatureToken, Visibility, + }, + CompiledModule, +}; +use move_core_types::{ + identifier::{IdentStr, Identifier}, + language_storage::{ModuleId, StructTag}, + move_resource::MoveResource, +}; +use move_vm_runtime::module_traversal::{TraversalContext, TraversalStorage}; +use move_vm_types::{gas::UnmeteredGasMeter, resolver::ModuleResolver}; +use once_cell::sync::Lazy; +use parking_lot::Mutex; +use serde::Serialize; +use std::{ + collections::HashMap, + io::Write, + 
path::{Path, PathBuf}, +}; +use url::Url; +use walkdir::WalkDir; + +/*************************************************************************************************** + * Compiled Module Helpers + * + **************************************************************************************************/ +fn find_function_def_by_name<'a>( + m: &'a mut CompiledModule, + name: &IdentStr, +) -> Option<&'a mut FunctionDefinition> { + for (idx, func_def) in m.function_defs.iter().enumerate() { + let func_handle = m.function_handle_at(func_def.function); + let func_name = m.identifier_at(func_handle.name); + if name == func_name { + return Some(&mut m.function_defs[idx]); + } + } + None +} + +fn get_or_add(pool: &mut Vec, val: T) -> usize { + match pool.iter().position(|elem| elem == &val) { + Some(idx) => idx, + None => { + let idx = pool.len(); + pool.push(val); + idx + }, + } +} + +#[allow(dead_code)] +fn get_or_add_addr(m: &mut CompiledModule, addr: AccountAddress) -> AddressIdentifierIndex { + AddressIdentifierIndex::new(get_or_add(&mut m.address_identifiers, addr) as u16) +} + +fn get_or_add_ident(m: &mut CompiledModule, ident: Identifier) -> IdentifierIndex { + IdentifierIndex::new(get_or_add(&mut m.identifiers, ident) as u16) +} + +#[allow(dead_code)] +fn get_or_add_module_handle( + m: &mut CompiledModule, + addr: AccountAddress, + name: Identifier, +) -> ModuleHandleIndex { + let addr = get_or_add_addr(m, addr); + let name = get_or_add_ident(m, name); + let module_handle = ModuleHandle { + address: addr, + name, + }; + ModuleHandleIndex::new(get_or_add(&mut m.module_handles, module_handle) as u16) +} + +fn get_or_add_signature(m: &mut CompiledModule, sig: Vec) -> SignatureIndex { + SignatureIndex::new(get_or_add(&mut m.signatures, Signature(sig)) as u16) +} + +fn find_function_handle_by_name( + m: &CompiledModule, + addr: AccountAddress, + module_name: &IdentStr, + func_name: &IdentStr, +) -> Option { + for (idx, func_handle) in 
m.function_handles().iter().enumerate() { + let module_handle = m.module_handle_at(func_handle.module); + if m.address_identifier_at(module_handle.address) == &addr + && m.identifier_at(module_handle.name) == module_name + && m.identifier_at(func_handle.name) == func_name + { + return Some(FunctionHandleIndex(idx as u16)); + } + } + None +} + +fn add_simple_native_function( + m: &mut CompiledModule, + func_name: Identifier, + params: Vec, + returns: Vec, +) -> Result { + if let Some(func_handle_idx) = + find_function_handle_by_name(m, *m.self_addr(), m.self_name(), &func_name) + { + return Ok(func_handle_idx); + } + + let name = get_or_add_ident(m, func_name); + let parameters = get_or_add_signature(m, params); + let return_ = get_or_add_signature(m, returns); + let func_handle = FunctionHandle { + module: m.self_handle_idx(), + name, + parameters, + return_, + type_parameters: vec![], + access_specifiers: None, + }; + let func_handle_idx = FunctionHandleIndex(m.function_handles.len() as u16); + m.function_handles.push(func_handle); + + let func_def = FunctionDefinition { + function: func_handle_idx, + visibility: Visibility::Private, + is_entry: false, + acquires_global_resources: vec![], + code: None, + }; + m.function_defs.push(func_def); + + Ok(func_handle_idx) +} +/*************************************************************************************************** + * Simulation State View + * + **************************************************************************************************/ +/// A state view specifically designed for managing the side effects generated by +/// the governance scripts. +/// +/// It comprises two components: +/// - A remote debugger state view to enable on-demand data fetching. +/// - A local state store to allow new changes to be stacked on top of the remote state. 
+struct SimulationStateView<'a, S> { + remote: &'a S, + states: Mutex>>, +} + +impl<'a, S> SimulationStateView<'a, S> +where + S: StateView, +{ + fn set_state_value(&self, state_key: StateKey, state_val: StateValue) { + self.states.lock().insert(state_key, Some(state_val)); + } + + fn set_on_chain_config(&self, config: &C) -> Result<()> + where + C: OnChainConfig + Serialize, + { + let addr = AccountAddress::from_hex_literal(C::ADDRESS).unwrap(); + + self.set_state_value( + StateKey::resource(&addr, &StructTag { + address: addr, + module: Identifier::new(C::MODULE_IDENTIFIER).unwrap(), + name: Identifier::new(C::TYPE_IDENTIFIER).unwrap(), + type_args: vec![], + })?, + StateValue::new_legacy(bcs::to_bytes(&config)?.into()), + ); + + Ok(()) + } + + fn modify_on_chain_config(&self, modify: F) -> Result<()> + where + C: OnChainConfig + Serialize, + F: FnOnce(&mut C) -> Result<()>, + { + let mut config = C::fetch_config(self).ok_or_else(|| { + anyhow!( + "failed to fetch on-chain config: {:?}", + std::any::type_name::() + ) + })?; + + modify(&mut config)?; + + self.set_on_chain_config(&config)?; + + Ok(()) + } + + #[allow(dead_code)] + fn remove_state_value(&mut self, state_key: &StateKey) { + self.states.lock().remove(state_key); + } + + fn apply_write_set(&self, write_set: WriteSet) { + let mut states = self.states.lock(); + + for (state_key, write_op) in write_set { + match write_op.as_state_value() { + None => { + states.remove(&state_key); + }, + Some(state_val) => { + states.insert(state_key, Some(state_val)); + }, + } + } + } + + #[allow(dead_code)] + fn read_resource(&self, addr: &AccountAddress) -> T { + let data_blob = self + .get_state_value_bytes( + &StateKey::resource_typed::(addr).expect("failed to create StateKey"), + ) + .expect("account must exist in data store") + .unwrap_or_else(|| panic!("Can't fetch {} resource for {}", T::STRUCT_NAME, addr)); + + bcs::from_bytes(&data_blob).expect("failed to deserialize resource") + } +} + +impl<'a, S> TStateView 
for SimulationStateView<'a, S> +where + S: StateView, +{ + type Key = StateKey; + + fn get_state_value(&self, state_key: &Self::Key) -> StateStoreResult> { + if let Some(res) = self.states.lock().get(state_key) { + return Ok(res.clone()); + } + self.remote.get_state_value(state_key) + } + + fn get_usage(&self) -> StateStoreResult { + Ok(StateStorageUsage::Untracked) + } + + fn as_in_memory_state_view(&self) -> InMemoryStateView { + panic!("not supported") + } +} + +/*************************************************************************************************** + * Patches + * + **************************************************************************************************/ +static MODULE_ID_APTOS_GOVERNANCE: Lazy = Lazy::new(|| { + ModuleId::new( + AccountAddress::ONE, + Identifier::new("aptos_governance").unwrap(), + ) +}); + +static FUNC_NAME_CREATE_SIGNER: Lazy = + Lazy::new(|| Identifier::new("create_signer").unwrap()); + +static FUNC_NAME_RESOLVE_MULTI_STEP_PROPOSAL: Lazy = + Lazy::new(|| Identifier::new("resolve_multi_step_proposal").unwrap()); + +const DUMMY_PROPOSAL_ID: u64 = u64::MAX; + +const MAGIC_FAILED_NEXT_EXECUTION_HASH_CHECK: u64 = 0xDEADBEEF; + +/// Helper to load a module from the state view, deserialize it, modify it with +/// the provided callback, reserialize it and finally write it back. +fn patch_module( + state_view: &SimulationStateView, + deserializer_config: &DeserializerConfig, + module_id: &ModuleId, + modify_module: F, +) -> Result<()> +where + F: FnOnce(&mut CompiledModule) -> Result<()>, +{ + let resolver = state_view.as_move_resolver(); + let blob = resolver + .get_module(module_id)? 
+ .ok_or_else(|| anyhow!("module {} does not exist", module_id))?; + + let mut m = CompiledModule::deserialize_with_config(&blob, deserializer_config)?; + + modify_module(&mut m)?; + + // Sanity check to ensure the correctness of the check + move_bytecode_verifier::verify_module(&m).map_err(|err| { + anyhow!( + "patched module failed to verify -- check if the patch is correct: {}", + err + ) + })?; + + let mut blob = vec![]; + m.serialize(&mut blob)?; + + state_view.set_state_value( + StateKey::module_id(module_id), + StateValue::new_legacy(blob.into()), + ); + + Ok(()) +} + +/// Patches `aptos_framework::aptos_governance::resolve_multi_step_proposal` so that +/// it returns the requested signer directly, skipping the governance process altogether. +fn patch_aptos_governance( + state_view: &SimulationStateView, + deserializer_config: &DeserializerConfig, + forbid_next_execution_hash: bool, +) -> Result<()> { + use Bytecode::*; + + patch_module( + state_view, + deserializer_config, + &MODULE_ID_APTOS_GOVERNANCE, + |m| { + // Inject `native fun create_signer`. + let create_signer_handle_idx = add_simple_native_function( + m, + FUNC_NAME_CREATE_SIGNER.clone(), + vec![SignatureToken::Address], + vec![SignatureToken::Signer], + )?; + + // Patch `fun resolve_multi_step_proposal`. 
+ let sig_u8_idx = get_or_add_signature(m, vec![SignatureToken::U8]); + + let func_def = find_function_def_by_name(m, &FUNC_NAME_RESOLVE_MULTI_STEP_PROPOSAL) + .ok_or_else(|| { + anyhow!( + "failed to locate `fun {}`", + &*FUNC_NAME_RESOLVE_MULTI_STEP_PROPOSAL + ) + })?; + func_def.acquires_global_resources = vec![]; + let code = func_def.code.as_mut().ok_or_else(|| { + anyhow!( + "`fun {}` must have a Move-defined body", + &*FUNC_NAME_RESOLVE_MULTI_STEP_PROPOSAL + ) + })?; + + code.code.clear(); + if forbid_next_execution_hash { + // If it is needed to forbid a next execution hash, inject additional Move + // code at the beginning that aborts with a magic number if the vector + // representing the hash is not empty. + // + // if (!vector::is_empty(&next_execution_hash)) { + // abort MAGIC_FAILED_NEXT_EXECUTION_HASH_CHECK; + // } + // + // The magic number can later be checked in Rust to determine if such violation + // has happened. + code.code.extend([ + ImmBorrowLoc(2), + VecLen(sig_u8_idx), + LdU64(0), + Eq, + BrTrue(7), + LdU64(MAGIC_FAILED_NEXT_EXECUTION_HASH_CHECK), + Abort, + ]); + } + // Replace the original logic with `create_signer(signer_address)`, bypassing + // the governance process. + code.code + .extend([MoveLoc(1), Call(create_signer_handle_idx), Ret]); + + Ok(()) + }, + ) +} + +// Add the hash of the script to the list of approved hashes, so to enable the +// alternative (higher) execution limits. 
+fn add_script_execution_hash( + state_view: &SimulationStateView, + hash: HashValue, +) -> Result<()> { + let entry = (DUMMY_PROPOSAL_ID, hash.to_vec()); + + state_view.modify_on_chain_config(|approved_hashes: &mut ApprovedExecutionHashes| { + if !approved_hashes.entries.contains(&entry) { + approved_hashes.entries.push(entry); + } + Ok(()) + }) +} + +/*************************************************************************************************** + * Simulation Workflow + * + **************************************************************************************************/ +fn force_end_epoch(state_view: &SimulationStateView) -> Result<()> { + flush_warm_vm_cache(); + let vm = AptosVM::new_for_gov_sim(&state_view); + let resolver = state_view.as_move_resolver(); + + let gas_schedule = + GasScheduleV2::fetch_config(&state_view).context("failed to fetch gas schedule v2")?; + let gas_feature_version = gas_schedule.feature_version; + + let change_set_configs = + ChangeSetConfigs::unlimited_at_gas_feature_version(gas_feature_version); + + let traversal_storage = TraversalStorage::new(); + let mut sess = vm.new_session(&resolver, SessionId::void(), None); + sess.execute_function_bypass_visibility( + &MODULE_ID_APTOS_GOVERNANCE, + IdentStr::new("force_end_epoch").unwrap(), + vec![], + vec![bcs::to_bytes(&AccountAddress::ONE)?], + &mut UnmeteredGasMeter, + &mut TraversalContext::new(&traversal_storage), + )?; + let (mut change_set, module_write_set) = sess.finish(&change_set_configs)?; + change_set.try_materialize_aggregator_v1_delta_set(&resolver)?; + + let (write_set, _events) = change_set + .try_combine_into_storage_change_set(module_write_set) + .expect("Failed to convert to storage ChangeSet") + .into_inner(); + + state_view.apply_write_set(write_set); + + Ok(()) +} + +pub async fn simulate_multistep_proposal( + remote_url: Url, + proposal_dir: &Path, + proposal_scripts: &[PathBuf], +) -> Result<()> { + println!("Simulating proposal at {}", 
proposal_dir.display()); + + // Compile all scripts. + println!("Compiling scripts..."); + let mut compiled_scripts = vec![]; + for path in proposal_scripts { + let framework_package_args = FrameworkPackageArgs::try_parse_from([ + "dummy_executable_name", + "--framework-local-dir", + &aptos_framework_path().to_string_lossy(), + "--skip-fetch-latest-git-deps", + ]) + .context( + "failed to parse framework package args for compiling scripts, this should not happen", + )?; + + let (blob, hash) = compile_in_temp_dir( + "script", + path, + &framework_package_args, + PromptOptions::yes(), + None, // bytecode_version + None, // language_version + None, // compiler_version + ) + .with_context(|| format!("failed to compile script {}", path.display()))?; + + compiled_scripts.push((blob, hash)); + } + + // Set up the simulation state view. + let client = Client::new(remote_url); + let debugger = + AptosDebugger::rest_client(client.clone()).context("failed to create AptosDebugger")?; + let state = client.get_ledger_information().await?.into_inner(); + + let state_view = SimulationStateView { + remote: &debugger.state_view_at_version(state.version), + states: Mutex::new(HashMap::new()), + }; + + // Create and fund a sender account that is used to send the governance scripts. + print!("Creating and funding sender account.. "); + std::io::stdout().flush()?; + let mut rng = aptos_keygen::KeyGen::from_seed([0; 32]); + let balance = 100 * 1_0000_0000; // 100 APT + let account = AccountData::new_from_seed(&mut rng, balance, 0); + state_view.apply_write_set(account.to_writeset()); + // TODO: should update coin info (total supply) + println!("done"); + + // Execute the governance scripts in sorted order. + println!("Executing governance scripts..."); + + for (script_idx, (script_path, (script_blob, script_hash))) in + proposal_scripts.iter().zip(compiled_scripts).enumerate() + { + // Force-end the epoch so that buffered configuration changes get applied. 
+ force_end_epoch(&state_view).context("failed to force end epoch")?; + + // Fetch the on-chain configs that are needed for the simulation. + let chain_id = + ChainIdResource::fetch_config(&state_view).context("failed to fetch chain id")?; + + let gas_schedule = + GasScheduleV2::fetch_config(&state_view).context("failed to fetch gas schedule v2")?; + let gas_feature_version = gas_schedule.feature_version; + let gas_params = AptosGasParameters::from_on_chain_gas_schedule( + &gas_schedule.into_btree_map(), + gas_feature_version, + ) + .map_err(|err| { + anyhow!( + "failed to construct gas params at gas version {}: {}", + gas_feature_version, + err + ) + })?; + + // Patch framework functions to skip the governance process. + // This is redone every time we execute a script because the previous script could have + // overwritten the framework. + let features = + Features::fetch_config(&state_view).context("failed to fetch feature flags")?; + let deserializer_config = aptos_prod_deserializer_config(&features); + + // If the script is the last step of the proposal, it MUST NOT have a next execution hash. + // Set the boolean flag to true to use a modified patch to catch this. + let forbid_next_execution_hash = script_idx == proposal_scripts.len() - 1; + patch_aptos_governance( + &state_view, + &deserializer_config, + forbid_next_execution_hash, + ) + .context("failed to patch resolve_multistep_proposal")?; + + // Add the hash of the script to the list of approved hashes, so that the + // alternative (usually higher) execution limits can be used. + add_script_execution_hash(&state_view, script_hash) + .context("failed to add script execution hash")?; + + let script_name = script_path.file_name().unwrap().to_string_lossy(); + println!(" {}", script_name); + + // Create a new VM to ensure the loader is clean. + // The warm vm cache also needs to be explicitly flushed as it cannot detect the + // patches we performed. 
+ flush_warm_vm_cache(); + let vm = AptosVM::new_for_gov_sim(&state_view); + let log_context = AdapterLogSchema::new(state_view.id(), 0); + let resolver = state_view.as_move_resolver(); + let (_vm_status, vm_output) = vm.execute_user_transaction( + &resolver, + &account + .account() + .transaction() + .script(Script::new(script_blob, vec![], vec![ + TransactionArgument::U64(DUMMY_PROPOSAL_ID), // dummy proposal id, ignored by the patched function + ])) + .chain_id(chain_id.chain_id()) + .sequence_number(script_idx as u64) + .gas_unit_price(gas_params.vm.txn.min_price_per_gas_unit.into()) + .max_gas_amount(100000) + .ttl(u64::MAX) + .sign(), + &log_context, + ); + // TODO: ensure all scripts trigger reconfiguration. + + let txn_output = vm_output + .try_materialize_into_transaction_output(&resolver) + .context("failed to materialize transaction output")?; + let txn_status = txn_output.status(); + match txn_status { + TransactionStatus::Keep(ExecutionStatus::Success) => { + println!(" Success") + }, + TransactionStatus::Keep(ExecutionStatus::MoveAbort { code, .. 
}) + if *code == MAGIC_FAILED_NEXT_EXECUTION_HASH_CHECK => + { + bail!("the last script has a non-zero next execution hash") + }, + _ => { + println!( + "{}", + format!("{:#?}", txn_status) + .lines() + .map(|line| format!(" {}", line)) + .collect::>() + .join("\n") + ); + bail!("failed to execute governance script: {}", script_name) + }, + } + + let (write_set, _events) = txn_output.into(); + state_view.apply_write_set(write_set); + } + + println!("All scripts succeeded!"); + + Ok(()) +} + +pub fn collect_proposals(root_dir: &Path) -> Result)>> { + let mut result = Vec::new(); + + for entry in WalkDir::new(root_dir) { + let entry = entry.unwrap(); + if entry.path().is_dir() { + let sub_dir = entry.path(); + let mut move_files = Vec::new(); + + for sub_entry in WalkDir::new(sub_dir).min_depth(1).max_depth(1) { + let sub_entry = sub_entry.unwrap(); + if sub_entry.path().is_file() + && sub_entry.path().extension() == Some(std::ffi::OsStr::new("move")) + { + move_files.push(sub_entry.path().to_path_buf()); + } + } + + if !move_files.is_empty() { + move_files.sort(); + result.push((sub_dir.to_path_buf(), move_files)); + } + } + } + + result.sort_by(|(path1, _), (path2, _)| path1.cmp(path2)); + + Ok(result) +} + +pub async fn simulate_all_proposals(remote_url: Url, output_dir: &Path) -> Result<()> { + let proposals = + collect_proposals(output_dir).context("failed to collect proposals for simulation")?; + + if proposals.is_empty() { + bail!("failed to simulate proposals: no proposals found") + } + + println!( + "Found {} proposal{}", + proposals.len(), + if proposals.len() == 1 { "" } else { "s" } + ); + for (proposal_dir, proposal_scripts) in &proposals { + println!(" {}", proposal_dir.display()); + + for script_path in proposal_scripts { + println!( + " {}", + script_path.file_name().unwrap().to_string_lossy() + ); + } + } + + for (proposal_dir, proposal_scripts) in &proposals { + simulate_multistep_proposal(remote_url.clone(), proposal_dir, proposal_scripts) + .await 
+ .with_context(|| { + format!("failed to simulate proposal at {}", proposal_dir.display()) + })?; + } + + println!("All proposals succeeded!"); + + Ok(()) +} diff --git a/aptos-move/aptos-release-builder/src/utils.rs b/aptos-move/aptos-release-builder/src/utils.rs index 6265bce05ce90..a22e52d216239 100644 --- a/aptos-move/aptos-release-builder/src/utils.rs +++ b/aptos-move/aptos-release-builder/src/utils.rs @@ -1,6 +1,8 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 +use aptos_crypto::HashValue; +use aptos_framework::generate_next_execution_hash_blob; use move_core_types::account_address::AccountAddress; use move_model::{code_writer::CodeWriter, emit, emitln}; @@ -44,7 +46,7 @@ pub(crate) fn generate_governance_proposal_header( writer: &CodeWriter, deps_names: &[&str], is_multi_step: bool, - next_execution_hash: Vec, + next_execution_hash: Option, ) { emitln!(writer, "script {"); writer.indent(); @@ -53,15 +55,12 @@ pub(crate) fn generate_governance_proposal_header( for deps_name in deps_names { emitln!(writer, "use {};", deps_name); } - if next_execution_hash == "vector::empty()".as_bytes() { - emitln!(writer, "use std::vector;"); - } emitln!(writer); emitln!(writer, "fun main(proposal_id: u64) {"); writer.indent(); - if is_multi_step && !next_execution_hash.is_empty() { + if is_multi_step { generate_next_execution_hash_blob(writer, AccountAddress::ONE, next_execution_hash); } else { emitln!( @@ -106,27 +105,26 @@ pub(crate) fn finish_with_footer(writer: &CodeWriter) -> String { pub(crate) fn generate_governance_proposal( writer: &CodeWriter, is_testnet: bool, - next_execution_hash: Vec, + next_execution_hash: Option, + is_multi_step: bool, deps_names: &[&str], body: F, ) -> String where F: FnOnce(&CodeWriter), { - if next_execution_hash.is_empty() { - if is_testnet { - generate_testnet_header(writer, deps_names); - } else { - generate_governance_proposal_header( - writer, - deps_names, - false, - "".to_owned().into_bytes(), - ); - } - 
} else { + assert!( + is_multi_step || next_execution_hash.is_none(), + "only multi-step proposals can have a next execution hash" + ); + + if is_multi_step { generate_governance_proposal_header(writer, deps_names, true, next_execution_hash); - }; + } else if is_testnet { + generate_testnet_header(writer, deps_names); + } else { + generate_governance_proposal_header(writer, deps_names, false, None); + } body(writer); finish_with_footer(writer) diff --git a/aptos-move/aptos-resource-viewer/src/lib.rs b/aptos-move/aptos-resource-viewer/src/lib.rs index a5d2cc4bc5582..d3b1cc6824192 100644 --- a/aptos-move/aptos-resource-viewer/src/lib.rs +++ b/aptos-move/aptos-resource-viewer/src/lib.rs @@ -15,6 +15,7 @@ use move_binary_format::CompiledModule; use move_core_types::{ identifier::{IdentStr, Identifier}, language_storage::{ModuleId, StructTag, TypeTag}, + transaction_argument::TransactionArgument, value::{MoveTypeLayout, MoveValue}, }; use move_resource_viewer::MoveValueAnnotator; @@ -65,7 +66,7 @@ impl<'a, S: StateView> AptosValueAnnotator<'a, S> { &self, tag: &StructTag, blob: &[u8], - ) -> anyhow::Result> { + ) -> anyhow::Result<(Option, Vec<(Identifier, MoveValue)>)> { self.0.move_struct_fields(tag, blob) } @@ -80,6 +81,15 @@ impl<'a, S: StateView> AptosValueAnnotator<'a, S> { .view_function_arguments(module, function, ty_args, args) } + pub fn view_script_arguments( + &self, + script_bytes: &[u8], + args: &[TransactionArgument], + ty_args: &[TypeTag], + ) -> anyhow::Result> { + self.0.view_script_arguments(script_bytes, args, ty_args) + } + pub fn view_fully_decorated_ty_layout( &self, type_tag: &TypeTag, diff --git a/aptos-move/aptos-transaction-benchmarks/Cargo.toml b/aptos-move/aptos-transaction-benchmarks/Cargo.toml index 4bb90c07a41ae..3fe147d12d47a 100644 --- a/aptos-move/aptos-transaction-benchmarks/Cargo.toml +++ b/aptos-move/aptos-transaction-benchmarks/Cargo.toml @@ -29,9 +29,7 @@ clap = { workspace = true } criterion = { workspace = true, features = 
["html_reports"] } criterion-cpu-time = { workspace = true } num_cpus = { workspace = true } -once_cell = { workspace = true } proptest = { workspace = true } -rayon = { workspace = true } [[bench]] name = "transaction_benches" diff --git a/aptos-move/aptos-transaction-benchmarks/src/transaction_bench_state.rs b/aptos-move/aptos-transaction-benchmarks/src/transaction_bench_state.rs index 5e728586e547b..d3a71a1d22862 100644 --- a/aptos-move/aptos-transaction-benchmarks/src/transaction_bench_state.rs +++ b/aptos-move/aptos-transaction-benchmarks/src/transaction_bench_state.rs @@ -1,7 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::{transactions, transactions::RAYON_EXEC_POOL}; +use crate::transactions; use aptos_bitvec::BitVec; use aptos_block_executor::txn_commit_hook::NoOpTransactionCommitHook; use aptos_block_partitioner::{ @@ -216,7 +216,6 @@ where _, NoOpTransactionCommitHook, >( - Arc::clone(&RAYON_EXEC_POOL), transactions, self.state_view.as_ref(), BlockExecutorConfig::new_maybe_block_limit(1, maybe_block_gas_limit), @@ -265,7 +264,6 @@ where _, NoOpTransactionCommitHook, >( - Arc::clone(&RAYON_EXEC_POOL), transactions, self.state_view.as_ref(), BlockExecutorConfig::new_maybe_block_limit( diff --git a/aptos-move/aptos-transaction-benchmarks/src/transactions.rs b/aptos-move/aptos-transaction-benchmarks/src/transactions.rs index 645bbe5475afa..d8f96d9d099a5 100644 --- a/aptos-move/aptos-transaction-benchmarks/src/transactions.rs +++ b/aptos-move/aptos-transaction-benchmarks/src/transactions.rs @@ -13,19 +13,8 @@ use aptos_language_e2e_tests::{ gas_costs::TXN_RESERVED, }; use criterion::{measurement::Measurement, BatchSize, Bencher}; -use once_cell::sync::Lazy; use proptest::strategy::Strategy; -use std::{net::SocketAddr, sync::Arc}; - -pub static RAYON_EXEC_POOL: Lazy> = Lazy::new(|| { - Arc::new( - rayon::ThreadPoolBuilder::new() - .num_threads(num_cpus::get()) - .thread_name(|index| format!("par_exec_{}", index)) - 
.build() - .unwrap(), - ) -}); +use std::net::SocketAddr; /// Benchmarking support for transactions. #[derive(Clone)] diff --git a/aptos-move/aptos-transactional-test-harness/src/aptos_test_harness.rs b/aptos-move/aptos-transactional-test-harness/src/aptos_test_harness.rs index 70f66aa5fd4b4..fbd4baa017f34 100644 --- a/aptos-move/aptos-transactional-test-harness/src/aptos_test_harness.rs +++ b/aptos-move/aptos-transactional-test-harness/src/aptos_test_harness.rs @@ -26,6 +26,7 @@ use aptos_types::{ Script as TransactionScript, Transaction, TransactionOutput, TransactionStatus, }, vm::configs::set_paranoid_type_checks, + AptosCoinType, }; use aptos_vm::{AptosVM, VMExecutor}; use aptos_vm_genesis::GENESIS_KEYPAIR; @@ -433,7 +434,7 @@ impl<'a> AptosTestAdapter<'a> { /// Obtain the AptosCoin amount under address `signer_addr` fn fetch_account_balance(&self, signer_addr: &AccountAddress) -> Result { - let aptos_coin_tag = CoinStoreResource::struct_tag(); + let aptos_coin_tag = CoinStoreResource::::struct_tag(); let balance_blob = self .storage diff --git a/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/default_int_size.v2_exp b/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/default_int_size.v2_exp index d95c09e388b2a..05e61d5e21757 100644 --- a/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/default_int_size.v2_exp +++ b/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/default_int_size.v2_exp @@ -1,4 +1,4 @@ processed 2 tasks task 1 'run'. lines 4-21: -Error: Failed to execute transaction. ExecutionStatus: ExecutionFailure { location: Script, function: 0, code_offset: 12 } +Error: Failed to execute transaction. 
ExecutionStatus: ExecutionFailure { location: Script, function: 0, code_offset: 10 } diff --git a/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/diamond_clicker.v2_exp b/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/diamond_clicker.v2_exp index 7905e218669b6..e454cf5a17b59 100644 --- a/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/diamond_clicker.v2_exp +++ b/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/diamond_clicker.v2_exp @@ -15,61 +15,53 @@ struct OuterStruct has key { } entry public test_upgrade(Arg0: &signer) /* def_idx: 0 */ { -L1: loc0: &signer -L2: loc1: OuterStruct -L3: loc2: &mut vector +L1: loc0: OuterStruct +L2: loc1: &mut vector +L3: loc2: u64 L4: loc3: u64 L5: loc4: u64 -L6: loc5: &mut vector -L7: loc6: u64 B0: 0: CopyLoc[0](Arg0: &signer) - 1: StLoc[1](loc0: &signer) - 2: MoveLoc[1](loc0: &signer) - 3: Call signer::address_of(&signer): address - 4: VecPack(3, 0) - 5: Pack[1](OuterStruct) - 6: StLoc[2](loc1: OuterStruct) - 7: MoveLoc[0](Arg0: &signer) - 8: MoveLoc[2](loc1: OuterStruct) - 9: MoveTo[1](OuterStruct) - 10: MutBorrowGlobal[1](OuterStruct) - 11: MutBorrowField[0](OuterStruct.any_field: vector) - 12: StLoc[3](loc2: &mut vector) - 13: LdU64(0) - 14: CopyLoc[3](loc2: &mut vector) - 15: FreezeRef - 16: VecLen(3) - 17: StLoc[4](loc3: u64) - 18: StLoc[5](loc4: u64) + 1: Call signer::address_of(&signer): address + 2: VecPack(3, 0) + 3: Pack[1](OuterStruct) + 4: StLoc[1](loc0: OuterStruct) + 5: MoveLoc[0](Arg0: &signer) + 6: MoveLoc[1](loc0: OuterStruct) + 7: MoveTo[1](OuterStruct) + 8: MutBorrowGlobal[1](OuterStruct) + 9: MutBorrowField[0](OuterStruct.any_field: vector) + 10: StLoc[2](loc1: &mut vector) + 11: LdU64(0) + 12: CopyLoc[2](loc1: &mut vector) + 13: FreezeRef + 14: VecLen(3) + 15: StLoc[3](loc2: u64) + 16: StLoc[4](loc3: u64) B1: - 19: CopyLoc[5](loc4: u64) - 20: CopyLoc[4](loc3: u64) - 21: Lt - 22: BrFalse(37) + 17: CopyLoc[4](loc3: u64) + 18: 
CopyLoc[3](loc2: u64) + 19: Lt + 20: BrFalse(31) B2: - 23: CopyLoc[3](loc2: &mut vector) - 24: StLoc[6](loc5: &mut vector) - 25: MoveLoc[6](loc5: &mut vector) - 26: CopyLoc[5](loc4: u64) - 27: VecMutBorrow(3) - 28: FreezeRef - 29: Call debug::print(&InnerStruct) - 30: LdU64(1) - 31: StLoc[7](loc6: u64) - 32: MoveLoc[5](loc4: u64) - 33: MoveLoc[7](loc6: u64) - 34: Add - 35: StLoc[5](loc4: u64) - 36: Branch(40) + 21: CopyLoc[2](loc1: &mut vector) + 22: CopyLoc[4](loc3: u64) + 23: VecMutBorrow(3) + 24: FreezeRef + 25: Call debug::print(&InnerStruct) + 26: MoveLoc[4](loc3: u64) + 27: LdU64(1) + 28: Add + 29: StLoc[4](loc3: u64) + 30: Branch(34) B3: - 37: MoveLoc[3](loc2: &mut vector) - 38: Pop - 39: Branch(41) + 31: MoveLoc[2](loc1: &mut vector) + 32: Pop + 33: Branch(35) B4: - 40: Branch(19) + 34: Branch(17) B5: - 41: Ret + 35: Ret } } diff --git a/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/table.v2_exp b/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/table.v2_exp index fa85289717c25..b0352e4c59288 100644 --- a/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/table.v2_exp +++ b/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/table.v2_exp @@ -52,7 +52,7 @@ Events: } { type: 0x1::transaction_fee::FeeStatement - data: "9d0000000000000007000000000000000400000000000000c83a0200000000000000000000000000" + data: "9c0000000000000007000000000000000400000000000000c83a0200000000000000000000000000" }mutable inputs after call: local#0: 0 return values: 0 diff --git a/aptos-move/aptos-transactional-test-harness/tests/v2-tests/default_int_size.exp b/aptos-move/aptos-transactional-test-harness/tests/v2-tests/default_int_size.exp index d95c09e388b2a..05e61d5e21757 100644 --- a/aptos-move/aptos-transactional-test-harness/tests/v2-tests/default_int_size.exp +++ b/aptos-move/aptos-transactional-test-harness/tests/v2-tests/default_int_size.exp @@ -1,4 +1,4 @@ processed 2 tasks task 1 'run'. 
lines 4-21: -Error: Failed to execute transaction. ExecutionStatus: ExecutionFailure { location: Script, function: 0, code_offset: 12 } +Error: Failed to execute transaction. ExecutionStatus: ExecutionFailure { location: Script, function: 0, code_offset: 10 } diff --git a/aptos-move/aptos-validator-interface/src/lib.rs b/aptos-move/aptos-validator-interface/src/lib.rs index 24e6945fd5963..9414fb2298476 100644 --- a/aptos-move/aptos-validator-interface/src/lib.rs +++ b/aptos-move/aptos-validator-interface/src/lib.rs @@ -12,7 +12,7 @@ use aptos_types::{ account_address::AccountAddress, state_store::{ state_key::StateKey, state_storage_usage::StateStorageUsage, state_value::StateValue, - Result as StateViewResult, TStateView, + Result as StateViewResult, StateViewId, TStateView, }, transaction::{Transaction, TransactionInfo, Version}, }; @@ -160,6 +160,10 @@ impl DebuggerStateView { impl TStateView for DebuggerStateView { type Key = StateKey; + fn id(&self) -> StateViewId { + StateViewId::Replay + } + fn get_state_value(&self, state_key: &StateKey) -> StateViewResult> { self.get_state_value_internal(state_key, self.version) .map_err(Into::into) diff --git a/aptos-move/aptos-validator-interface/src/storage_interface.rs b/aptos-move/aptos-validator-interface/src/storage_interface.rs index 3abf628eeff5a..1553b9cd06b8e 100644 --- a/aptos-move/aptos-validator-interface/src/storage_interface.rs +++ b/aptos-move/aptos-validator-interface/src/storage_interface.rs @@ -32,6 +32,7 @@ impl DBDebuggerInterface { false, /* indexer */ BUFFERED_STATE_TARGET_ITEMS, DEFAULT_MAX_NUM_NODES_PER_LRU_CACHE_SHARD, + None, ) .map_err(anyhow::Error::from)?, ))) diff --git a/aptos-move/aptos-vm-benchmarks/Cargo.toml b/aptos-move/aptos-vm-benchmarks/Cargo.toml index 2095c5a78aff1..d0718bfbad14d 100644 --- a/aptos-move/aptos-vm-benchmarks/Cargo.toml +++ b/aptos-move/aptos-vm-benchmarks/Cargo.toml @@ -2,7 +2,7 @@ name = "aptos-vm-benchmarks" version = "0.1.0" edition = "2021" -# See more keys and 
their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +license = { workspace = true } [dependencies] aptos-cached-packages = { workspace = true } diff --git a/aptos-move/aptos-vm-logging/src/log_schema.rs b/aptos-move/aptos-vm-logging/src/log_schema.rs index 383dda848a4ec..494ad5dce1175 100644 --- a/aptos-move/aptos-vm-logging/src/log_schema.rs +++ b/aptos-move/aptos-vm-logging/src/log_schema.rs @@ -52,6 +52,13 @@ impl AdapterLogSchema { base_version: Some(base_version), txn_idx, }, + StateViewId::Replay => Self { + name: LogEntry::Execution, + block_id: None, + first_version: None, + base_version: None, + txn_idx, + }, StateViewId::Miscellaneous => Self { name: LogEntry::Miscellaneous, block_id: None, diff --git a/aptos-move/aptos-vm-types/src/change_set.rs b/aptos-move/aptos-vm-types/src/change_set.rs index f44a15b584d98..3c2ca78492403 100644 --- a/aptos-move/aptos-vm-types/src/change_set.rs +++ b/aptos-move/aptos-vm-types/src/change_set.rs @@ -6,7 +6,7 @@ use crate::{ AbstractResourceWriteOp, GroupWrite, InPlaceDelayedFieldChangeOp, ResourceGroupInPlaceDelayedFieldChangeOp, WriteWithDelayedFieldsOp, }, - check_change_set::CheckChangeSet, + module_write_set::ModuleWriteSet, resolver::ExecutorView, }; use aptos_aggregator::{ @@ -80,7 +80,6 @@ pub fn randomly_check_layout_matches( #[derive(Debug, Clone, Eq, PartialEq)] pub struct VMChangeSet { resource_write_set: BTreeMap, - module_write_set: BTreeMap, events: Vec<(ContractEvent, Option)>, // Changes separated out from the writes, for better concurrency, @@ -110,7 +109,6 @@ impl VMChangeSet { pub fn empty() -> Self { Self { resource_write_set: BTreeMap::new(), - module_write_set: BTreeMap::new(), events: vec![], delayed_field_change_set: BTreeMap::new(), aggregator_v1_write_set: BTreeMap::new(), @@ -120,32 +118,24 @@ impl VMChangeSet { pub fn new( resource_write_set: BTreeMap, - module_write_set: BTreeMap, events: Vec<(ContractEvent, Option)>, delayed_field_change_set: BTreeMap>, 
aggregator_v1_write_set: BTreeMap, aggregator_v1_delta_set: BTreeMap, - checker: &dyn CheckChangeSet, - ) -> PartialVMResult { - let change_set = Self { + ) -> Self { + Self { resource_write_set, - module_write_set, events, delayed_field_change_set, aggregator_v1_write_set, aggregator_v1_delta_set, - }; - // Returns an error if structure of the change set is not valid, - // e.g. the size in bytes is too large. - checker.check_change_set(&change_set)?; - Ok(change_set) + } } // TODO[agg_v2](cleanup) see if we can remove in favor of `new`. pub fn new_expanded( resource_write_set: BTreeMap>)>, resource_group_write_set: BTreeMap, - module_write_set: BTreeMap, aggregator_v1_write_set: BTreeMap, aggregator_v1_delta_set: BTreeMap, delayed_field_change_set: BTreeMap>, @@ -155,9 +145,8 @@ impl VMChangeSet { >, group_reads_needing_delayed_field_exchange: BTreeMap, events: Vec<(ContractEvent, Option)>, - checker: &dyn CheckChangeSet, ) -> PartialVMResult { - Self::new( + Ok(Self::new( resource_write_set .into_iter() .map::, _>(|(k, (w, l))| { @@ -214,74 +203,22 @@ impl VMChangeSet { } }, )?, - module_write_set, events, delayed_field_change_set, aggregator_v1_write_set, aggregator_v1_delta_set, - checker, - ) - } - - /// Builds a new change set from the storage representation. - /// - /// **WARNING**: this creates a write set that assumes dynamic change set optimizations to be disabled. - /// this needs to be applied directly to storage, you cannot get appropriate reads from this in a - /// dynamic change set optimization enabled context. - /// We have two dynamic change set optimizations, both there to reduce conflicts between transactions: - /// - exchanging delayed fields and leaving their materialization to happen at the end - /// - unpacking resource groups and treating each resource inside it separately - /// - /// **WARNING**: Has complexity O(#write_ops) because we need to iterate - /// over blobs and split them into resources or modules. 
Only used to - /// support transactions with write-set payload. - /// - /// Note: does not separate out individual resource group updates. - pub fn try_from_storage_change_set_with_delayed_field_optimization_disabled( - change_set: StorageChangeSet, - checker: &dyn CheckChangeSet, - ) -> VMResult { - let (write_set, events) = change_set.into_inner(); - - // There should be no aggregator writes if we have a change set from - // storage. - let mut resource_write_set = BTreeMap::new(); - let mut module_write_set = BTreeMap::new(); - - for (state_key, write_op) in write_set { - if matches!(state_key.inner(), StateKeyInner::AccessPath(ap) if ap.is_code()) { - module_write_set.insert(state_key, write_op); - } else { - // TODO[agg_v1](fix) While everything else must be a resource, first - // version of aggregators is implemented as a table item. Revisit when - // we split MVHashMap into data and aggregators. - - // We can set layout to None, as we are not in the is_delayed_field_optimization_capable context - resource_write_set.insert(state_key, AbstractResourceWriteOp::Write(write_op)); - } - } - - // We can set layout to None, as we are not in the is_delayed_field_optimization_capable context - let events = events.into_iter().map(|event| (event, None)).collect(); - let change_set = Self { - resource_write_set, - module_write_set, - delayed_field_change_set: BTreeMap::new(), - aggregator_v1_write_set: BTreeMap::new(), - aggregator_v1_delta_set: BTreeMap::new(), - events, - }; - checker - .check_change_set(&change_set) - .map_err(|e| e.finish(Location::Undefined))?; - Ok(change_set) + )) } /// Converts VM-native change set into its storage representation with fully /// serialized changes. The conversion fails if: /// - deltas are not materialized. /// - resource group writes are not (combined &) converted to resource writes. - pub fn try_into_storage_change_set(self) -> Result { + /// In addition, the caller can include changes to published modules. 
+ pub fn try_combine_into_storage_change_set( + self, + module_write_set: ModuleWriteSet, + ) -> Result { // Converting VMChangeSet into TransactionOutput (i.e. storage change set), can // be done here only if dynamic_change_set_optimizations have not been used/produced // data into the output. @@ -290,7 +227,6 @@ impl VMChangeSet { // that knows how to deal with it. let Self { resource_write_set, - module_write_set, aggregator_v1_write_set, aggregator_v1_delta_set, delayed_field_change_set, @@ -324,7 +260,7 @@ impl VMChangeSet { }) .collect::, _>>()?, ); - write_set_mut.extend(module_write_set); + write_set_mut.extend(module_write_set.into_write_ops()); write_set_mut.extend(aggregator_v1_write_set); let events = events.into_iter().map(|(e, _)| e).collect(); @@ -339,75 +275,16 @@ impl VMChangeSet { .iter() .map(|(k, v)| (k, v.try_as_concrete_write())) .chain( - self.module_write_set() + self.aggregator_v1_write_set() .iter() - .chain(self.aggregator_v1_write_set().iter()) .map(|(k, v)| (k, Some(v))), ) } - pub fn write_set_size_iter(&self) -> impl Iterator { - self.resource_write_set() - .iter() - .map(|(k, v)| (k, v.materialized_size())) - .chain( - self.module_write_set() - .iter() - .chain(self.aggregator_v1_write_set().iter()) - .map(|(k, v)| (k, v.write_op_size())), - ) - } - - pub fn num_write_ops(&self) -> usize { - self.resource_write_set().len() - + self.module_write_set().len() - + self.aggregator_v1_write_set().len() - } - - /// Deposit amount is inserted into metadata at a different time than the WriteOp is created. - /// So this method is needed to be able to update metadata generically across different variants. 
- pub fn write_op_info_iter_mut<'a>( - &'a mut self, - executor_view: &'a dyn ExecutorView, - ) -> impl Iterator> { - let resources = self.resource_write_set.iter_mut().map(|(key, op)| { - Ok(WriteOpInfo { - key, - op_size: op.materialized_size(), - prev_size: op.prev_materialized_size(key, executor_view)?, - metadata_mut: op.get_metadata_mut(), - }) - }); - let modules = self.module_write_set.iter_mut().map(|(key, op)| { - Ok(WriteOpInfo { - key, - op_size: op.write_op_size(), - prev_size: executor_view.get_module_state_value_size(key)?.unwrap_or(0), - metadata_mut: op.get_metadata_mut(), - }) - }); - let v1_aggregators = self.aggregator_v1_write_set.iter_mut().map(|(key, op)| { - Ok(WriteOpInfo { - key, - op_size: op.write_op_size(), - prev_size: executor_view - .get_aggregator_v1_state_value_size(key)? - .unwrap_or(0), - metadata_mut: op.get_metadata_mut(), - }) - }); - - resources.chain(modules).chain(v1_aggregators) - } - pub fn resource_write_set(&self) -> &BTreeMap { &self.resource_write_set } - pub fn module_write_set(&self) -> &BTreeMap { - &self.module_write_set - } - // Called by `into_transaction_output_with_materialized_writes` only. 
pub(crate) fn extend_aggregator_v1_write_set( &mut self, @@ -644,23 +521,6 @@ impl VMChangeSet { Ok(()) } - fn squash_additional_module_writes( - write_set: &mut BTreeMap, - additional_write_set: BTreeMap, - ) -> PartialVMResult<()> { - for (key, additional_write_op) in additional_write_set.into_iter() { - match write_set.entry(key) { - Occupied(mut entry) => { - squash_writes_pair!(entry, additional_write_op); - }, - Vacant(entry) => { - entry.insert(additional_write_op); - }, - } - } - Ok(()) - } - fn squash_additional_resource_write_ops< K: Hash + Eq + PartialEq + Ord + Clone + std::fmt::Debug, >( @@ -881,11 +741,9 @@ impl VMChangeSet { pub fn squash_additional_change_set( &mut self, additional_change_set: Self, - checker: &dyn CheckChangeSet, ) -> PartialVMResult<()> { let Self { resource_write_set: additional_resource_write_set, - module_write_set: additional_module_write_set, aggregator_v1_write_set: additional_aggregator_write_set, aggregator_v1_delta_set: additional_aggregator_delta_set, delayed_field_change_set: additional_delayed_field_change_set, @@ -902,17 +760,12 @@ impl VMChangeSet { &mut self.resource_write_set, additional_resource_write_set, )?; - Self::squash_additional_module_writes( - &mut self.module_write_set, - additional_module_write_set, - )?; Self::squash_additional_delayed_field_changes( &mut self.delayed_field_change_set, additional_delayed_field_change_set, )?; self.events.extend(additional_events); - - checker.check_change_set(self) + Ok(()) } pub fn has_creation(&self) -> bool { @@ -921,6 +774,59 @@ impl VMChangeSet { } } +/// Builds a new change set from the storage representation. +/// +/// **WARNING**: this creates a write set that assumes dynamic change set optimizations to be disabled. +/// this needs to be applied directly to storage, you cannot get appropriate reads from this in a +/// dynamic change set optimization enabled context. 
+/// We have two dynamic change set optimizations, both there to reduce conflicts between transactions: +/// - exchanging delayed fields and leaving their materialization to happen at the end +/// - unpacking resource groups and treating each resource inside it separately +/// +/// **WARNING**: Has complexity O(#write_ops) because we need to iterate +/// over blobs and split them into resources or modules. Only used to +/// support transactions with write-set payload. +/// +/// Note: does not separate out individual resource group updates. +pub fn create_vm_change_set_with_module_write_set_when_delayed_field_optimization_disabled( + change_set: StorageChangeSet, +) -> (VMChangeSet, ModuleWriteSet) { + let (write_set, events) = change_set.into_inner(); + + // There should be no aggregator writes if we have a change set from + // storage. + let mut resource_write_set = BTreeMap::new(); + let mut module_write_ops = BTreeMap::new(); + + for (state_key, write_op) in write_set { + if matches!(state_key.inner(), StateKeyInner::AccessPath(ap) if ap.is_code()) { + module_write_ops.insert(state_key, write_op); + } else { + // TODO[agg_v1](fix) While everything else must be a resource, first + // version of aggregators is implemented as a table item. Revisit when + // we split MVHashMap into data and aggregators. + + // We can set layout to None, as we are not in the is_delayed_field_optimization_capable context + resource_write_set.insert(state_key, AbstractResourceWriteOp::Write(write_op)); + } + } + + // We can set layout to None, as we are not in the is_delayed_field_optimization_capable context + let events = events.into_iter().map(|event| (event, None)).collect(); + let change_set = VMChangeSet::new( + resource_write_set, + events, + BTreeMap::new(), + BTreeMap::new(), + BTreeMap::new(), + ); + + // The flag if modules have been published to a special address is irrelevant because + // write set transaction does not run an epilogue. Therefore, it is simply set to true. 
+ let module_write_set = ModuleWriteSet::new(true, module_write_ops); + (change_set, module_write_set) +} + pub struct WriteOpInfo<'a> { pub key: &'a StateKey, pub op_size: WriteOpSize, @@ -928,4 +834,70 @@ pub struct WriteOpInfo<'a> { pub metadata_mut: &'a mut StateValueMetadata, } +/// Represents the main functionality of any change set representation: +/// 1. It must contain write ops, and allow iterating over their sizes, +/// as well as other information. +/// 2. it must also contain events. +pub trait ChangeSetInterface { + fn num_write_ops(&self) -> usize; + + fn write_set_size_iter(&self) -> impl Iterator; + + fn events_iter(&self) -> impl Iterator; + + fn write_op_info_iter_mut<'a>( + &'a mut self, + executor_view: &'a dyn ExecutorView, + ) -> impl Iterator>; +} + +impl ChangeSetInterface for VMChangeSet { + fn num_write_ops(&self) -> usize { + // Note: we only use resources and aggregators because they use write ops directly, + // and deltas & events are not part of these. + self.resource_write_set().len() + self.aggregator_v1_write_set().len() + } + + fn write_set_size_iter(&self) -> impl Iterator { + self.resource_write_set() + .iter() + .map(|(k, v)| (k, v.materialized_size())) + .chain( + self.aggregator_v1_write_set() + .iter() + .map(|(k, v)| (k, v.write_op_size())), + ) + } + + fn write_op_info_iter_mut<'a>( + &'a mut self, + executor_view: &'a dyn ExecutorView, + ) -> impl Iterator> { + let resources = self.resource_write_set.iter_mut().map(|(key, op)| { + Ok(WriteOpInfo { + key, + op_size: op.materialized_size(), + prev_size: op.prev_materialized_size(key, executor_view)?, + metadata_mut: op.get_metadata_mut(), + }) + }); + let v1_aggregators = self.aggregator_v1_write_set.iter_mut().map(|(key, op)| { + Ok(WriteOpInfo { + key, + op_size: op.write_op_size(), + prev_size: executor_view + .get_aggregator_v1_state_value_size(key)? 
+ .unwrap_or(0), + metadata_mut: op.get_metadata_mut(), + }) + }); + + resources.chain(v1_aggregators) + } + + fn events_iter(&self) -> impl Iterator { + self.events().iter().map(|(e, _)| e) + } +} + // Tests are in test_change_set.rs. diff --git a/aptos-move/aptos-vm-types/src/check_change_set.rs b/aptos-move/aptos-vm-types/src/check_change_set.rs deleted file mode 100644 index 1835bd82580a1..0000000000000 --- a/aptos-move/aptos-vm-types/src/check_change_set.rs +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use crate::change_set::VMChangeSet; -use move_binary_format::errors::PartialVMResult; - -/// Trait to check the contents of a change set, e.g. the total number of -/// bytes per write op or event. -pub trait CheckChangeSet { - fn check_change_set(&self, change_set: &VMChangeSet) -> PartialVMResult<()>; -} diff --git a/aptos-move/aptos-vm-types/src/environment.rs b/aptos-move/aptos-vm-types/src/environment.rs index 1438884087e18..f4ca3ff253fa2 100644 --- a/aptos-move/aptos-vm-types/src/environment.rs +++ b/aptos-move/aptos-vm-types/src/environment.rs @@ -17,28 +17,23 @@ use std::sync::Arc; // TODO(George): move configs here from types crate. pub fn aptos_prod_ty_builder( - features: &Features, gas_feature_version: u64, gas_params: &AptosGasParameters, ) -> TypeBuilder { - if features.is_limit_type_size_enabled() && gas_feature_version >= RELEASE_V1_15 { + if gas_feature_version >= RELEASE_V1_15 { let max_ty_size = gas_params.vm.txn.max_ty_size; let max_ty_depth = gas_params.vm.txn.max_ty_depth; TypeBuilder::with_limits(max_ty_size.into(), max_ty_depth.into()) } else { - aptos_default_ty_builder(features) + aptos_default_ty_builder() } } -pub fn aptos_default_ty_builder(features: &Features) -> TypeBuilder { - if features.is_limit_type_size_enabled() { - // Type builder to use when: - // 1. Type size gas parameters are not yet in gas schedule (before V14). - // 2. No gas parameters are found on-chain. 
- TypeBuilder::with_limits(128, 20) - } else { - TypeBuilder::Legacy - } +pub fn aptos_default_ty_builder() -> TypeBuilder { + // Type builder to use when: + // 1. Type size gas parameters are not yet in gas schedule (before 1.15). + // 2. No gas parameters are found on-chain. + TypeBuilder::with_limits(128, 20) } /// A runtime environment which can be used for VM initialization and more. @@ -68,7 +63,7 @@ impl Environment { } let timed_features = timed_features_builder.build(); - let ty_builder = aptos_default_ty_builder(&features); + let ty_builder = aptos_default_ty_builder(); Self::initialize(features, timed_features, chain_id, ty_builder) } @@ -80,7 +75,7 @@ impl Environment { .with_override_profile(TimedFeatureOverride::Testing) .build(); - let ty_builder = aptos_default_ty_builder(&features); + let ty_builder = aptos_default_ty_builder(); Arc::new(Self::initialize( features, timed_features, @@ -89,16 +84,6 @@ impl Environment { )) } - pub fn with_features_for_testing(self, features: Features) -> Arc { - let ty_builder = aptos_default_ty_builder(&features); - Arc::new(Self::initialize( - features, - self.timed_features, - self.chain_id, - ty_builder, - )) - } - pub fn try_enable_delayed_field_optimization(mut self) -> Self { if self.features.is_aggregator_v2_delayed_fields_enabled() { self.vm_config.delayed_field_optimization_enabled = true; @@ -132,14 +117,7 @@ impl Environment { chain_id: ChainId, ty_builder: TypeBuilder, ) -> Self { - let pseudo_meter_vector_ty_to_ty_tag_construction = true; - - let vm_config = aptos_prod_vm_config( - &features, - &timed_features, - pseudo_meter_vector_ty_to_ty_tag_construction, - ty_builder, - ); + let vm_config = aptos_prod_vm_config(&features, &timed_features, ty_builder); Self { chain_id, diff --git a/aptos-move/aptos-vm-types/src/lib.rs b/aptos-move/aptos-vm-types/src/lib.rs index fd66470b0707d..7858bacc0d1d2 100644 --- a/aptos-move/aptos-vm-types/src/lib.rs +++ b/aptos-move/aptos-vm-types/src/lib.rs @@ -3,8 +3,8 @@ 
pub mod abstract_write_op; pub mod change_set; -pub mod check_change_set; pub mod environment; +pub mod module_write_set; pub mod output; pub mod resolver; pub mod resource_group_adapter; diff --git a/aptos-move/aptos-vm-types/src/module_write_set.rs b/aptos-move/aptos-vm-types/src/module_write_set.rs new file mode 100644 index 0000000000000..a230c43780f6c --- /dev/null +++ b/aptos-move/aptos-vm-types/src/module_write_set.rs @@ -0,0 +1,83 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{change_set::WriteOpInfo, resolver::ExecutorView}; +use aptos_types::{ + state_store::state_key::StateKey, + write_set::{TransactionWrite, WriteOp, WriteOpSize}, +}; +use move_binary_format::errors::{PartialVMError, PartialVMResult}; +use move_core_types::vm_status::StatusCode; +use std::collections::BTreeMap; + +#[must_use] +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct ModuleWriteSet { + // True if there are write ops which write to 0x1, etc. A special flag + // is used for performance reasons, as otherwise we would need traverse + // the write ops and deserializes access paths. 
+ has_writes_to_special_address: bool, + write_ops: BTreeMap, +} + +impl ModuleWriteSet { + pub fn empty() -> Self { + Self { + has_writes_to_special_address: false, + write_ops: BTreeMap::new(), + } + } + + pub fn new( + has_writes_to_special_address: bool, + write_ops: BTreeMap, + ) -> Self { + Self { + has_writes_to_special_address, + write_ops, + } + } + + pub fn into_write_ops(self) -> impl IntoIterator { + self.write_ops.into_iter() + } + + pub fn write_ops(&self) -> &BTreeMap { + &self.write_ops + } + + pub fn num_write_ops(&self) -> usize { + self.write_ops.len() + } + + pub fn write_set_size_iter(&self) -> impl Iterator { + self.write_ops.iter().map(|(k, v)| (k, v.write_op_size())) + } + + pub fn write_op_info_iter_mut<'a>( + &'a mut self, + executor_view: &'a dyn ExecutorView, + ) -> impl Iterator> { + self.write_ops.iter_mut().map(|(key, op)| { + Ok(WriteOpInfo { + key, + op_size: op.write_op_size(), + prev_size: executor_view.get_module_state_value_size(key)?.unwrap_or(0), + metadata_mut: op.get_metadata_mut(), + }) + }) + } + + pub fn has_writes_to_special_address(&self) -> bool { + self.has_writes_to_special_address + } + + pub fn is_empty_or_invariant_violation(&self) -> PartialVMResult<()> { + if !self.write_ops().is_empty() { + return Err(PartialVMError::new( + StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR, + )); + } + Ok(()) + } +} diff --git a/aptos-move/aptos-vm-types/src/output.rs b/aptos-move/aptos-vm-types/src/output.rs index 74acebfe99c88..22bf8b0af5a2a 100644 --- a/aptos-move/aptos-vm-types/src/output.rs +++ b/aptos-move/aptos-vm-types/src/output.rs @@ -1,17 +1,29 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::change_set::VMChangeSet; -use aptos_aggregator::{resolver::AggregatorV1Resolver, types::code_invariant_error}; -use aptos_types::fee_statement::FeeStatement; +use crate::{ + abstract_write_op::AbstractResourceWriteOp, + change_set::{ChangeSetInterface, VMChangeSet}, + 
module_write_set::ModuleWriteSet, +}; +use aptos_aggregator::{ + delayed_change::DelayedChange, delta_change_set::DeltaOp, resolver::AggregatorV1Resolver, + types::code_invariant_error, +}; use aptos_types::{ - contract_event::ContractEvent, //contract_event::ContractEvent, + contract_event::ContractEvent, delayed_fields::PanicError, + fee_statement::FeeStatement, state_store::state_key::StateKey, transaction::{TransactionAuxiliaryData, TransactionOutput, TransactionStatus}, write_set::WriteOp, }; -use move_core_types::vm_status::{StatusCode, VMStatus}; +use move_core_types::{ + value::MoveTypeLayout, + vm_status::{StatusCode, VMStatus}, +}; +use move_vm_types::delayed_values::delayed_field_id::DelayedFieldID; +use std::collections::BTreeMap; /// Output produced by the VM after executing a transaction. /// @@ -20,6 +32,7 @@ use move_core_types::vm_status::{StatusCode, VMStatus}; #[derive(Debug, Clone, Eq, PartialEq)] pub struct VMOutput { change_set: VMChangeSet, + module_write_set: ModuleWriteSet, fee_statement: FeeStatement, status: TransactionStatus, auxiliary_data: TransactionAuxiliaryData, @@ -28,12 +41,14 @@ pub struct VMOutput { impl VMOutput { pub fn new( change_set: VMChangeSet, + module_write_set: ModuleWriteSet, fee_statement: FeeStatement, status: TransactionStatus, auxiliary_data: TransactionAuxiliaryData, ) -> Self { Self { change_set, + module_write_set, fee_statement, status, auxiliary_data, @@ -43,50 +58,37 @@ impl VMOutput { pub fn empty_with_status(status: TransactionStatus) -> Self { Self { change_set: VMChangeSet::empty(), + module_write_set: ModuleWriteSet::empty(), fee_statement: FeeStatement::zero(), status, auxiliary_data: TransactionAuxiliaryData::default(), } } - pub fn unpack( - self, - ) -> ( - VMChangeSet, - u64, - TransactionStatus, - TransactionAuxiliaryData, - ) { - ( - self.change_set, - self.fee_statement.gas_used(), - self.status, - self.auxiliary_data, - ) + pub fn aggregator_v1_delta_set(&self) -> &BTreeMap { + 
self.change_set.aggregator_v1_delta_set() } - pub fn unpack_with_fee_statement( - self, - ) -> ( - VMChangeSet, - FeeStatement, - TransactionStatus, - TransactionAuxiliaryData, - ) { - ( - self.change_set, - self.fee_statement, - self.status, - self.auxiliary_data, - ) + pub fn aggregator_v1_write_set(&self) -> &BTreeMap { + self.change_set.aggregator_v1_write_set() + } + + pub fn resource_write_set(&self) -> &BTreeMap { + self.change_set.resource_write_set() + } + + pub fn module_write_set(&self) -> &BTreeMap { + self.module_write_set.write_ops() } - pub fn change_set(&self) -> &VMChangeSet { - &self.change_set + pub fn delayed_field_change_set( + &self, + ) -> &BTreeMap> { + self.change_set.delayed_field_change_set() } - pub fn change_set_mut(&mut self) -> &mut VMChangeSet { - &mut self.change_set + pub fn events(&self) -> &[(ContractEvent, Option)] { + self.change_set.events() } pub fn gas_used(&self) -> u64 { @@ -105,6 +107,31 @@ impl VMOutput { &self.auxiliary_data } + pub fn materialized_size(&self) -> u64 { + let mut size = 0; + for (state_key, write_size) in self + .change_set + .write_set_size_iter() + .chain(self.module_write_set.write_set_size_iter()) + { + size += state_key.size() as u64 + write_size.write_len().unwrap_or(0); + } + + for event in self.change_set.events_iter() { + size += event.size() as u64; + } + size + } + + pub fn concrete_write_set_iter(&self) -> impl Iterator)> { + self.change_set.concrete_write_set_iter().chain( + self.module_write_set + .write_ops() + .iter() + .map(|(k, v)| (k, Some(v))), + ) + } + /// Materializes delta sets. /// Guarantees that if deltas are materialized successfully, the output /// has an empty delta set. @@ -120,8 +147,8 @@ impl VMOutput { // change set is empty. In both cases, we do not need to apply any // deltas and can return immediately. 
if self.status().is_discarded() - || (self.change_set().aggregator_v1_delta_set().is_empty() - && self.change_set().delayed_field_change_set().is_empty()) + || (self.aggregator_v1_delta_set().is_empty() + && self.delayed_field_change_set().is_empty()) { return Ok(()); } @@ -138,7 +165,7 @@ impl VMOutput { resolver: &impl AggregatorV1Resolver, ) -> anyhow::Result { self.try_materialize(resolver)?; - Self::convert_to_transaction_output(self).map_err(|e| { + self.into_transaction_output().map_err(|e| { VMStatus::error( StatusCode::DELAYED_MATERIALIZATION_CODE_INVARIANT_ERROR, Some(e.to_string()), @@ -146,27 +173,22 @@ impl VMOutput { }) } - /// Constructs `TransactionOutput`, without doing `try_materialize` - pub fn into_transaction_output(self) -> anyhow::Result { - let (change_set, fee_statement, status, auxiliary_data) = self.unpack_with_fee_statement(); - let output = VMOutput::new(change_set, fee_statement, status, auxiliary_data); - Self::convert_to_transaction_output(output).map_err(|e| { - VMStatus::error( - StatusCode::DELAYED_MATERIALIZATION_CODE_INVARIANT_ERROR, - Some(e.to_string()), - ) - }) - } - - fn convert_to_transaction_output( - materialized_output: VMOutput, - ) -> Result { - let (vm_change_set, gas_used, status, auxiliary_data) = materialized_output.unpack(); - let (write_set, events) = vm_change_set.try_into_storage_change_set()?.into_inner(); + /// Constructs `TransactionOutput`, without doing `try_materialize`. + pub fn into_transaction_output(self) -> Result { + let Self { + change_set, + module_write_set, + fee_statement, + status, + auxiliary_data, + } = self; + let (write_set, events) = change_set + .try_combine_into_storage_change_set(module_write_set)? 
+ .into_inner(); Ok(TransactionOutput::new( write_set, events, - gas_used, + fee_statement.gas_used(), status, auxiliary_data, )) @@ -181,16 +203,14 @@ impl VMOutput { patched_events: Vec, ) -> Result { // materialize aggregator V1 deltas into writes - if materialized_aggregator_v1_deltas.len() - != self.change_set().aggregator_v1_delta_set().len() - { + if materialized_aggregator_v1_deltas.len() != self.aggregator_v1_delta_set().len() { return Err(code_invariant_error( "Different number of materialized deltas and deltas in the output.", )); } if !materialized_aggregator_v1_deltas .iter() - .all(|(k, _)| self.change_set().aggregator_v1_delta_set().contains_key(k)) + .all(|(k, _)| self.aggregator_v1_delta_set().contains_key(k)) { return Err(code_invariant_error( "Materialized aggregator writes contain a key which does not exist in delta set.", @@ -207,13 +227,13 @@ impl VMOutput { let _ = self.change_set.drain_delayed_field_change_set(); // materialize delayed fields into events - if patched_events.len() != self.change_set().events().len() { + if patched_events.len() != self.events().len() { return Err(code_invariant_error( "Different number of events and patched events in the output.", )); } self.change_set.set_events(patched_events.into_iter()); - Self::convert_to_transaction_output(self) + self.into_transaction_output() } } diff --git a/aptos-move/aptos-vm-types/src/storage/change_set_configs.rs b/aptos-move/aptos-vm-types/src/storage/change_set_configs.rs index f0ee9592458e2..d3577e056f222 100644 --- a/aptos-move/aptos-vm-types/src/storage/change_set_configs.rs +++ b/aptos-move/aptos-vm-types/src/storage/change_set_configs.rs @@ -1,10 +1,10 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::{change_set::VMChangeSet, check_change_set::CheckChangeSet}; +use crate::change_set::ChangeSetInterface; use aptos_gas_schedule::AptosGasParameters; -use move_binary_format::errors::{PartialVMError, PartialVMResult}; -use 
move_core_types::vm_status::StatusCode; +use move_binary_format::errors::{Location, PartialVMError}; +use move_core_types::vm_status::{StatusCode, VMStatus}; #[derive(Clone, Debug)] pub struct ChangeSetConfigs { @@ -82,15 +82,20 @@ impl ChangeSetConfigs { params.max_write_ops_per_transaction.into(), ) } -} -impl CheckChangeSet for ChangeSetConfigs { - fn check_change_set(&self, change_set: &VMChangeSet) -> PartialVMResult<()> { + pub fn check_change_set(&self, change_set: &impl ChangeSetInterface) -> Result<(), VMStatus> { + let storage_write_limit_reached = |maybe_message: Option<&str>| { + let mut err = PartialVMError::new(StatusCode::STORAGE_WRITE_LIMIT_REACHED); + if let Some(message) = maybe_message { + err = err.with_message(message.to_string()) + } + Err(err.finish(Location::Undefined).into_vm_status()) + }; + if self.max_write_ops_per_transaction != 0 && change_set.num_write_ops() as u64 > self.max_write_ops_per_transaction { - return Err(PartialVMError::new(StatusCode::STORAGE_WRITE_LIMIT_REACHED) - .with_message("Too many write ops.".to_string())); + return storage_write_limit_reached(Some("Too many write ops.")); } let mut write_set_size = 0; @@ -98,24 +103,24 @@ impl CheckChangeSet for ChangeSetConfigs { if let Some(len) = op_size.write_len() { let write_op_size = len + (key.size() as u64); if write_op_size > self.max_bytes_per_write_op { - return Err(PartialVMError::new(StatusCode::STORAGE_WRITE_LIMIT_REACHED)); + return storage_write_limit_reached(None); } write_set_size += write_op_size; } if write_set_size > self.max_bytes_all_write_ops_per_transaction { - return Err(PartialVMError::new(StatusCode::STORAGE_WRITE_LIMIT_REACHED)); + return storage_write_limit_reached(None); } } let mut total_event_size = 0; - for (event, _) in change_set.events() { + for event in change_set.events_iter() { let size = event.event_data().len() as u64; if size > self.max_bytes_per_event { - return Err(PartialVMError::new(StatusCode::STORAGE_WRITE_LIMIT_REACHED)); + 
return storage_write_limit_reached(None); } total_event_size += size; if total_event_size > self.max_bytes_all_events_per_transaction { - return Err(PartialVMError::new(StatusCode::STORAGE_WRITE_LIMIT_REACHED)); + return storage_write_limit_reached(None); } } diff --git a/aptos-move/aptos-vm-types/src/tests/test_change_set.rs b/aptos-move/aptos-vm-types/src/tests/test_change_set.rs index 7b492572b710b..af095be82cd8e 100644 --- a/aptos-move/aptos-vm-types/src/tests/test_change_set.rs +++ b/aptos-move/aptos-vm-types/src/tests/test_change_set.rs @@ -4,12 +4,16 @@ use super::utils::{mock_tag_0, VMChangeSetBuilder}; use crate::{ abstract_write_op::{AbstractResourceWriteOp, GroupWrite}, - change_set::VMChangeSet, + change_set::{ + create_vm_change_set_with_module_write_set_when_delayed_field_optimization_disabled, + VMChangeSet, + }, + module_write_set::ModuleWriteSet, resolver::ResourceGroupSize, tests::utils::{ as_bytes, as_state_key, mock_add, mock_create, mock_create_with_layout, mock_delete, mock_delete_with_layout, mock_modify, mock_modify_with_layout, mock_tag_1, raw_metadata, - ExpandedVMChangeSetBuilder, MockChangeSetChecker, + ExpandedVMChangeSetBuilder, }, }; use aptos_aggregator::{ @@ -86,21 +90,6 @@ macro_rules! resource_write_set_1 { }; } -macro_rules! module_write_set_1 { - ($d:ident) => { - vec![ - mock_create(format!("0{}", $d), 0), - mock_modify(format!("1{}", $d), 1), - mock_delete(format!("2{}", $d)), - mock_create(format!("7{}", $d), 7), - mock_create(format!("8{}", $d), 8), - mock_modify(format!("10{}", $d), 10), - mock_modify(format!("11{}", $d), 11), - mock_delete(format!("12{}", $d)), - ] - }; -} - macro_rules! resource_write_set_2 { ($d:ident) => { vec![ @@ -116,20 +105,6 @@ macro_rules! resource_write_set_2 { }; } -macro_rules! 
module_write_set_2 { - ($d:ident) => { - vec![ - mock_create(format!("3{}", $d), 103), - mock_modify(format!("4{}", $d), 104), - mock_delete(format!("5{}", $d)), - mock_modify(format!("7{}", $d), 107), - mock_delete(format!("8{}", $d)), - mock_modify(format!("10{}", $d), 110), - mock_delete(format!("11{}", $d)), - mock_create(format!("12{}", $d), 112), - ] - }; -} macro_rules! expected_resource_write_set { ($d:ident) => { BTreeMap::from([ @@ -147,30 +122,11 @@ macro_rules! expected_resource_write_set { }; } -macro_rules! expected_module_write_set { - ($d:ident) => { - BTreeMap::from([ - mock_create(format!("0{}", $d), 0), - mock_modify(format!("1{}", $d), 1), - mock_delete(format!("2{}", $d)), - mock_create(format!("3{}", $d), 103), - mock_modify(format!("4{}", $d), 104), - mock_delete(format!("5{}", $d)), - mock_create(format!("7{}", $d), 107), - mock_modify(format!("10{}", $d), 110), - mock_delete(format!("11{}", $d)), - mock_modify(format!("12{}", $d), 112), - ]) - }; -} - // Populate sets according to the spec. Skip keys which lead to // errors because we test them separately. 
fn build_change_sets_for_test() -> (VMChangeSet, VMChangeSet) { let mut descriptor = "r"; let resource_write_set_1 = resource_write_set_1!(descriptor); - descriptor = "m"; - let module_write_set_1 = module_write_set_1!(descriptor); let aggregator_write_set_1 = vec![mock_create("18a", 18), mock_modify("19a", 19)]; let aggregator_delta_set_1 = vec![ mock_add("15a", 15), @@ -180,15 +136,12 @@ fn build_change_sets_for_test() -> (VMChangeSet, VMChangeSet) { ]; let change_set_1 = VMChangeSetBuilder::new() .with_resource_write_set(resource_write_set_1) - .with_module_write_set(module_write_set_1) .with_aggregator_v1_write_set(aggregator_write_set_1) .with_aggregator_v1_delta_set(aggregator_delta_set_1) .build(); descriptor = "r"; let resource_write_set_2 = resource_write_set_2!(descriptor); - descriptor = "m"; - let module_write_set_2 = module_write_set_2!(descriptor); let aggregator_write_set_2 = vec![mock_modify("22a", 122), mock_delete("23a")]; let aggregator_delta_set_2 = vec![ mock_add("16a", 116), @@ -198,7 +151,6 @@ fn build_change_sets_for_test() -> (VMChangeSet, VMChangeSet) { ]; let change_set_2 = VMChangeSetBuilder::new() .with_resource_write_set(resource_write_set_2) - .with_module_write_set(module_write_set_2) .with_aggregator_v1_write_set(aggregator_write_set_2) .with_aggregator_v1_delta_set(aggregator_delta_set_2) .build(); @@ -209,20 +161,13 @@ fn build_change_sets_for_test() -> (VMChangeSet, VMChangeSet) { #[test] fn test_successful_squash() { let (mut change_set, additional_change_set) = build_change_sets_for_test(); - assert_ok!( - change_set.squash_additional_change_set(additional_change_set, &MockChangeSetChecker) - ); + assert_ok!(change_set.squash_additional_change_set(additional_change_set,)); - let mut descriptor = "r"; + let descriptor = "r"; assert_eq!( change_set.resource_write_set(), &expected_resource_write_set!(descriptor) ); - descriptor = "m"; - assert_eq!( - change_set.module_write_set(), - &expected_module_write_set!(descriptor) - ); let 
expected_aggregator_write_set = BTreeMap::from([ mock_create("18a", 136), @@ -264,15 +209,7 @@ macro_rules! assert_invariant_violation { let cs2 = VMChangeSetBuilder::new() .with_resource_write_set($w2.clone()) .build(); - let res = cs1.squash_additional_change_set(cs2, &MockChangeSetChecker); - check(res); - let mut cs1 = VMChangeSetBuilder::new() - .with_module_write_set($w3.clone()) - .build(); - let cs2 = VMChangeSetBuilder::new() - .with_module_write_set($w4.clone()) - .build(); - let res = cs1.squash_additional_change_set(cs2, &MockChangeSetChecker); + let res = cs1.squash_additional_change_set(cs2); check(res); let mut cs1 = VMChangeSetBuilder::new() .with_aggregator_v1_write_set($w3.clone()) @@ -280,7 +217,7 @@ macro_rules! assert_invariant_violation { let cs2 = VMChangeSetBuilder::new() .with_aggregator_v1_write_set($w4.clone()) .build(); - let res = cs1.squash_additional_change_set(cs2, &MockChangeSetChecker); + let res = cs1.squash_additional_change_set(cs2); check(res); }; } @@ -337,7 +274,7 @@ fn test_unsuccessful_squash_delete_delta() { let additional_change_set = VMChangeSetBuilder::new() .with_aggregator_v1_delta_set(aggregator_delta_set_2) .build(); - let res = change_set.squash_additional_change_set(additional_change_set, &MockChangeSetChecker); + let res = change_set.squash_additional_change_set(additional_change_set); let err = assert_err!(res); assert_eq!( err.major_status(), @@ -357,7 +294,7 @@ fn test_unsuccessful_squash_delta_create() { let additional_change_set = VMChangeSetBuilder::new() .with_aggregator_v1_write_set(aggregator_write_set_2) .build(); - let res = change_set.squash_additional_change_set(additional_change_set, &MockChangeSetChecker); + let res = change_set.squash_additional_change_set(additional_change_set); let err = assert_err!(res); assert_eq!( err.major_status(), @@ -385,13 +322,13 @@ fn test_roundtrip_to_storage_change_set() { .unwrap(); let storage_change_set_before = StorageChangeSet::new(write_set, vec![]); - let 
change_set = assert_ok!( - VMChangeSet::try_from_storage_change_set_with_delayed_field_optimization_disabled( + let (change_set, module_write_set) = + create_vm_change_set_with_module_write_set_when_delayed_field_optimization_disabled( storage_change_set_before.clone(), - &MockChangeSetChecker, - ) - ); - let storage_change_set_after = assert_ok!(change_set.try_into_storage_change_set()); + ); + + let storage_change_set_after = + assert_ok!(change_set.try_combine_into_storage_change_set(module_write_set)); assert_eq!(storage_change_set_before, storage_change_set_after) } @@ -405,7 +342,7 @@ fn test_failed_conversion_to_change_set() { .build(); // Unchecked conversion ignores deltas. - let vm_status = change_set.try_into_storage_change_set(); + let vm_status = change_set.try_combine_into_storage_change_set(ModuleWriteSet::empty()); assert_matches!(vm_status, Err(PanicError::CodeInvariantError(_))); } @@ -418,7 +355,9 @@ fn test_conversion_to_change_set_fails() { .with_aggregator_v1_delta_set(aggregator_delta_set) .build(); - assert_err!(change_set.clone().try_into_storage_change_set()); + assert_err!(change_set + .clone() + .try_combine_into_storage_change_set(ModuleWriteSet::empty())); } #[test] @@ -465,7 +404,7 @@ fn test_aggregator_v2_snapshots_and_derived() { .with_delayed_field_change_set(agg_changes_2) .build(); - assert_ok!(change_set_1.squash_additional_change_set(change_set_2, &MockChangeSetChecker)); + assert_ok!(change_set_1.squash_additional_change_set(change_set_2,)); let output_map = change_set_1.delayed_field_change_set(); assert_eq!(output_map.len(), 3); @@ -565,9 +504,7 @@ fn test_resource_groups_squashing() { { let mut change_set = create_tag_0.clone(); - assert_ok!( - change_set.squash_additional_change_set(modify_tag_0.clone(), &MockChangeSetChecker) - ); + assert_ok!(change_set.squash_additional_change_set(modify_tag_0.clone(),)); assert_eq!(change_set.resource_write_set().len(), 1); // create(x)+modify(y) becomes create(y) assert_some_eq!( @@ 
-583,9 +520,7 @@ fn test_resource_groups_squashing() { { let mut change_set = create_tag_0.clone(); - assert_ok!( - change_set.squash_additional_change_set(create_tag_1.clone(), &MockChangeSetChecker) - ); + assert_ok!(change_set.squash_additional_change_set(create_tag_1.clone(),)); assert_eq!(change_set.resource_write_set().len(), 1); assert_some_eq!( change_set.resource_write_set().get(&as_state_key!("1")), @@ -597,9 +532,7 @@ fn test_resource_groups_squashing() { )) ); - assert_ok!( - change_set.squash_additional_change_set(modify_tag_1.clone(), &MockChangeSetChecker) - ); + assert_ok!(change_set.squash_additional_change_set(modify_tag_1.clone(),)); assert_eq!(change_set.resource_write_set().len(), 1); // create(x)+modify(y) becomes create(y) assert_some_eq!( @@ -615,9 +548,7 @@ fn test_resource_groups_squashing() { { let mut change_set = create_tag_0.clone(); - assert_ok!( - change_set.squash_additional_change_set(modify_tag_1.clone(), &MockChangeSetChecker) - ); + assert_ok!(change_set.squash_additional_change_set(modify_tag_1.clone(),)); assert_eq!(change_set.resource_write_set().len(), 1); assert_some_eq!( change_set.resource_write_set().get(&as_state_key!("1")), @@ -640,7 +571,6 @@ fn test_resource_groups_squashing() { (modification_metadata.metadata().clone(), 400) )]) .build(), - &MockChangeSetChecker )); } } diff --git a/aptos-move/aptos-vm-types/src/tests/test_output.rs b/aptos-move/aptos-vm-types/src/tests/test_output.rs index 381a0b83404db..4b99f02053e6d 100644 --- a/aptos-move/aptos-vm-types/src/tests/test_output.rs +++ b/aptos-move/aptos-vm-types/src/tests/test_output.rs @@ -16,7 +16,6 @@ use std::collections::BTreeMap; fn assert_eq_outputs(vm_output: &VMOutput, txn_output: TransactionOutput) { let vm_output_writes = &vm_output - .change_set() .concrete_write_set_iter() .map(|(k, v)| { ( @@ -97,23 +96,18 @@ fn test_ok_output_equality_with_deltas() { let expected_aggregator_write_set = BTreeMap::from([mock_modify("2", 2), mock_modify("3", 400)]); 
assert_eq!( - materialized_vm_output.change_set().resource_write_set(), - vm_output.change_set().resource_write_set() + materialized_vm_output.resource_write_set(), + vm_output.resource_write_set() ); assert_eq!( - materialized_vm_output.change_set().module_write_set(), - vm_output.change_set().module_write_set() + materialized_vm_output.module_write_set(), + vm_output.module_write_set() ); assert_eq!( - materialized_vm_output - .change_set() - .aggregator_v1_write_set(), + materialized_vm_output.aggregator_v1_write_set(), &expected_aggregator_write_set ); - assert!(materialized_vm_output - .change_set() - .aggregator_v1_delta_set() - .is_empty()); + assert!(materialized_vm_output.aggregator_v1_delta_set().is_empty()); assert_eq!( vm_output.fee_statement(), materialized_vm_output.fee_statement() diff --git a/aptos-move/aptos-vm-types/src/tests/utils.rs b/aptos-move/aptos-vm-types/src/tests/utils.rs index 1161ba80a814a..998c43f37a887 100644 --- a/aptos-move/aptos-vm-types/src/tests/utils.rs +++ b/aptos-move/aptos-vm-types/src/tests/utils.rs @@ -4,7 +4,6 @@ use crate::{ abstract_write_op::{AbstractResourceWriteOp, GroupWrite}, change_set::VMChangeSet, - check_change_set::CheckChangeSet, output::VMOutput, }; use aptos_aggregator::{ @@ -20,6 +19,7 @@ use aptos_types::{ transaction::{ExecutionStatus, TransactionAuxiliaryData, TransactionStatus}, write_set::WriteOp, }; +use move_binary_format::errors::PartialVMResult; use move_core_types::{ identifier::Identifier, language_storage::{StructTag, TypeTag}, @@ -28,14 +28,6 @@ use move_core_types::{ use move_vm_types::delayed_values::delayed_field_id::DelayedFieldID; use std::{collections::BTreeMap, sync::Arc}; -pub(crate) struct MockChangeSetChecker; - -impl CheckChangeSet for MockChangeSetChecker { - fn check_change_set(&self, _change_set: &VMChangeSet) -> PartialVMResult<()> { - Ok(()) - } -} - macro_rules! as_state_key { ($k:ident) => { StateKey::raw($k.to_string().as_bytes()) @@ -55,8 +47,8 @@ macro_rules! 
as_bytes { }; } +use crate::module_write_set::ModuleWriteSet; pub(crate) use as_bytes; -use move_binary_format::errors::PartialVMResult; pub(crate) fn raw_metadata(v: u64) -> StateValueMetadata { StateValueMetadata::legacy(v, &CurrentTimeMicroseconds { microseconds: v }) @@ -152,7 +144,6 @@ pub(crate) fn mock_tag_2() -> StructTag { pub(crate) struct VMChangeSetBuilder { resource_write_set: BTreeMap, - module_write_set: BTreeMap, events: Vec<(ContractEvent, Option)>, delayed_field_change_set: BTreeMap>, aggregator_v1_write_set: BTreeMap, @@ -163,7 +154,6 @@ impl VMChangeSetBuilder { pub(crate) fn new() -> Self { Self { resource_write_set: BTreeMap::new(), - module_write_set: BTreeMap::new(), events: vec![], delayed_field_change_set: BTreeMap::new(), aggregator_v1_write_set: BTreeMap::new(), @@ -180,15 +170,6 @@ impl VMChangeSetBuilder { self } - pub(crate) fn with_module_write_set( - mut self, - module_write_set: impl IntoIterator, - ) -> Self { - assert!(self.module_write_set.is_empty()); - self.module_write_set.extend(module_write_set); - self - } - #[allow(dead_code)] pub(crate) fn with_events( mut self, @@ -232,14 +213,11 @@ impl VMChangeSetBuilder { pub(crate) fn build(self) -> VMChangeSet { VMChangeSet::new( self.resource_write_set, - self.module_write_set, self.events, self.delayed_field_change_set, self.aggregator_v1_write_set, self.aggregator_v1_delta_set, - &MockChangeSetChecker, ) - .unwrap() } } @@ -256,11 +234,11 @@ pub(crate) fn build_vm_output( VMOutput::new( VMChangeSetBuilder::new() .with_resource_write_set(resource_write_set) - .with_module_write_set(module_write_set) .with_delayed_field_change_set(delayed_field_change_set) .with_aggregator_v1_write_set(aggregator_v1_write_set) .with_aggregator_v1_delta_set(aggregator_v1_delta_set) .build(), + ModuleWriteSet::new(false, module_write_set.into_iter().collect()), FeeStatement::new(GAS_USED, GAS_USED, 0, 0, 0), STATUS, TransactionAuxiliaryData::default(), @@ -270,7 +248,6 @@ pub(crate) fn 
build_vm_output( pub(crate) struct ExpandedVMChangeSetBuilder { resource_write_set: BTreeMap>)>, resource_group_write_set: BTreeMap, - module_write_set: BTreeMap, aggregator_v1_write_set: BTreeMap, aggregator_v1_delta_set: BTreeMap, delayed_field_change_set: BTreeMap>, @@ -286,7 +263,6 @@ impl ExpandedVMChangeSetBuilder { Self { resource_write_set: BTreeMap::new(), resource_group_write_set: BTreeMap::new(), - module_write_set: BTreeMap::new(), aggregator_v1_write_set: BTreeMap::new(), aggregator_v1_delta_set: BTreeMap::new(), delayed_field_change_set: BTreeMap::new(), @@ -315,15 +291,6 @@ impl ExpandedVMChangeSetBuilder { self } - pub(crate) fn with_module_write_set( - mut self, - module_write_set: impl IntoIterator, - ) -> Self { - assert!(self.module_write_set.is_empty()); - self.module_write_set.extend(module_write_set); - self - } - pub(crate) fn with_aggregator_v1_write_set( mut self, aggregator_v1_write_set: impl IntoIterator, @@ -391,14 +358,12 @@ impl ExpandedVMChangeSetBuilder { VMChangeSet::new_expanded( self.resource_write_set, self.resource_group_write_set, - self.module_write_set, self.aggregator_v1_write_set, self.aggregator_v1_delta_set, self.delayed_field_change_set, self.reads_needing_delayed_field_exchange, self.group_reads_needing_delayed_field_exchange, self.events, - &MockChangeSetChecker, ) } diff --git a/aptos-move/aptos-vm/src/aptos_vm.rs b/aptos-move/aptos-vm/src/aptos_vm.rs index 1b68765170e0c..0466f47f250a7 100644 --- a/aptos-move/aptos-vm/src/aptos_vm.rs +++ b/aptos-move/aptos-vm/src/aptos_vm.rs @@ -11,7 +11,10 @@ use crate::{ keyless_validation, move_vm_ext::{ session::user_transaction_sessions::{ - abort_hook::AbortHookSession, epilogue::EpilogueSession, prologue::PrologueSession, + abort_hook::AbortHookSession, + epilogue::EpilogueSession, + prologue::PrologueSession, + session_change_sets::{SystemSessionChangeSet, UserSessionChangeSet}, user::UserSession, }, AptosMoveResolver, MoveVmExt, SessionExt, SessionId, UserTransactionContext, 
@@ -46,6 +49,7 @@ use aptos_types::{ block_metadata::BlockMetadata, block_metadata_ext::{BlockMetadataExt, BlockMetadataWithRandomness}, chain_id::ChainId, + contract_event::ContractEvent, fee_statement::FeeStatement, move_utils::as_move_value::AsMoveValue, on_chain_config::{ @@ -53,22 +57,26 @@ use aptos_types::{ OnChainConfig, TimedFeatureFlag, TimedFeatures, }, randomness::Randomness, - state_store::{StateView, TStateView}, + state_store::{state_key::StateKey, StateView, TStateView}, transaction::{ authenticator::AnySignature, signature_verified_transaction::SignatureVerifiedTransaction, BlockOutput, EntryFunction, ExecutionError, ExecutionStatus, ModuleBundle, Multisig, - MultisigTransactionPayload, Script, SignedTransaction, Transaction, + MultisigTransactionPayload, Script, SignedTransaction, Transaction, TransactionArgument, TransactionAuxiliaryData, TransactionOutput, TransactionPayload, TransactionStatus, VMValidatorResult, ViewFunctionOutput, WriteSetPayload, }, vm_status::{AbortLocation, StatusCode, VMStatus}, }; -use aptos_utils::{aptos_try, return_on_failure}; +use aptos_utils::aptos_try; use aptos_vm_logging::{log_schema::AdapterLogSchema, speculative_error, speculative_log}; use aptos_vm_types::{ abstract_write_op::AbstractResourceWriteOp, - change_set::VMChangeSet, + change_set::{ + create_vm_change_set_with_module_write_set_when_delayed_field_optimization_disabled, + ChangeSetInterface, VMChangeSet, + }, environment::Environment, + module_write_set::ModuleWriteSet, output::VMOutput, resolver::{ExecutorView, ResourceGroupView}, storage::{change_set_configs::ChangeSetConfigs, StorageGasParameters}, @@ -82,6 +90,7 @@ use move_binary_format::{ compatibility::Compatibility, deserializer::DeserializerConfig, errors::{Location, PartialVMError, PartialVMResult, VMError, VMResult}, + file_format::CompiledScript, CompiledModule, }; use move_core_types::{ @@ -91,7 +100,7 @@ use move_core_types::{ language_storage::{ModuleId, TypeTag}, 
move_resource::MoveStructType, transaction_argument::convert_txn_args, - value::{serialize_values, MoveValue}, + value::{serialize_values, MoveTypeLayout, MoveValue}, vm_status::StatusType, }; use move_vm_runtime::{ @@ -100,7 +109,7 @@ use move_vm_runtime::{ }; use move_vm_types::gas::{GasMeter, UnmeteredGasMeter}; use num_cpus; -use once_cell::sync::{Lazy, OnceCell}; +use once_cell::sync::OnceCell; use std::{ cmp::{max, min}, collections::{BTreeMap, BTreeSet}, @@ -114,17 +123,6 @@ static NUM_PROOF_READING_THREADS: OnceCell = OnceCell::new(); static DISCARD_FAILED_BLOCKS: OnceCell = OnceCell::new(); static PROCESSED_TRANSACTIONS_DETAILED_COUNTERS: OnceCell = OnceCell::new(); -// TODO: Don't expose this in AptosVM, and use only in BlockAptosVM! -pub static RAYON_EXEC_POOL: Lazy> = Lazy::new(|| { - Arc::new( - rayon::ThreadPoolBuilder::new() - .num_threads(num_cpus::get()) - .thread_name(|index| format!("par_exec-{}", index)) - .build() - .unwrap(), - ) -}); - macro_rules! deprecated_module_bundle { () => { VMStatus::error( @@ -151,32 +149,28 @@ macro_rules! unwrap_or_discard { pub(crate) fn get_system_transaction_output( session: SessionExt, - fee_statement: FeeStatement, - status: ExecutionStatus, change_set_configs: &ChangeSetConfigs, ) -> Result { - get_transaction_output( - session, - fee_statement, - status, - change_set_configs, - TransactionAuxiliaryData::default(), - ) -} + let (change_set, empty_module_write_set) = session.finish(change_set_configs)?; + + // System transactions can never publish modules! When we move publishing outside MoveVM, we do not + // need to have this check here, as modules will only be visible in user session. 
+ empty_module_write_set + .is_empty_or_invariant_violation() + .map_err(|e| { + e.with_message( + "Non-empty module write set in when creating system transaction output".to_string(), + ) + .finish(Location::Undefined) + .into_vm_status() + })?; -pub(crate) fn get_transaction_output( - session: SessionExt, - fee_statement: FeeStatement, - status: ExecutionStatus, - change_set_configs: &ChangeSetConfigs, - auxiliary_data: TransactionAuxiliaryData, -) -> Result { - let change_set = session.finish(change_set_configs)?; Ok(VMOutput::new( change_set, - fee_statement, - TransactionStatus::Keep(status), - auxiliary_data, + ModuleWriteSet::empty(), + FeeStatement::zero(), + TransactionStatus::Keep(ExecutionStatus::Success), + TransactionAuxiliaryData::default(), )) } @@ -226,21 +220,37 @@ impl AptosVM { /// Creates a new VM instance, initializing the runtime environment from the state. pub fn new(state_view: &impl StateView) -> Self { let env = Arc::new(Environment::new(state_view)); - Self::new_with_environment(env, state_view) + Self::new_with_environment(env, state_view, false) + } + + pub fn new_for_gov_sim(state_view: &impl StateView) -> Self { + let env = Arc::new(Environment::new(state_view)); + Self::new_with_environment(env, state_view, true) } /// Creates a new VM instance based on the runtime environment, and used by block /// executor to create multiple tasks sharing the same execution configurations. // TODO: Passing `state_view` is not needed once we move keyless and gas-related // configs to the environment. 
- pub(crate) fn new_with_environment(env: Arc, state_view: &impl StateView) -> Self { + pub(crate) fn new_with_environment( + env: Arc, + state_view: &impl StateView, + inject_create_signer_for_gov_sim: bool, + ) -> Self { let _timer = TIMER.timer_with(&["AptosVM::new"]); let (gas_params, storage_gas_params, gas_feature_version) = get_gas_parameters(env.features(), state_view); let resolver = state_view.as_move_resolver(); - let move_vm = MoveVmExt::new(gas_feature_version, gas_params.as_ref(), env, &resolver); + let move_vm = MoveVmExt::new_with_extended_options( + gas_feature_version, + gas_params.as_ref(), + env, + None, + inject_create_signer_for_gov_sim, + &resolver, + ); // We use an `Option` to handle the VK not being set on-chain, or an incorrect VK being set // via governance (although, currently, we do check for that in `keyless_account.move`). @@ -412,7 +422,7 @@ impl AptosVM { pub(crate) fn failed_transaction_cleanup( &self, - prologue_change_set: VMChangeSet, + prologue_session_change_set: SystemSessionChangeSet, error_vm_status: VMStatus, gas_meter: &mut impl AptosGasMeter, txn_data: &TransactionMetadata, @@ -458,25 +468,20 @@ impl AptosVM { // gas). Even if the previous failure occurred while running the epilogue, it // should not fail now. If it somehow fails here, there is no choice but to // discard the transaction. 
- let txn_output = match self.finish_aborted_transaction( - prologue_change_set, - gas_meter, - txn_data, - resolver, - status, - log_context, - change_set_configs, - traversal_context, - ) { - Ok((change_set, fee_statement, status)) => VMOutput::new( - change_set, - fee_statement, - TransactionStatus::Keep(status), + let output = self + .finish_aborted_transaction( + prologue_session_change_set, + gas_meter, + txn_data, + resolver, + status, txn_aux_data, - ), - Err(err) => discarded_output(err.status_code()), - }; - (error_vm_status, txn_output) + log_context, + change_set_configs, + traversal_context, + ) + .unwrap_or_else(|status| discarded_output(status.status_code())); + (error_vm_status, output) }, TransactionStatus::Discard(status_code) => { let discarded_output = discarded_output(status_code); @@ -508,142 +513,135 @@ impl AptosVM { fn finish_aborted_transaction( &self, - prologue_change_set: VMChangeSet, + prologue_session_change_set: SystemSessionChangeSet, gas_meter: &mut impl AptosGasMeter, txn_data: &TransactionMetadata, resolver: &impl AptosMoveResolver, status: ExecutionStatus, + txn_aux_data: TransactionAuxiliaryData, log_context: &AdapterLogSchema, change_set_configs: &ChangeSetConfigs, traversal_context: &mut TraversalContext, - ) -> Result<(VMChangeSet, FeeStatement, ExecutionStatus), VMStatus> { + ) -> Result { // Storage refund is zero since no slots are deleted in aborted transactions. const ZERO_STORAGE_REFUND: u64 = 0; let is_account_init_for_sponsored_transaction = is_account_init_for_sponsored_transaction(txn_data, self.features(), resolver)?; - if is_account_init_for_sponsored_transaction { - let mut abort_hook_session = - AbortHookSession::new(self, txn_data, resolver, prologue_change_set)?; - // Abort information is injected using the user defined error in the Move contract. 
- let status = self.inject_abort_info_if_available(status); + let (previous_session_change_set, fee_statement) = + if is_account_init_for_sponsored_transaction { + let mut abort_hook_session = + AbortHookSession::new(self, txn_data, resolver, prologue_session_change_set); - abort_hook_session.execute(|session| { - create_account_if_does_not_exist( - session, - gas_meter, - txn_data.sender(), - traversal_context, - ) - // if this fails, it is likely due to out of gas, so we try again without metering - // and then validate below that we charged sufficiently. - .or_else(|_err| { + abort_hook_session.execute(|session| { create_account_if_does_not_exist( session, - &mut UnmeteredGasMeter, + gas_meter, txn_data.sender(), traversal_context, ) - }) - .map_err(expect_no_verification_errors) - .or_else(|err| { - expect_only_successful_execution( + // If this fails, it is likely due to out of gas, so we try again without metering + // and then validate below that we charged sufficiently. + .or_else(|_err| { + create_account_if_does_not_exist( + session, + &mut UnmeteredGasMeter, + txn_data.sender(), + traversal_context, + ) + }) + .map_err(expect_no_verification_errors) + .or_else(|err| { + expect_only_successful_execution( + err, + &format!("{:?}::{}", ACCOUNT_MODULE, CREATE_ACCOUNT_IF_DOES_NOT_EXIST), + log_context, + ) + }) + })?; + + let mut abort_hook_session_change_set = + abort_hook_session.finish(change_set_configs)?; + if let Err(err) = self.charge_change_set( + &mut abort_hook_session_change_set, + gas_meter, + txn_data, + resolver, + ) { + info!( + *log_context, + "Failed during charge_change_set: {:?}. 
Most likely exceeded gas limited.", err, - &format!("{:?}::{}", ACCOUNT_MODULE, CREATE_ACCOUNT_IF_DOES_NOT_EXIST), - log_context, - ) - }) - })?; + ); + }; - let mut change_set = abort_hook_session.finish(change_set_configs)?; - if let Err(err) = self.charge_change_set(&mut change_set, gas_meter, txn_data, resolver) - { - info!( - *log_context, - "Failed during charge_change_set: {:?}. Most likely exceeded gas limited.", err, + let fee_statement = + AptosVM::fee_statement_from_gas_meter(txn_data, gas_meter, ZERO_STORAGE_REFUND); + + // Verify we charged sufficiently for creating an account slot + let gas_params = get_or_vm_startup_failure(&self.gas_params, log_context)?; + let gas_unit_price = u64::from(txn_data.gas_unit_price()); + let gas_used = fee_statement.gas_used(); + let storage_fee = fee_statement.storage_fee_used(); + let storage_refund = fee_statement.storage_fee_refund(); + + let actual = gas_used * gas_unit_price + storage_fee - storage_refund; + let expected = u64::from( + gas_meter + .disk_space_pricing() + .hack_account_creation_fee_lower_bound(&gas_params.vm.txn), ); + if actual < expected { + expect_only_successful_execution( + PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR) + .with_message( + "Insufficient fee for storing account for sponsored transaction" + .to_string(), + ) + .finish(Location::Undefined), + &format!("{:?}::{}", ACCOUNT_MODULE, CREATE_ACCOUNT_IF_DOES_NOT_EXIST), + log_context, + )?; + } + (abort_hook_session_change_set, fee_statement) + } else { + let fee_statement = + AptosVM::fee_statement_from_gas_meter(txn_data, gas_meter, ZERO_STORAGE_REFUND); + (prologue_session_change_set, fee_statement) }; - let fee_statement = - AptosVM::fee_statement_from_gas_meter(txn_data, gas_meter, ZERO_STORAGE_REFUND); - - // Verify we charged sufficiently for creating an account slot - let gas_params = get_or_vm_startup_failure(&self.gas_params, log_context)?; - let gas_unit_price = u64::from(txn_data.gas_unit_price()); - let 
gas_used = fee_statement.gas_used(); - let storage_fee = fee_statement.storage_fee_used(); - let storage_refund = fee_statement.storage_fee_refund(); - - let actual = gas_used * gas_unit_price + storage_fee - storage_refund; - let expected = u64::from( - gas_meter - .disk_space_pricing() - .hack_account_creation_fee_lower_bound(&gas_params.vm.txn), - ); - if actual < expected { - expect_only_successful_execution( - PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR) - .with_message( - "Insufficient fee for storing account for sponsored transaction" - .to_string(), - ) - .finish(Location::Undefined), - &format!("{:?}::{}", ACCOUNT_MODULE, CREATE_ACCOUNT_IF_DOES_NOT_EXIST), - log_context, - )?; - } + let mut epilogue_session = EpilogueSession::on_user_session_failure( + self, + txn_data, + resolver, + previous_session_change_set, + ); - let mut epilogue_session = EpilogueSession::new( - self, - txn_data, - resolver, - change_set, - ZERO_STORAGE_REFUND.into(), - )?; + // Abort information is injected using the user defined error in the Move contract. + // + // DO NOT move abort info injection before we create an epilogue session, because if + // there is a code publishing transaction that fails, it will invalidate VM loader + // cache which is flushed ONLY WHEN THE NEXT SESSION IS CREATED! + // Also, do not move this after we run failure epilogue below, because this will load + // module, which alters abort info. We have a transaction at version 596888095 which + // relies on this specific behavior... 
+ let status = self.inject_abort_info_if_available(status); - epilogue_session.execute(|session| { - transaction_validation::run_failure_epilogue( - session, - gas_meter.balance(), - fee_statement, - self.features(), - txn_data, - log_context, - traversal_context, - ) - })?; - epilogue_session - .finish(change_set_configs) - .map(|set| (set, fee_statement, status)) - } else { - let mut epilogue_session = EpilogueSession::new( - self, + epilogue_session.execute(|session| { + transaction_validation::run_failure_epilogue( + session, + gas_meter.balance(), + fee_statement, + self.features(), txn_data, - resolver, - prologue_change_set, - ZERO_STORAGE_REFUND.into(), - )?; - - let status = self.inject_abort_info_if_available(status); + log_context, + traversal_context, + self.is_simulation, + ) + })?; - let fee_statement = - AptosVM::fee_statement_from_gas_meter(txn_data, gas_meter, ZERO_STORAGE_REFUND); - epilogue_session.execute(|session| { - transaction_validation::run_failure_epilogue( - session, - gas_meter.balance(), - fee_statement, - self.features(), - txn_data, - log_context, - traversal_context, - ) - })?; - epilogue_session - .finish(change_set_configs) - .map(|set| (set, fee_statement, status)) - } + epilogue_session.finish(fee_statement, status, txn_aux_data, change_set_configs) } fn success_transaction_cleanup( @@ -654,6 +652,7 @@ impl AptosVM { log_context: &AdapterLogSchema, change_set_configs: &ChangeSetConfigs, traversal_context: &mut TraversalContext, + has_modules_published_to_special_address: bool, ) -> Result<(VMStatus, VMOutput), VMStatus> { if self.gas_feature_version >= 12 { // Check if the gas meter's internal counters are consistent. 
@@ -684,15 +683,24 @@ impl AptosVM { txn_data, log_context, traversal_context, + self.is_simulation, ) })?; - let change_set = epilogue_session.finish(change_set_configs)?; - let output = VMOutput::new( - change_set, + let output = epilogue_session.finish( fee_statement, - TransactionStatus::Keep(ExecutionStatus::Success), + ExecutionStatus::Success, TransactionAuxiliaryData::default(), - ); + change_set_configs, + )?; + + // We mark module cache invalid if transaction is successfully executed and has + // published modules. The reason is that epilogue loads the old version of code, + // and so we need to make sure the next transaction sees the new code. + // Note that we only do so for modules at special addresses - i.e., those that + // could have actually been loaded in the epilogue. + if has_modules_published_to_special_address { + self.move_vm.mark_loader_cache_as_invalid(); + } Ok((VMStatus::Executed, output)) } @@ -707,6 +715,19 @@ impl AptosVM { senders: Vec, script: &Script, ) -> Result<(), VMStatus> { + if !self + .features() + .is_enabled(FeatureFlag::ALLOW_SERIALIZED_SCRIPT_ARGS) + { + for arg in script.args() { + if let TransactionArgument::Serialized(_) = arg { + return Err(PartialVMError::new(StatusCode::FEATURE_UNDER_GATING) + .finish(Location::Script) + .into_vm_status()); + } + } + } + // Note: Feature gating is needed here because the traversal of the dependencies could // result in shallow-loading of the modules and therefore subtle changes in // the error semantics. @@ -720,11 +741,30 @@ impl AptosVM { let func = session.load_script(script.code(), script.ty_args())?; - // TODO(Gerardo): consolidate the extended validation to verifier. 
- verifier::event_validation::verify_no_event_emission_in_script( + let compiled_script = match CompiledScript::deserialize_with_config( script.code(), self.deserializer_config(), - )?; + ) { + Ok(script) => script, + Err(err) => { + let msg = format!("[VM] deserializer for script returned error: {:?}", err); + let partial_err = PartialVMError::new(StatusCode::CODE_DESERIALIZATION_ERROR) + .with_message(msg) + .finish(Location::Script); + return Err(partial_err.into_vm_status()); + }, + }; + + // Check that unstable bytecode cannot be executed on mainnet + if self + .features() + .is_enabled(FeatureFlag::REJECT_UNSTABLE_BYTECODE_FOR_SCRIPT) + { + self.reject_unstable_bytecode_for_script(&compiled_script)?; + } + + // TODO(Gerardo): consolidate the extended validation to verifier. + verifier::event_validation::verify_no_event_emission_in_compiled_script(&compiled_script)?; let args = verifier::transaction_arg_validation::validate_combine_signer_and_txn_args( session, @@ -770,6 +810,22 @@ impl AptosVM { let function = session.load_function(entry_fn.module(), entry_fn.function(), entry_fn.ty_args())?; + // Native entry function is forbidden. + if self + .features() + .is_enabled(FeatureFlag::DISALLOW_USER_NATIVES) + && function.is_native() + { + return Err( + PartialVMError::new(StatusCode::USER_DEFINED_NATIVE_NOT_ALLOWED) + .with_message( + "Executing user defined native entry function is not allowed".to_string(), + ) + .finish(Location::Module(entry_fn.module().clone())) + .into_vm_status(), + ); + } + // The `has_randomness_attribute()` should have been feature-gated in 1.11... 
if function.is_friend_or_private() && get_randomness_annotation(resolver, session, entry_fn)?.is_some() @@ -849,20 +905,21 @@ impl AptosVM { _ => unreachable!("Only scripts or entry functions are executed"), }; - session.execute(|session| { - self.resolve_pending_code_publish( - session, - gas_meter, - traversal_context, - new_published_modules_loaded, - ) - })?; - - let epilogue_session = self.charge_change_set_and_respawn_session( + let user_session_change_set = self.resolve_pending_code_publish_and_finish_user_session( session, resolver, gas_meter, + traversal_context, + new_published_modules_loaded, change_set_configs, + )?; + let has_modules_published_to_special_address = + user_session_change_set.has_modules_published_to_special_address(); + + let epilogue_session = self.charge_change_set_and_respawn_session( + user_session_change_set, + resolver, + gas_meter, txn_data, )?; @@ -873,18 +930,19 @@ impl AptosVM { log_context, change_set_configs, traversal_context, + has_modules_published_to_special_address, ) } fn charge_change_set( &self, - change_set: &mut VMChangeSet, + change_set: &mut impl ChangeSetInterface, gas_meter: &mut impl AptosGasMeter, txn_data: &TransactionMetadata, resolver: &impl AptosMoveResolver, ) -> Result, VMStatus> { gas_meter.charge_io_gas_for_transaction(txn_data.transaction_size())?; - for (event, _layout) in change_set.events() { + for event in change_set.events_iter() { gas_meter.charge_io_gas_for_event(event)?; } for (key, op_size) in change_set.write_set_size_iter() { @@ -906,25 +964,28 @@ impl AptosVM { fn charge_change_set_and_respawn_session<'r, 'l>( &'l self, - user_session: UserSession<'r, 'l>, + mut user_session_change_set: UserSessionChangeSet, resolver: &'r impl AptosMoveResolver, gas_meter: &mut impl AptosGasMeter, - change_set_configs: &ChangeSetConfigs, txn_data: &'l TransactionMetadata, ) -> Result, VMStatus> { - let mut change_set = user_session.finish(change_set_configs)?; - let storage_refund = - 
self.charge_change_set(&mut change_set, gas_meter, txn_data, resolver)?; + self.charge_change_set(&mut user_session_change_set, gas_meter, txn_data, resolver)?; // TODO[agg_v1](fix): Charge for aggregator writes - EpilogueSession::new(self, txn_data, resolver, change_set, storage_refund) + Ok(EpilogueSession::on_user_session_success( + self, + txn_data, + resolver, + user_session_change_set, + storage_refund, + )) } fn simulate_multisig_transaction<'a, 'r, 'l>( &'l self, resolver: &'r impl AptosMoveResolver, - mut session: UserSession<'r, 'l>, + session: UserSession<'r, 'l>, gas_meter: &mut impl AptosGasMeter, traversal_context: &mut TraversalContext<'a>, txn_data: &TransactionMetadata, @@ -939,26 +1000,28 @@ impl AptosVM { match multisig_payload { MultisigTransactionPayload::EntryFunction(entry_function) => { aptos_try!({ - return_on_failure!(session.execute(|session| self - .execute_multisig_entry_function( - resolver, - session, - gas_meter, - traversal_context, - payload.multisig_address, - entry_function, - new_published_modules_loaded, - txn_data, - ))); + let user_session_change_set = self.execute_multisig_entry_function( + resolver, + session, + gas_meter, + traversal_context, + payload.multisig_address, + entry_function, + new_published_modules_loaded, + txn_data, + change_set_configs, + )?; + let has_modules_published_to_special_address = + user_session_change_set.has_modules_published_to_special_address(); + // TODO: Deduplicate this against execute_multisig_transaction // A bit tricky since we need to skip success/failure cleanups, // which is in the middle. Introducing a boolean would make the code // messier. 
let epilogue_session = self.charge_change_set_and_respawn_session( - session, + user_session_change_set, resolver, gas_meter, - change_set_configs, txn_data, )?; @@ -969,6 +1032,7 @@ impl AptosVM { log_context, change_set_configs, traversal_context, + has_modules_published_to_special_address, ) }) }, @@ -988,7 +1052,7 @@ impl AptosVM { &'l self, resolver: &'r impl AptosMoveResolver, mut session: UserSession<'r, 'l>, - prologue_change_set: &VMChangeSet, + prologue_session_change_set: &SystemSessionChangeSet, gas_meter: &mut impl AptosGasMeter, traversal_context: &mut TraversalContext, txn_data: &TransactionMetadata, @@ -1073,20 +1137,18 @@ impl AptosVM { // changes are not persisted. // The multisig transaction would still be considered executed even if execution fails. let execution_result = match payload { - MultisigTransactionPayload::EntryFunction(entry_function) => { - session.execute(|session| { - self.execute_multisig_entry_function( - resolver, - session, - gas_meter, - traversal_context, - txn_payload.multisig_address, - &entry_function, - new_published_modules_loaded, - txn_data, - ) - }) - }, + MultisigTransactionPayload::EntryFunction(entry_function) => self + .execute_multisig_entry_function( + resolver, + session, + gas_meter, + traversal_context, + txn_payload.multisig_address, + &entry_function, + new_published_modules_loaded, + txn_data, + change_set_configs, + ), }; // Step 3: Call post transaction cleanup function in multisig account module with the result @@ -1098,33 +1160,54 @@ impl AptosVM { MoveValue::Address(txn_payload.multisig_address), MoveValue::vector_u8(payload_bytes), ]); - let epilogue_session = if let Err(execution_error) = execution_result { - // Invalidate the loader cache in case there was a new module loaded from a module - // publish request that failed. 
- // This is redundant with the logic in execute_user_transaction but unfortunately is - // necessary here as executing the underlying call can fail without this function - // returning an error to execute_user_transaction. - if *new_published_modules_loaded { - self.move_vm.mark_loader_cache_as_invalid(); - }; - self.failure_multisig_payload_cleanup( - resolver, - prologue_change_set, - execution_error, - txn_data, - cleanup_args, - traversal_context, - )? - } else { - self.success_multisig_payload_cleanup( - resolver, - session, - gas_meter, - txn_data, - cleanup_args, - change_set_configs, - traversal_context, - )? + + let (epilogue_session, has_modules_published_to_special_address) = match execution_result { + Err(execution_error) => { + // Invalidate the loader cache in case there was a new module loaded from a module + // publish request that failed. + // This is redundant with the logic in execute_user_transaction but unfortunately is + // necessary here as executing the underlying call can fail without this function + // returning an error to execute_user_transaction. + if *new_published_modules_loaded { + self.move_vm.mark_loader_cache_as_invalid(); + }; + let epilogue_session = self.failure_multisig_payload_cleanup( + resolver, + prologue_session_change_set, + execution_error, + txn_data, + cleanup_args, + traversal_context, + )?; + (epilogue_session, false) + }, + Ok(user_session_change_set) => { + let has_modules_published_to_special_address = + user_session_change_set.has_modules_published_to_special_address(); + + // Charge gas for write set before we do cleanup. This ensures we don't charge gas for + // cleanup write set changes, which is consistent with outer-level success cleanup + // flow. We also wouldn't need to worry that we run out of gas when doing cleanup. 
+ let mut epilogue_session = self.charge_change_set_and_respawn_session( + user_session_change_set, + resolver, + gas_meter, + txn_data, + )?; + epilogue_session.execute(|session| { + session + .execute_function_bypass_visibility( + &MULTISIG_ACCOUNT_MODULE, + SUCCESSFUL_TRANSACTION_EXECUTION_CLEANUP, + vec![], + cleanup_args, + &mut UnmeteredGasMeter, + traversal_context, + ) + .map_err(|e| e.into_vm_status()) + })?; + (epilogue_session, has_modules_published_to_special_address) + }, }; // TODO(Gas): Charge for aggregator writes @@ -1135,6 +1218,7 @@ impl AptosVM { log_context, change_set_configs, traversal_context, + has_modules_published_to_special_address, ) } @@ -1142,7 +1226,7 @@ impl AptosVM { &'l self, resolver: &'r impl AptosMoveResolver, session: UserSession<'r, 'l>, - proglogue_change_set: &VMChangeSet, + prologue_session_change_set: &SystemSessionChangeSet, gas_meter: &mut impl AptosGasMeter, traversal_context: &mut TraversalContext<'a>, txn_data: &TransactionMetadata, @@ -1151,7 +1235,13 @@ impl AptosVM { new_published_modules_loaded: &mut bool, change_set_configs: &ChangeSetConfigs, ) -> Result<(VMStatus, VMOutput), VMStatus> { - if self.is_simulation { + // Once `simulation_enhancement` is enabled, we use `execute_multisig_transaction` for simulation, + // deprecating `simulate_multisig_transaction`. 
+ if self.is_simulation + && !self + .features() + .is_transaction_simulation_enhancement_enabled() + { self.simulate_multisig_transaction( resolver, session, @@ -1167,7 +1257,7 @@ impl AptosVM { self.execute_multisig_transaction( resolver, session, - proglogue_change_set, + prologue_session_change_set, gas_meter, traversal_context, txn_data, @@ -1182,76 +1272,45 @@ impl AptosVM { fn execute_multisig_entry_function( &self, resolver: &impl AptosMoveResolver, - session: &mut SessionExt, + mut session: UserSession<'_, '_>, gas_meter: &mut impl AptosGasMeter, traversal_context: &mut TraversalContext, multisig_address: AccountAddress, payload: &EntryFunction, new_published_modules_loaded: &mut bool, txn_data: &TransactionMetadata, - ) -> Result<(), VMStatus> { + change_set_configs: &ChangeSetConfigs, + ) -> Result { // If txn args are not valid, we'd still consider the transaction as executed but // failed. This is primarily because it's unrecoverable at this point. - self.validate_and_execute_entry_function( - resolver, - session, - gas_meter, - traversal_context, - vec![multisig_address], - payload, - txn_data, - )?; + session.execute(|session| { + self.validate_and_execute_entry_function( + resolver, + session, + gas_meter, + traversal_context, + vec![multisig_address], + payload, + txn_data, + ) + })?; // Resolve any pending module publishes in case the multisig transaction is deploying // modules. 
- self.resolve_pending_code_publish( + self.resolve_pending_code_publish_and_finish_user_session( session, + resolver, gas_meter, traversal_context, new_published_modules_loaded, - )?; - Ok(()) - } - - fn success_multisig_payload_cleanup<'r, 'l>( - &'l self, - resolver: &'r impl AptosMoveResolver, - session: UserSession<'r, 'l>, - gas_meter: &mut impl AptosGasMeter, - txn_data: &'l TransactionMetadata, - cleanup_args: Vec>, - change_set_configs: &ChangeSetConfigs, - traversal_context: &mut TraversalContext, - ) -> Result, VMStatus> { - // Charge gas for write set before we do cleanup. This ensures we don't charge gas for - // cleanup write set changes, which is consistent with outer-level success cleanup - // flow. We also wouldn't need to worry that we run out of gas when doing cleanup. - let mut epilogue_session = self.charge_change_set_and_respawn_session( - session, - resolver, - gas_meter, change_set_configs, - txn_data, - )?; - epilogue_session.execute(|session| { - session - .execute_function_bypass_visibility( - &MULTISIG_ACCOUNT_MODULE, - SUCCESSFUL_TRANSACTION_EXECUTION_CLEANUP, - vec![], - cleanup_args, - &mut UnmeteredGasMeter, - traversal_context, - ) - .map_err(|e| e.into_vm_status()) - })?; - Ok(epilogue_session) + ) } fn failure_multisig_payload_cleanup<'r, 'l>( &'l self, resolver: &'r impl AptosMoveResolver, - prologue_change_set: &VMChangeSet, + prologue_session_change_set: &SystemSessionChangeSet, execution_error: VMStatus, txn_data: &'l TransactionMetadata, mut cleanup_args: Vec>, @@ -1259,13 +1318,12 @@ impl AptosVM { ) -> Result, VMStatus> { // Start a fresh session for running cleanup that does not contain any changes from // the inner function call earlier (since it failed). 
- let mut epilogue_session = EpilogueSession::new( + let mut epilogue_session = EpilogueSession::on_user_session_failure( self, txn_data, resolver, - prologue_change_set.clone(), - 0.into(), - )?; + prologue_session_change_set.clone(), + ); let execution_error = ExecutionError::try_from(execution_error) .map_err(|_| VMStatus::error(StatusCode::UNREACHABLE, None))?; // Serialization is not expected to fail so we're using invariant_violation error here. @@ -1357,132 +1415,155 @@ impl AptosVM { } /// Resolve a pending code publish request registered via the NativeCodeContext. - fn resolve_pending_code_publish( + fn resolve_pending_code_publish_and_finish_user_session( &self, - session: &mut SessionExt, + mut session: UserSession<'_, '_>, + resolver: &impl AptosMoveResolver, gas_meter: &mut impl AptosGasMeter, traversal_context: &mut TraversalContext, new_published_modules_loaded: &mut bool, - ) -> VMResult<()> { - if let Some(PublishRequest { - destination, - bundle, - expected_modules, - allowed_deps, - check_compat: _, - }) = session.extract_publish_request() - { - // TODO: unfortunately we need to deserialize the entire bundle here to handle - // `init_module` and verify some deployment conditions, while the VM need to do - // the deserialization again. Consider adding an API to MoveVM which allows to - // directly pass CompiledModule. - let modules = self.deserialize_module_bundle(&bundle)?; - let modules: &Vec = - traversal_context.referenced_module_bundles.alloc(modules); - - // Note: Feature gating is needed here because the traversal of the dependencies could - // result in shallow-loading of the modules and therefore subtle changes in - // the error semantics. - if self.gas_feature_version >= 15 { - // Charge old versions of the modules, in case of upgrades. 
- session.check_dependencies_and_charge_gas_non_recursive_optional( - gas_meter, - traversal_context, - modules - .iter() - .map(|module| (module.self_addr(), module.self_name())), - )?; - - // Charge all modules in the bundle that is about to be published. - for (module, blob) in modules.iter().zip(bundle.iter()) { - let module_id = &module.self_id(); - gas_meter - .charge_dependency( - true, - module_id.address(), - module_id.name(), - NumBytes::new(blob.code().len() as u64), - ) - .map_err(|err| err.finish(Location::Undefined))?; - } + change_set_configs: &ChangeSetConfigs, + ) -> Result { + session.execute(|session| { + if let Some(publish_request) = session.extract_publish_request() { + let PublishRequest { + destination, + bundle, + expected_modules, + allowed_deps, + check_compat: _, + } = publish_request; + + let modules = self.deserialize_module_bundle(&bundle)?; + let modules: &Vec = + traversal_context.referenced_module_bundles.alloc(modules); + + // Note: Feature gating is needed here because the traversal of the dependencies could + // result in shallow-loading of the modules and therefore subtle changes in + // the error semantics. + if self.gas_feature_version >= 15 { + // Charge old versions of existing modules, in case of upgrades. + for module in modules.iter() { + let addr = module.self_addr(); + let name = module.self_name(); + let state_key = StateKey::module(addr, name); + + // TODO: Allow the check of special addresses to be customized. 
+ if addr.is_special() + || traversal_context.visited.insert((addr, name), ()).is_some() + { + continue; + } + + let size_if_module_exists = resolver + .as_executor_view() + .get_module_state_value_size(&state_key) + .map_err(|e| e.finish(Location::Undefined))?; + + if let Some(size) = size_if_module_exists { + gas_meter + .charge_dependency(false, addr, name, NumBytes::new(size)) + .map_err(|err| { + err.finish(Location::Module(ModuleId::new( + *addr, + name.to_owned(), + ))) + })?; + } + } - // Charge all dependencies. - // - // Must exclude the ones that are in the current bundle because they have not - // been published yet. - let module_ids_in_bundle = modules - .iter() - .map(|module| (module.self_addr(), module.self_name())) - .collect::>(); + // Charge all modules in the bundle that is about to be published. + for (module, blob) in modules.iter().zip(bundle.iter()) { + let module_id = &module.self_id(); + gas_meter + .charge_dependency( + true, + module_id.address(), + module_id.name(), + NumBytes::new(blob.code().len() as u64), + ) + .map_err(|err| err.finish(Location::Undefined))?; + } - session.check_dependencies_and_charge_gas( - gas_meter, - traversal_context, - modules + // Charge all dependencies. + // + // Must exclude the ones that are in the current bundle because they have not + // been published yet. + let module_ids_in_bundle = modules .iter() - .flat_map(|module| { - module - .immediate_dependencies_iter() - .chain(module.immediate_friends_iter()) - }) - .filter(|addr_and_name| !module_ids_in_bundle.contains(addr_and_name)), - )?; + .map(|module| (module.self_addr(), module.self_name())) + .collect::>(); - // TODO: Revisit the order of traversal. Consider switching to alphabetical order. 
- } + session.check_dependencies_and_charge_gas( + gas_meter, + traversal_context, + modules + .iter() + .flat_map(|module| { + module + .immediate_dependencies_iter() + .chain(module.immediate_friends_iter()) + }) + .filter(|addr_and_name| !module_ids_in_bundle.contains(addr_and_name)), + )?; - if self - .timed_features() - .is_enabled(TimedFeatureFlag::ModuleComplexityCheck) - { - for (module, blob) in modules.iter().zip(bundle.iter()) { - // TODO(Gas): Make budget configurable. - let budget = 2048 + blob.code().len() as u64 * 20; - move_binary_format::check_complexity::check_module_complexity(module, budget) + // TODO: Revisit the order of traversal. Consider switching to alphabetical order. + } + + if self + .timed_features() + .is_enabled(TimedFeatureFlag::ModuleComplexityCheck) + { + for (module, blob) in modules.iter().zip(bundle.iter()) { + // TODO(Gas): Make budget configurable. + let budget = 2048 + blob.code().len() as u64 * 20; + move_binary_format::check_complexity::check_module_complexity( + module, budget, + ) .map_err(|err| err.finish(Location::Undefined))?; + } } - } - // Validate the module bundle - self.validate_publish_request(session, modules, expected_modules, allowed_deps)?; + // Validate the module bundle + self.validate_publish_request(session, modules, expected_modules, allowed_deps)?; - // Check what modules exist before publishing. - let mut exists = BTreeSet::new(); - for m in modules { - let id = m.self_id(); - if session.exists_module(&id)? { - exists.insert(id); + // Check what modules exist before publishing. + let mut exists = BTreeSet::new(); + for m in modules { + let id = m.self_id(); + if session.exists_module(&id)? { + exists.insert(id); + } } - } - // Publish the bundle and execute initializers - // publish_module_bundle doesn't actually load the published module into - // the loader cache. It only puts the module data in the data cache. 
- return_on_failure!(session.publish_module_bundle_with_compat_config( - bundle.into_inner(), - destination, - gas_meter, - Compatibility::new( - true, - true, - !self - .features() - .is_enabled(FeatureFlag::TREAT_FRIEND_AS_PRIVATE), - ), - )); + // Publish the bundle and execute initializers + // publish_module_bundle doesn't actually load the published module into + // the loader cache. It only puts the module data in the data cache. + session.publish_module_bundle_with_compat_config( + bundle.into_inner(), + destination, + gas_meter, + Compatibility::new( + true, + !self + .features() + .is_enabled(FeatureFlag::TREAT_FRIEND_AS_PRIVATE), + ), + )?; - self.execute_module_initialization( - session, - gas_meter, - modules, - exists, - &[destination], - new_published_modules_loaded, - traversal_context, - ) - } else { - Ok(()) - } + self.execute_module_initialization( + session, + gas_meter, + modules, + exists, + &[destination], + new_published_modules_loaded, + traversal_context, + )?; + } + Ok::<(), VMError>(()) + })?; + session.finish(change_set_configs) } /// Validate a publish request. 
@@ -1499,6 +1580,14 @@ impl AptosVM { { self.reject_unstable_bytecode(modules)?; } + + if self + .features() + .is_enabled(FeatureFlag::DISALLOW_USER_NATIVES) + { + verifier::native_validation::validate_module_natives(modules)?; + } + for m in modules { if !expected_modules.remove(m.self_id().name().as_str()) { return Err(Self::metadata_validation_error(&format!( @@ -1561,6 +1650,22 @@ impl AptosVM { Ok(()) } + /// Check whether the script can be run on mainnet based on the unstable tag in the metadata + pub fn reject_unstable_bytecode_for_script(&self, module: &CompiledScript) -> VMResult<()> { + if self.chain_id().is_mainnet() { + if let Some(metadata) = + aptos_framework::get_compilation_metadata_from_compiled_script(module) + { + if metadata.unstable { + return Err(PartialVMError::new(StatusCode::UNSTABLE_BYTECODE_REJECTED) + .with_message("script marked unstable cannot be run on mainnet".to_string()) + .finish(Location::Script)); + } + } + } + Ok(()) + } + fn metadata_validation_error(msg: &str) -> VMError { PartialVMError::new(StatusCode::CONSTRAINT_NOT_SATISFIED) .with_message(format!("metadata and code bundle mismatch: {}", msg)) @@ -1616,7 +1721,7 @@ impl AptosVM { // transaction, or clean up the failed state. fn on_user_transaction_execution_failure( &self, - prologue_change_set: VMChangeSet, + prologue_session_change_set: SystemSessionChangeSet, err: VMStatus, resolver: &impl AptosMoveResolver, txn_data: &TransactionMetadata, @@ -1636,7 +1741,7 @@ impl AptosVM { }; self.failed_transaction_cleanup( - prologue_change_set, + prologue_session_change_set, err, gas_meter, txn_data, @@ -1660,9 +1765,7 @@ impl AptosVM { let mut traversal_context = TraversalContext::new(&traversal_storage); // Revalidate the transaction. 
- let mut prologue_session = - unwrap_or_discard!(PrologueSession::new(self, &txn_data, resolver)); - + let mut prologue_session = PrologueSession::new(self, &txn_data, resolver); let exec_result = prologue_session.execute(|session| { self.validate_signed_transaction( session, @@ -1864,23 +1967,19 @@ impl AptosVM { write_set_payload: &WriteSetPayload, txn_sender: Option, session_id: SessionId, - ) -> Result { - let change_set_configs = - ChangeSetConfigs::unlimited_at_gas_feature_version(self.gas_feature_version); - + ) -> Result<(VMChangeSet, ModuleWriteSet), VMStatus> { match write_set_payload { WriteSetPayload::Direct(change_set) => { // this transaction is never delayed field capable. // it requires restarting execution afterwards, // which allows it to be used as last transaction in delayed_field_enabled context. - let change = VMChangeSet::try_from_storage_change_set_with_delayed_field_optimization_disabled( - change_set.clone(), - &change_set_configs, - ) - .map_err(|e| e.into_vm_status())?; + let (change_set, module_write_set) = + create_vm_change_set_with_module_write_set_when_delayed_field_optimization_disabled( + change_set.clone(), + ); // validate_waypoint_change_set checks that this is true, so we only log here. - if !Self::should_restart_execution(&change) { + if !Self::should_restart_execution(change_set.events()) { // This invariant needs to hold irrespectively, so we log error always. // but if we are in delayed_field_optimization_capable context, we cannot execute any transaction after this. 
// as transaction afterwards would be executed assuming delayed fields are exchanged and @@ -1890,7 +1989,7 @@ impl AptosVM { "[aptos_vm] direct write set finished without requiring should_restart_execution"); } - Ok(change) + Ok((change_set, module_write_set)) }, WriteSetPayload::Script { script, execute_as } => { let mut tmp_session = self.new_session(resolver, session_id, None); @@ -1909,7 +2008,14 @@ impl AptosVM { senders, script, )?; - Ok(tmp_session.finish(&change_set_configs)?) + + let change_set_configs = + ChangeSetConfigs::unlimited_at_gas_feature_version(self.gas_feature_version); + + // TODO(George): This session should not publish modules, and should be using native + // code context instead. + let (change_set, module_write_set) = tmp_session.finish(&change_set_configs)?; + Ok((change_set, module_write_set)) }, } } @@ -1919,6 +2025,7 @@ impl AptosVM { executor_view: &dyn ExecutorView, resource_group_view: &dyn ResourceGroupView, change_set: &VMChangeSet, + module_write_set: &ModuleWriteSet, ) -> PartialVMResult<()> { assert!( change_set.aggregator_v1_write_set().is_empty(), @@ -1927,7 +2034,7 @@ impl AptosVM { // All Move executions satisfy the read-before-write property. Thus we need to read each // access path that the write set is going to update. 
- for state_key in change_set.module_write_set().keys() { + for state_key in module_write_set.write_ops().keys() { executor_view.get_module_state_value(state_key)?; } for (state_key, write_op) in change_set.resource_write_set().iter() { @@ -1947,15 +2054,13 @@ impl AptosVM { } fn validate_waypoint_change_set( - change_set: &VMChangeSet, + events: &[(ContractEvent, Option)], log_context: &AdapterLogSchema, ) -> Result<(), VMStatus> { - let has_new_block_event = change_set - .events() + let has_new_block_event = events .iter() .any(|(e, _)| e.event_key() == Some(&new_block_event_key())); - let has_new_epoch_event = change_set - .events() + let has_new_epoch_event = events .iter() .any(|(e, _)| e.event_key() == Some(&new_epoch_event_key())); if has_new_block_event && has_new_epoch_event { @@ -1977,18 +2082,19 @@ impl AptosVM { ) -> Result<(VMStatus, VMOutput), VMStatus> { // TODO: user specified genesis id to distinguish different genesis write sets let genesis_id = HashValue::zero(); - let change_set = self.execute_write_set( + let (change_set, module_write_set) = self.execute_write_set( resolver, &write_set_payload, Some(account_config::reserved_vm_address()), SessionId::genesis(genesis_id), )?; - Self::validate_waypoint_change_set(&change_set, log_context)?; + Self::validate_waypoint_change_set(change_set.events(), log_context)?; self.read_change_set( resolver.as_executor_view(), resolver.as_resource_group_view(), &change_set, + &module_write_set, ) .map_err(|e| e.finish(Location::Undefined).into_vm_status())?; @@ -1996,8 +2102,9 @@ impl AptosVM { let output = VMOutput::new( change_set, + module_write_set, FeeStatement::zero(), - TransactionStatus::from_executed_vm_status(VMStatus::Executed), + TransactionStatus::Keep(ExecutionStatus::Success), TransactionAuxiliaryData::default(), ); Ok((VMStatus::Executed, output)) @@ -2041,8 +2148,6 @@ impl AptosVM { let output = get_system_transaction_output( session, - FeeStatement::zero(), - ExecutionStatus::Success, 
&get_or_vm_startup_failure(&self.storage_gas_params, log_context)?.change_set_configs, )?; Ok((VMStatus::Executed, output)) @@ -2122,8 +2227,6 @@ impl AptosVM { let output = get_system_transaction_output( session, - FeeStatement::zero(), - ExecutionStatus::Success, &get_or_vm_startup_failure(&self.storage_gas_params, log_context)?.change_set_configs, )?; Ok((VMStatus::Executed, output)) @@ -2259,8 +2362,10 @@ impl AptosVM { transaction_validation::run_script_prologue( session, txn_data, + self.features(), log_context, traversal_context, + self.is_simulation, ) }, TransactionPayload::Multisig(multisig_payload) => { @@ -2270,13 +2375,18 @@ impl AptosVM { transaction_validation::run_script_prologue( session, txn_data, + self.features(), log_context, traversal_context, + self.is_simulation, )?; - // Skip validation if this is part of tx simulation. - // This allows simulating multisig txs without having to first create the multisig - // tx. - if !self.is_simulation { + // Once "simulation_enhancement" is enabled, the simulation path also validates the + // multisig transaction by running the multisig prologue. 
+ if !self.is_simulation + || self + .features() + .is_transaction_simulation_enhancement_enabled() + { transaction_validation::run_multisig_prologue( session, txn_data, @@ -2295,10 +2405,9 @@ impl AptosVM { } } - pub fn should_restart_execution(vm_change_set: &VMChangeSet) -> bool { + pub fn should_restart_execution(events: &[(ContractEvent, Option)]) -> bool { let new_epoch_event_key = new_epoch_event_key(); - vm_change_set - .events() + events .iter() .any(|(event, _)| event.event_key() == Some(&new_epoch_event_key)) } @@ -2467,7 +2576,6 @@ impl VMExecutor for AptosVM { _, NoOpTransactionCommitHook, >( - Arc::clone(&RAYON_EXEC_POOL), transactions, state_view, BlockExecutorConfig { @@ -2559,6 +2667,19 @@ impl VMValidator for AptosVM { } } + if !self + .features() + .is_enabled(FeatureFlag::ALLOW_SERIALIZED_SCRIPT_ARGS) + { + if let TransactionPayload::Script(script) = transaction.payload() { + for arg in script.args() { + if let TransactionArgument::Serialized(_) = arg { + return VMValidatorResult::error(StatusCode::FEATURE_UNDER_GATING); + } + } + } + } + let txn = match transaction.check_signature() { Ok(t) => t, _ => { @@ -2675,8 +2796,13 @@ pub(crate) fn is_account_init_for_sponsored_transaction( && txn_data.fee_payer.is_some() && txn_data.sequence_number == 0 && resolver - .get_resource(&txn_data.sender(), &AccountResource::struct_tag()) - .map(|data| data.is_none()) + .get_resource_bytes_with_metadata_and_layout( + &txn_data.sender(), + &AccountResource::struct_tag(), + &resolver.get_module_metadata(&AccountResource::struct_tag().module_id()), + None, + ) + .map(|(data, _)| data.is_none()) .map_err(|e| e.finish(Location::Undefined))?, ) } diff --git a/aptos-move/aptos-vm/src/block_executor/mod.rs b/aptos-move/aptos-vm/src/block_executor/mod.rs index 1aec0a6254cf7..08f3dcd0e3ce9 100644 --- a/aptos-move/aptos-vm/src/block_executor/mod.rs +++ b/aptos-move/aptos-vm/src/block_executor/mod.rs @@ -40,13 +40,23 @@ use move_core_types::{ vm_status::{StatusCode, 
VMStatus}, }; use move_vm_types::delayed_values::delayed_field_id::DelayedFieldID; -use once_cell::sync::OnceCell; +use once_cell::sync::{Lazy, OnceCell}; use rayon::ThreadPool; use std::{ collections::{BTreeMap, HashSet}, sync::Arc, }; +pub static RAYON_EXEC_POOL: Lazy> = Lazy::new(|| { + Arc::new( + rayon::ThreadPoolBuilder::new() + .num_threads(num_cpus::get()) + .thread_name(|index| format!("par_exec-{}", index)) + .build() + .unwrap(), + ) +}); + /// Output type wrapper used by block executor. VM output is stored first, then /// transformed into TransactionOutput type that is returned. #[derive(Debug)] @@ -114,7 +124,6 @@ impl BlockExecutorTransactionOutput for AptosTransactionOutput { .lock() .as_ref() .expect("Output must be set to get resource group writes") - .change_set() .resource_write_set() .iter() .flat_map(|(key, write)| { @@ -143,7 +152,6 @@ impl BlockExecutorTransactionOutput for AptosTransactionOutput { .lock() .as_ref() .expect("Output must be set to get metadata ops") - .change_set() .resource_write_set() .iter() .flat_map(|(key, write)| { @@ -162,7 +170,6 @@ impl BlockExecutorTransactionOutput for AptosTransactionOutput { .lock() .as_ref() .expect("Output must be set to get resource writes") - .change_set() .resource_write_set() .iter() .flat_map(|(key, write)| match write { @@ -185,7 +192,6 @@ impl BlockExecutorTransactionOutput for AptosTransactionOutput { .lock() .as_ref() .expect("Output must be set to get module writes") - .change_set() .module_write_set() .clone() } @@ -196,7 +202,6 @@ impl BlockExecutorTransactionOutput for AptosTransactionOutput { .lock() .as_ref() .expect("Output must be set to get aggregator V1 writes") - .change_set() .aggregator_v1_write_set() .clone() } @@ -207,7 +212,6 @@ impl BlockExecutorTransactionOutput for AptosTransactionOutput { .lock() .as_ref() .expect("Output must be set to get deltas") - .change_set() .aggregator_v1_delta_set() .iter() .map(|(key, op)| (key.clone(), *op)) @@ -220,7 +224,6 @@ impl 
BlockExecutorTransactionOutput for AptosTransactionOutput { .lock() .as_ref() .expect("Output must be set to get aggregator change set") - .change_set() .delayed_field_change_set() .clone() } @@ -232,7 +235,6 @@ impl BlockExecutorTransactionOutput for AptosTransactionOutput { .lock() .as_ref() .expect("Output to be set to get reads") - .change_set() .resource_write_set() .iter() .flat_map(|(key, write)| { @@ -250,7 +252,6 @@ impl BlockExecutorTransactionOutput for AptosTransactionOutput { .lock() .as_ref() .expect("Output to be set to get reads") - .change_set() .resource_write_set() .iter() .flat_map(|(key, write)| { @@ -271,7 +272,6 @@ impl BlockExecutorTransactionOutput for AptosTransactionOutput { .lock() .as_ref() .expect("Output must be set to get events") - .change_set() .events() .to_vec() } @@ -339,33 +339,21 @@ impl BlockExecutorTransactionOutput for AptosTransactionOutput { fn output_approx_size(&self) -> u64 { let vm_output = self.vm_output.lock(); - let change_set = vm_output + vm_output .as_ref() - .expect("Output to be set to get write summary") - .change_set(); - - let mut size = 0; - for (state_key, write_size) in change_set.write_set_size_iter() { - size += state_key.size() as u64 + write_size.write_len().unwrap_or(0); - } - - for (event, _) in change_set.events() { - size += event.size() as u64; - } - - size + .expect("Output to be set to get approximate size") + .materialized_size() } fn get_write_summary(&self) -> HashSet> { let vm_output = self.vm_output.lock(); - let change_set = vm_output + let output = vm_output .as_ref() - .expect("Output to be set to get write summary") - .change_set(); + .expect("Output to be set to get write summary"); let mut writes = HashSet::new(); - for (state_key, write) in change_set.resource_write_set() { + for (state_key, write) in output.resource_write_set() { match write { AbstractResourceWriteOp::Write(_) | AbstractResourceWriteOp::WriteWithDelayedFields(_) => { @@ -385,7 +373,7 @@ impl 
BlockExecutorTransactionOutput for AptosTransactionOutput { } } - for identifier in change_set.delayed_field_change_set().keys() { + for identifier in output.delayed_field_change_set().keys() { writes.insert(InputOutputKey::DelayedField(*identifier)); } @@ -393,10 +381,10 @@ impl BlockExecutorTransactionOutput for AptosTransactionOutput { } } -pub struct BlockAptosVM(); +pub struct BlockAptosVM; impl BlockAptosVM { - pub fn execute_block< + pub fn execute_block_on_thread_pool< S: StateView + Sync, L: TransactionCommitHook, >( @@ -455,4 +443,23 @@ impl BlockAptosVM { Err(BlockExecutionError::FatalVMError(err)) => Err(err), } } + + /// Uses shared thread pool to execute blocks. + pub fn execute_block< + S: StateView + Sync, + L: TransactionCommitHook, + >( + signature_verified_block: &[SignatureVerifiedTransaction], + state_view: &S, + config: BlockExecutorConfig, + transaction_commit_listener: Option, + ) -> Result, VMStatus> { + Self::execute_block_on_thread_pool::( + Arc::clone(&RAYON_EXEC_POOL), + signature_verified_block, + state_view, + config, + transaction_commit_listener, + ) + } } diff --git a/aptos-move/aptos-vm/src/block_executor/vm_wrapper.rs b/aptos-move/aptos-vm/src/block_executor/vm_wrapper.rs index a73dc4bfd78d9..058ea9de2553d 100644 --- a/aptos-move/aptos-vm/src/block_executor/vm_wrapper.rs +++ b/aptos-move/aptos-vm/src/block_executor/vm_wrapper.rs @@ -33,7 +33,7 @@ impl ExecutorTask for AptosExecutorTask { type Txn = SignatureVerifiedTransaction; fn init(env: Self::Environment, state_view: &impl StateView) -> Self { - let vm = AptosVM::new_with_environment(env, state_view); + let vm = AptosVM::new_with_environment(env, state_view, false); let id = state_view.id(); Self { vm, id } } @@ -76,7 +76,7 @@ impl ExecutorTask for AptosExecutorTask { ExecutionStatus::DelayedFieldsCodeInvariantError( vm_status.message().cloned().unwrap_or_default(), ) - } else if AptosVM::should_restart_execution(vm_output.change_set()) { + } else if 
AptosVM::should_restart_execution(vm_output.events()) { speculative_info!( &log_context, "Reconfiguration occurred: restart required".into() diff --git a/aptos-move/aptos-vm/src/data_cache.rs b/aptos-move/aptos-vm/src/data_cache.rs index da4e7a544c1f4..7861880b40479 100644 --- a/aptos-move/aptos-vm/src/data_cache.rs +++ b/aptos-move/aptos-vm/src/data_cache.rs @@ -40,10 +40,12 @@ use move_core_types::{ account_address::AccountAddress, language_storage::{ModuleId, StructTag}, metadata::Metadata, - resolver::{resource_size, ModuleResolver, ResourceResolver}, value::MoveTypeLayout, }; -use move_vm_types::delayed_values::delayed_field_id::DelayedFieldID; +use move_vm_types::{ + delayed_values::delayed_field_id::DelayedFieldID, + resolver::{resource_size, ModuleResolver, ResourceResolver}, +}; use std::{ cell::RefCell, collections::{BTreeMap, HashMap, HashSet}, @@ -172,22 +174,18 @@ impl<'e, E: ExecutorView> ResourceGroupResolver for StorageAdapter<'e, E> { impl<'e, E: ExecutorView> AptosMoveResolver for StorageAdapter<'e, E> {} impl<'e, E: ExecutorView> ResourceResolver for StorageAdapter<'e, E> { - type Error = PartialVMError; - fn get_resource_bytes_with_metadata_and_layout( &self, address: &AccountAddress, struct_tag: &StructTag, metadata: &[Metadata], maybe_layout: Option<&MoveTypeLayout>, - ) -> Result<(Option, usize), Self::Error> { + ) -> PartialVMResult<(Option, usize)> { self.get_any_resource_with_layout(address, struct_tag, metadata, maybe_layout) } } impl<'e, E: ExecutorView> ModuleResolver for StorageAdapter<'e, E> { - type Error = PartialVMError; - fn get_module_metadata(&self, module_id: &ModuleId) -> Vec { let module_bytes = match self.get_module(module_id) { Ok(Some(bytes)) => bytes, @@ -202,7 +200,7 @@ impl<'e, E: ExecutorView> ModuleResolver for StorageAdapter<'e, E> { module.metadata } - fn get_module(&self, module_id: &ModuleId) -> Result, Self::Error> { + fn get_module(&self, module_id: &ModuleId) -> PartialVMResult> { self.executor_view 
.get_module_bytes(&StateKey::module_id(module_id)) } diff --git a/aptos-move/aptos-vm/src/errors.rs b/aptos-move/aptos-vm/src/errors.rs index 629c576875b37..f054a7c819222 100644 --- a/aptos-move/aptos-vm/src/errors.rs +++ b/aptos-move/aptos-vm/src/errors.rs @@ -147,6 +147,13 @@ pub fn convert_prologue_error( }; VMStatus::error(new_major_status, None) }, + // Speculative errors are returned for caller to handle. + e @ VMStatus::Error { + status_code: + StatusCode::SPECULATIVE_EXECUTION_ABORT_ERROR + | StatusCode::DELAYED_MATERIALIZATION_CODE_INVARIANT_ERROR, + .. + } => e, status @ VMStatus::ExecutionFailure { .. } | status @ VMStatus::Error { .. } => { speculative_error!( log_context, @@ -196,6 +203,13 @@ pub fn convert_epilogue_error( ) }, }, + // Speculative errors are returned for caller to handle. + e @ VMStatus::Error { + status_code: + StatusCode::SPECULATIVE_EXECUTION_ABORT_ERROR + | StatusCode::DELAYED_MATERIALIZATION_CODE_INVARIANT_ERROR, + .. + } => e, status => { let err_msg = format!("[aptos_vm] Unexpected success epilogue error: {:?}", status); speculative_error!(log_context, err_msg.clone()); diff --git a/aptos-move/aptos-vm/src/gas.rs b/aptos-move/aptos-vm/src/gas.rs index 619ba71f3d68e..d5131d297828d 100644 --- a/aptos-move/aptos-vm/src/gas.rs +++ b/aptos-move/aptos-vm/src/gas.rs @@ -243,7 +243,7 @@ pub(crate) fn check_gas( speculative_warn!( log_context, format!( - "[VM] Gas unit error; min {}, submitted {}", + "[VM] Gas unit error; max {}, submitted {}", txn_gas_params.max_price_per_gas_unit, txn_metadata.gas_unit_price() ), diff --git a/aptos-move/aptos-vm/src/keyless_validation.rs b/aptos-move/aptos-vm/src/keyless_validation.rs index 72164a82c1cba..7ea08048986e4 100644 --- a/aptos-move/aptos-vm/src/keyless_validation.rs +++ b/aptos-move/aptos-vm/src/keyless_validation.rs @@ -6,10 +6,10 @@ use crate::move_vm_ext::AptosMoveResolver; use aptos_crypto::ed25519::Ed25519PublicKey; use aptos_types::{ invalid_signature, - jwks::{jwk::JWK, PatchedJWKs}, 
+ jwks::{jwk::JWK, AllProvidersJWKs, FederatedJWKs, PatchedJWKs}, keyless::{ - get_public_inputs_hash, Configuration, EphemeralCertificate, Groth16ProofAndStatement, - Groth16VerificationKey, KeylessPublicKey, KeylessSignature, ZKP, + get_public_inputs_hash, AnyKeylessPublicKey, Configuration, EphemeralCertificate, + Groth16ProofAndStatement, Groth16VerificationKey, KeylessPublicKey, KeylessSignature, ZKP, }, on_chain_config::{CurrentTimeMicroseconds, Features, OnChainConfig}, transaction::authenticator::{EphemeralPublicKey, EphemeralSignature}, @@ -18,7 +18,10 @@ use aptos_types::{ use ark_bn254::Bn254; use ark_groth16::PreparedVerifyingKey; use move_binary_format::errors::Location; -use move_core_types::{language_storage::CORE_CODE_ADDRESS, move_resource::MoveStructType}; +use move_core_types::{ + account_address::AccountAddress, language_storage::CORE_CODE_ADDRESS, + move_resource::MoveStructType, +}; use serde::Deserialize; macro_rules! value_deserialization_error { @@ -33,13 +36,22 @@ macro_rules! value_deserialization_error { fn get_resource_on_chain Deserialize<'a>>( resolver: &impl AptosMoveResolver, ) -> anyhow::Result { + get_resource_on_chain_at_addr(&CORE_CODE_ADDRESS, resolver) +} + +fn get_resource_on_chain_at_addr Deserialize<'a>>( + addr: &AccountAddress, + resolver: &impl AptosMoveResolver, +) -> anyhow::Result { + let metadata = resolver.get_module_metadata(&T::struct_tag().module_id()); let bytes = resolver - .get_resource(&CORE_CODE_ADDRESS, &T::struct_tag()) + .get_resource_bytes_with_metadata_and_layout(addr, &T::struct_tag(), &metadata, None) .map_err(|e| e.finish(Location::Undefined).into_vm_status())? 
+ .0 .ok_or_else(|| { value_deserialization_error!(format!( "get_resource failed on {}::{}::{}", - CORE_CODE_ADDRESS.to_hex_literal(), + addr.to_hex_literal(), T::struct_tag().module, T::struct_tag().name )) @@ -47,7 +59,7 @@ fn get_resource_on_chain Deserialize<'a>>( let obj = bcs::from_bytes::(&bytes).map_err(|_| { value_deserialization_error!(format!( "could not deserialize {}::{}::{}", - CORE_CODE_ADDRESS.to_hex_literal(), + addr.to_hex_literal(), T::struct_tag().module, T::struct_tag().name )) @@ -68,6 +80,13 @@ fn get_jwks_onchain(resolver: &impl AptosMoveResolver) -> anyhow::Result anyhow::Result { + get_resource_on_chain_at_addr::(jwk_addr, resolver) +} + pub(crate) fn get_groth16_vk_onchain( resolver: &impl AptosMoveResolver, ) -> anyhow::Result { @@ -80,14 +99,23 @@ fn get_configs_onchain( get_resource_on_chain::(resolver) } +// Fetches a JWK from the PatchedJWKs dictionary (which maps each `iss` to its set of JWKs) +// +// This could fail for several reasons: +// - alg field mismatch: JWT header vs JWK +// - bad JWT header +// - bad Any serialization (something is really wrong) +// - did not find the JWK for the kid +// - found the JWK for the kid but it is an UnsupportedJWK fn get_jwk_for_authenticator( - jwks: &PatchedJWKs, + jwks: &AllProvidersJWKs, pk: &KeylessPublicKey, sig: &KeylessSignature, ) -> Result { let jwt_header = sig .parse_jwt_header() .map_err(|_| invalid_signature!("Failed to parse JWT header"))?; + let jwk_move_struct = jwks.get_jwk(&pk.iss_val, &jwt_header.kid).map_err(|_| { invalid_signature!(format!( "JWK for {} with KID {} was not found", @@ -122,12 +150,12 @@ fn get_jwk_for_authenticator( /// Ensures that **all** keyless authenticators in the transaction are valid. 
pub(crate) fn validate_authenticators( pvk: &Option>, - authenticators: &Vec<(KeylessPublicKey, KeylessSignature)>, + authenticators: &Vec<(AnyKeylessPublicKey, KeylessSignature)>, features: &Features, resolver: &impl AptosMoveResolver, ) -> Result<(), VMStatus> { let mut with_zk = false; - for (_, sig) in authenticators { + for (pk, sig) in authenticators { // Feature-gating for keyless TXNs (whether ZK or ZKless, whether passkey-based or not) if matches!(sig.cert, EphemeralCertificate::ZeroKnowledgeSig { .. }) { if !features.is_zk_keyless_enabled() { @@ -146,6 +174,11 @@ pub(crate) fn validate_authenticators( { return Err(VMStatus::error(StatusCode::FEATURE_UNDER_GATING, None)); } + if matches!(pk, AnyKeylessPublicKey::Federated { .. }) + && !features.is_federated_keyless_enabled() + { + return Err(VMStatus::error(StatusCode::FEATURE_UNDER_GATING, None)); + } } // If there are ZK authenticators, the Groth16 VK must have been set on-chain. @@ -162,11 +195,12 @@ pub(crate) fn validate_authenticators( let onchain_timestamp_obj = get_current_time_onchain(resolver)?; // Check the expiry timestamp on all authenticators first to fail fast for (_, sig) in authenticators { - sig.verify_expiry(&onchain_timestamp_obj).map_err(|_| { - // println!("[aptos-vm][groth16] ZKP expired"); + sig.verify_expiry(onchain_timestamp_obj.microseconds) + .map_err(|_| { + // println!("[aptos-vm][groth16] ZKP expired"); - invalid_signature!("The ephemeral keypair has expired") - })?; + invalid_signature!("The ephemeral keypair has expired") + })?; } let patched_jwks = get_jwks_onchain(resolver)?; @@ -185,7 +219,30 @@ pub(crate) fn validate_authenticators( }; for (pk, sig) in authenticators { - let jwk = get_jwk_for_authenticator(&patched_jwks, pk, sig)?; + // Try looking up the jwk in 0x1. + let jwk = match get_jwk_for_authenticator(&patched_jwks.jwks, pk.inner_keyless_pk(), sig) { + // 1: If found in 0x1, then we consider that the ground truth & we are done. 
+ Ok(jwk) => jwk, + // 2: If not found in 0x1, we check the Keyless PK type. + Err(e) => { + match pk { + // 2.a: If this is a federated keyless account; look in `jwk_addr` for JWKs + AnyKeylessPublicKey::Federated(fed_pk) => { + let federated_jwks = get_federated_jwks_onchain(resolver, &fed_pk.jwk_addr) + .map_err(|_| { + invalid_signature!(format!( + "Could not fetch federated PatchedJWKs at {}", + fed_pk.jwk_addr + )) + })?; + // 2.a.i If not found in jwk_addr either, then we fail the validation. + get_jwk_for_authenticator(&federated_jwks.jwks, pk.inner_keyless_pk(), sig)? + }, + // 2.b: If this is not a federated keyless account, then we fail the validation. + AnyKeylessPublicKey::Normal(_) => return Err(e), + } + }, + }; match &sig.cert { EphemeralCertificate::ZeroKnowledgeSig(zksig) => match jwk { @@ -205,7 +262,10 @@ pub(crate) fn validate_authenticators( ZKP::Groth16(groth16proof) => { // let start = std::time::Instant::now(); let public_inputs_hash = get_public_inputs_hash( - sig, pk, &rsa_jwk, config, + sig, + pk.inner_keyless_pk(), + &rsa_jwk, + config, ) .map_err(|_| { // println!("[aptos-vm][groth16] PIH computation failed"); @@ -267,7 +327,12 @@ pub(crate) fn validate_authenticators( match jwk { JWK::RSA(rsa_jwk) => { openid_sig - .verify_jwt_claims(sig.exp_date_secs, &sig.ephemeral_pubkey, pk, config) + .verify_jwt_claims( + sig.exp_date_secs, + &sig.ephemeral_pubkey, + pk.inner_keyless_pk(), + config, + ) .map_err(|_| invalid_signature!("OpenID claim verification failed"))?; // TODO(OpenIdSig): Implement batch verification for all RSA signatures in diff --git a/aptos-move/aptos-vm/src/move_vm_ext/mod.rs b/aptos-move/aptos-vm/src/move_vm_ext/mod.rs index bcd7a1f381d1e..7348a280dffdf 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/mod.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/mod.rs @@ -21,6 +21,7 @@ use move_core_types::{ account_address::AccountAddress, language_storage::StructTag, vm_status::StatusCode, }; pub use 
session::session_id::SessionId; +pub use warm_vm_cache::flush_warm_vm_cache; pub(crate) fn resource_state_key( address: &AccountAddress, diff --git a/aptos-move/aptos-vm/src/move_vm_ext/resolver.rs b/aptos-move/aptos-vm/src/move_vm_ext/resolver.rs index 0df61e5088f17..1791a38bb08d6 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/resolver.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/resolver.rs @@ -8,8 +8,9 @@ use aptos_vm_types::resolver::{ ExecutorView, ResourceGroupSize, ResourceGroupView, StateStorageView, }; use bytes::Bytes; -use move_binary_format::errors::{PartialVMError, PartialVMResult}; -use move_core_types::{language_storage::StructTag, resolver::MoveResolver}; +use move_binary_format::errors::PartialVMResult; +use move_core_types::language_storage::StructTag; +use move_vm_types::resolver::{ModuleResolver, ResourceResolver}; use std::collections::{BTreeMap, HashMap}; /// A general resolver used by AptosVM. Allows to implement custom hooks on @@ -19,7 +20,8 @@ pub trait AptosMoveResolver: AggregatorV1Resolver + ConfigStorage + DelayedFieldResolver - + MoveResolver + + ModuleResolver + + ResourceResolver + ResourceGroupResolver + StateStorageView + TableResolver diff --git a/aptos-move/aptos-vm/src/move_vm_ext/session/mod.rs b/aptos-move/aptos-vm/src/move_vm_ext/session/mod.rs index d08cd89af0f1d..7e8b576cb1248 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/session/mod.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/session/mod.rs @@ -23,7 +23,10 @@ use aptos_types::{ state_store::state_key::StateKey, transaction::user_transaction_context::UserTransactionContext, }; -use aptos_vm_types::{change_set::VMChangeSet, storage::change_set_configs::ChangeSetConfigs}; +use aptos_vm_types::{ + change_set::VMChangeSet, module_write_set::ModuleWriteSet, + storage::change_set_configs::ChangeSetConfigs, +}; use bytes::Bytes; use move_binary_format::errors::{Location, PartialVMError, PartialVMResult, VMResult}; use move_core_types::{ @@ -112,7 +115,7 @@ impl<'r, 'l> 
SessionExt<'r, 'l> { } } - pub fn finish(self, configs: &ChangeSetConfigs) -> VMResult { + pub fn finish(self, configs: &ChangeSetConfigs) -> VMResult<(VMChangeSet, ModuleWriteSet)> { let move_vm = self.inner.get_move_vm(); let resource_converter = |value: Value, @@ -161,18 +164,18 @@ impl<'r, 'l> SessionExt<'r, 'l> { let woc = WriteOpConverter::new(self.resolver, self.is_storage_slot_metadata_enabled); - let change_set = Self::convert_change_set( + let (change_set, module_write_set) = Self::convert_change_set( &woc, change_set, resource_group_change_set, events, table_change_set, aggregator_change_set, - configs, + configs.legacy_resource_creation_as_modification(), ) .map_err(|e| e.finish(Location::Undefined))?; - Ok(change_set) + Ok((change_set, module_write_set)) } pub fn extract_publish_request(&mut self) -> Option { @@ -247,12 +250,12 @@ impl<'r, 'l> SessionExt<'r, 'l> { /// * If group or data doesn't exist, Unreachable /// * Otherwise modify /// * Delete -- remove element from container - /// * If group or data does't exist, Unreachable + /// * If group or data doesn't exist, Unreachable /// * If elements remain, Modify /// * Otherwise delete /// /// V1 Resource group change set behavior keeps ops for individual resources separate, not - /// merging them into the a single op corresponding to the whole resource group (V0). + /// merging them into a single op corresponding to the whole resource group (V0). 
fn split_and_merge_resource_groups( runtime: &MoveVM, resolver: &dyn AptosMoveResolver, @@ -348,18 +351,21 @@ impl<'r, 'l> SessionExt<'r, 'l> { Ok((change_set_filtered, resource_group_change_set)) } - pub(crate) fn convert_change_set( + fn convert_change_set( woc: &WriteOpConverter, change_set: ChangeSet, resource_group_change_set: ResourceGroupChangeSet, events: Vec<(ContractEvent, Option)>, table_change_set: TableChangeSet, aggregator_change_set: AggregatorChangeSet, - configs: &ChangeSetConfigs, - ) -> PartialVMResult { + legacy_resource_creation_as_modification: bool, + ) -> PartialVMResult<(VMChangeSet, ModuleWriteSet)> { let mut resource_write_set = BTreeMap::new(); let mut resource_group_write_set = BTreeMap::new(); - let mut module_write_set = BTreeMap::new(); + + let mut has_modules_published_to_special_address = false; + let mut module_write_ops = BTreeMap::new(); + let mut aggregator_v1_write_set = BTreeMap::new(); let mut aggregator_v1_delta_set = BTreeMap::new(); @@ -370,16 +376,19 @@ impl<'r, 'l> SessionExt<'r, 'l> { let op = woc.convert_resource( &state_key, blob_and_layout_op, - configs.legacy_resource_creation_as_modification(), + legacy_resource_creation_as_modification, )?; resource_write_set.insert(state_key, op); } for (name, blob_op) in modules { + if addr.is_special() { + has_modules_published_to_special_address = true; + } let state_key = StateKey::module(&addr, &name); let op = woc.convert_module(&state_key, blob_op, false)?; - module_write_set.insert(state_key, op); + module_write_ops.insert(state_key, op); } } @@ -436,18 +445,20 @@ impl<'r, 'l> SessionExt<'r, 'l> { .filter(|(state_key, _)| !resource_group_write_set.contains_key(state_key)) .collect(); - VMChangeSet::new_expanded( + let change_set = VMChangeSet::new_expanded( resource_write_set, resource_group_write_set, - module_write_set, aggregator_v1_write_set, aggregator_v1_delta_set, aggregator_change_set.delayed_field_changes, reads_needing_exchange, group_reads_needing_change, 
events, - configs, - ) + )?; + let module_write_set = + ModuleWriteSet::new(has_modules_published_to_special_address, module_write_ops); + + Ok((change_set, module_write_set)) } } diff --git a/aptos-move/aptos-vm/src/move_vm_ext/session/respawned_session.rs b/aptos-move/aptos-vm/src/move_vm_ext/session/respawned_session.rs index d8853dd0a9df6..3d06689531bb7 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/session/respawned_session.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/session/respawned_session.rs @@ -10,7 +10,10 @@ use crate::{ AptosVM, }; use aptos_types::transaction::user_transaction_context::UserTransactionContext; -use aptos_vm_types::{change_set::VMChangeSet, storage::change_set_configs::ChangeSetConfigs}; +use aptos_vm_types::{ + change_set::VMChangeSet, module_write_set::ModuleWriteSet, + storage::change_set_configs::ChangeSetConfigs, +}; use move_core_types::vm_status::{err_msg, StatusCode, VMStatus}; fn unwrap_or_invariant_violation(value: Option, msg: &str) -> Result { @@ -40,21 +43,21 @@ impl<'r, 'l> RespawnedSession<'r, 'l> { base: &'r impl AptosMoveResolver, previous_session_change_set: VMChangeSet, user_transaction_context_opt: Option, - ) -> Result { + ) -> Self { let executor_view = ExecutorViewWithChangeSet::new( base.as_executor_view(), base.as_resource_group_view(), previous_session_change_set, ); - Ok(RespawnedSessionBuilder { + RespawnedSessionBuilder { executor_view, resolver_builder: |executor_view| vm.as_move_resolver_with_group_view(executor_view), session_builder: |resolver| { Some(vm.new_session(resolver, session_id, user_transaction_context_opt)) }, } - .build()) + .build() } pub fn execute( @@ -72,8 +75,8 @@ impl<'r, 'l> RespawnedSession<'r, 'l> { mut self, change_set_configs: &ChangeSetConfigs, assert_no_additional_creation: bool, - ) -> Result { - let additional_change_set = self.with_session_mut(|session| { + ) -> Result<(VMChangeSet, ModuleWriteSet), VMStatus> { + let (additional_change_set, module_write_set) = 
self.with_session_mut(|session| { unwrap_or_invariant_violation( session.take(), "VM session cannot be finished more than once.", @@ -96,13 +99,13 @@ impl<'r, 'l> RespawnedSession<'r, 'l> { } let mut change_set = self.into_heads().executor_view.change_set; change_set - .squash_additional_change_set(additional_change_set, change_set_configs) + .squash_additional_change_set(additional_change_set) .map_err(|_err| { VMStatus::error( StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR, err_msg("Failed to squash VMChangeSet"), ) })?; - Ok(change_set) + Ok((change_set, module_write_set)) } } diff --git a/aptos-move/aptos-vm/src/move_vm_ext/session/user_transaction_sessions/abort_hook.rs b/aptos-move/aptos-vm/src/move_vm_ext/session/user_transaction_sessions/abort_hook.rs index 05581040ef17e..ecdad7704cab5 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/session/user_transaction_sessions/abort_hook.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/session/user_transaction_sessions/abort_hook.rs @@ -2,12 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - move_vm_ext::{session::respawned_session::RespawnedSession, AptosMoveResolver, SessionId}, + move_vm_ext::{ + session::{ + respawned_session::RespawnedSession, + user_transaction_sessions::session_change_sets::SystemSessionChangeSet, + }, + AptosMoveResolver, SessionId, + }, transaction_metadata::TransactionMetadata, AptosVM, }; -use aptos_vm_types::{change_set::VMChangeSet, storage::change_set_configs::ChangeSetConfigs}; +use aptos_vm_types::storage::change_set_configs::ChangeSetConfigs; use derive_more::{Deref, DerefMut}; +use move_binary_format::errors::Location; use move_core_types::vm_status::VMStatus; #[derive(Deref, DerefMut)] @@ -22,23 +29,41 @@ impl<'r, 'l> AbortHookSession<'r, 'l> { vm: &'l AptosVM, txn_meta: &'l TransactionMetadata, resolver: &'r impl AptosMoveResolver, - prologue_change_set: VMChangeSet, - ) -> Result { + prologue_session_change_set: SystemSessionChangeSet, + ) -> Self { let session_id = 
SessionId::run_on_abort(txn_meta); let session = RespawnedSession::spawn( vm, session_id, resolver, - prologue_change_set, + prologue_session_change_set.unpack(), Some(txn_meta.as_user_transaction_context()), - )?; + ); - Ok(Self { session }) + Self { session } } - pub fn finish(self, change_set_configs: &ChangeSetConfigs) -> Result { + pub fn finish( + self, + change_set_configs: &ChangeSetConfigs, + ) -> Result { let Self { session } = self; - session.finish_with_squashed_change_set(change_set_configs, false) + let (change_set, empty_module_write_set) = + session.finish_with_squashed_change_set(change_set_configs, false)?; + let abort_hook_session_change_set = + SystemSessionChangeSet::new(change_set, change_set_configs)?; + + // Abort hook can never publish modules (just like epilogue)! When we move publishing + // outside MoveVM, we do not need to have a check here. + empty_module_write_set + .is_empty_or_invariant_violation() + .map_err(|e| { + e.with_message("Non-empty module write set in abort hook session".to_string()) + .finish(Location::Undefined) + .into_vm_status() + })?; + + Ok(abort_hook_session_change_set) } } diff --git a/aptos-move/aptos-vm/src/move_vm_ext/session/user_transaction_sessions/epilogue.rs b/aptos-move/aptos-vm/src/move_vm_ext/session/user_transaction_sessions/epilogue.rs index b6a217eb6d46d..0eff622833191 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/session/user_transaction_sessions/epilogue.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/session/user_transaction_sessions/epilogue.rs @@ -2,13 +2,29 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - move_vm_ext::{session::respawned_session::RespawnedSession, AptosMoveResolver, SessionId}, + move_vm_ext::{ + session::{ + respawned_session::RespawnedSession, + user_transaction_sessions::session_change_sets::{ + SystemSessionChangeSet, UserSessionChangeSet, + }, + }, + AptosMoveResolver, SessionId, + }, transaction_metadata::TransactionMetadata, AptosVM, }; use 
aptos_gas_algebra::Fee; -use aptos_vm_types::{change_set::VMChangeSet, storage::change_set_configs::ChangeSetConfigs}; +use aptos_types::{ + fee_statement::FeeStatement, + transaction::{ExecutionStatus, TransactionAuxiliaryData, TransactionStatus}, +}; +use aptos_vm_types::{ + change_set::VMChangeSet, module_write_set::ModuleWriteSet, output::VMOutput, + storage::change_set_configs::ChangeSetConfigs, +}; use derive_more::{Deref, DerefMut}; +use move_binary_format::errors::Location; use move_core_types::vm_status::VMStatus; #[derive(Deref, DerefMut)] @@ -17,16 +33,52 @@ pub struct EpilogueSession<'r, 'l> { #[deref_mut] session: RespawnedSession<'r, 'l>, storage_refund: Fee, + module_write_set: ModuleWriteSet, } impl<'r, 'l> EpilogueSession<'r, 'l> { - pub fn new( + pub fn on_user_session_success( + vm: &'l AptosVM, + txn_meta: &'l TransactionMetadata, + resolver: &'r impl AptosMoveResolver, + user_session_change_set: UserSessionChangeSet, + storage_refund: Fee, + ) -> Self { + let (change_set, module_write_set) = user_session_change_set.unpack(); + Self::new( + vm, + txn_meta, + resolver, + change_set, + module_write_set, + storage_refund, + ) + } + + pub fn on_user_session_failure( + vm: &'l AptosVM, + txn_meta: &'l TransactionMetadata, + resolver: &'r impl AptosMoveResolver, + previous_session_change_set: SystemSessionChangeSet, + ) -> Self { + Self::new( + vm, + txn_meta, + resolver, + previous_session_change_set.unpack(), + ModuleWriteSet::empty(), + 0.into(), + ) + } + + fn new( vm: &'l AptosVM, txn_meta: &'l TransactionMetadata, resolver: &'r impl AptosMoveResolver, previous_session_change_set: VMChangeSet, + module_write_set: ModuleWriteSet, storage_refund: Fee, - ) -> Result { + ) -> Self { let session_id = SessionId::epilogue_meta(txn_meta); let session = RespawnedSession::spawn( vm, @@ -34,23 +86,54 @@ impl<'r, 'l> EpilogueSession<'r, 'l> { resolver, previous_session_change_set, Some(txn_meta.as_user_transaction_context()), - )?; + ); - Ok(Self { + Self { 
session, storage_refund, - }) + module_write_set, + } } pub fn get_storage_fee_refund(&self) -> Fee { self.storage_refund } - pub fn finish(self, change_set_configs: &ChangeSetConfigs) -> Result { + pub fn finish( + self, + fee_statement: FeeStatement, + execution_status: ExecutionStatus, + txn_aux_data: TransactionAuxiliaryData, + change_set_configs: &ChangeSetConfigs, + ) -> Result { let Self { session, storage_refund: _, + module_write_set, } = self; - session.finish_with_squashed_change_set(change_set_configs, true) + + let (change_set, empty_module_write_set) = + session.finish_with_squashed_change_set(change_set_configs, true)?; + let epilogue_session_change_set = + UserSessionChangeSet::new(change_set, module_write_set, change_set_configs)?; + + // Epilogue can never publish modules! When we move publishing outside MoveVM, we do not need to have + // this check here, as modules will only be visible in user session. + empty_module_write_set + .is_empty_or_invariant_violation() + .map_err(|e| { + e.with_message("Non-empty module write set in epilogue session".to_string()) + .finish(Location::Undefined) + .into_vm_status() + })?; + + let (change_set, module_write_set) = epilogue_session_change_set.unpack(); + Ok(VMOutput::new( + change_set, + module_write_set, + fee_statement, + TransactionStatus::Keep(execution_status), + txn_aux_data, + )) } } diff --git a/aptos-move/aptos-vm/src/move_vm_ext/session/user_transaction_sessions/mod.rs b/aptos-move/aptos-vm/src/move_vm_ext/session/user_transaction_sessions/mod.rs index 611a566bb5b76..666e2b22b87ae 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/session/user_transaction_sessions/mod.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/session/user_transaction_sessions/mod.rs @@ -4,4 +4,5 @@ pub mod abort_hook; pub mod epilogue; pub mod prologue; +pub mod session_change_sets; pub mod user; diff --git a/aptos-move/aptos-vm/src/move_vm_ext/session/user_transaction_sessions/prologue.rs 
b/aptos-move/aptos-vm/src/move_vm_ext/session/user_transaction_sessions/prologue.rs index 0a3b9467173c1..28076b1eae87f 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/session/user_transaction_sessions/prologue.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/session/user_transaction_sessions/prologue.rs @@ -4,7 +4,10 @@ use crate::{ move_vm_ext::{ session::{ - respawned_session::RespawnedSession, user_transaction_sessions::user::UserSession, + respawned_session::RespawnedSession, + user_transaction_sessions::{ + session_change_sets::SystemSessionChangeSet, user::UserSession, + }, }, AptosMoveResolver, SessionId, }, @@ -13,6 +16,7 @@ use crate::{ }; use aptos_vm_types::{change_set::VMChangeSet, storage::change_set_configs::ChangeSetConfigs}; use derive_more::{Deref, DerefMut}; +use move_binary_format::errors::Location; use move_core_types::vm_status::VMStatus; #[derive(Deref, DerefMut)] @@ -27,7 +31,7 @@ impl<'r, 'l> PrologueSession<'r, 'l> { vm: &'l AptosVM, txn_meta: &'m TransactionMetadata, resolver: &'r impl AptosMoveResolver, - ) -> Result { + ) -> Self { let session_id = SessionId::prologue_meta(txn_meta); let session = RespawnedSession::spawn( vm, @@ -35,9 +39,9 @@ impl<'r, 'l> PrologueSession<'r, 'l> { resolver, VMChangeSet::empty(), Some(txn_meta.as_user_transaction_context()), - )?; + ); - Ok(Self { session }) + Self { session } } pub fn into_user_session( @@ -47,7 +51,7 @@ impl<'r, 'l> PrologueSession<'r, 'l> { resolver: &'r impl AptosMoveResolver, gas_feature_version: u64, change_set_configs: &ChangeSetConfigs, - ) -> Result<(VMChangeSet, UserSession<'r, 'l>), VMStatus> { + ) -> Result<(SystemSessionChangeSet, UserSession<'r, 'l>), VMStatus> { let Self { session } = self; if gas_feature_version >= 1 { @@ -59,16 +63,29 @@ impl<'r, 'l> PrologueSession<'r, 'l> { // By releasing resource group cache, we start with a fresh slate for resource group // cost accounting. 
- let change_set = session.finish_with_squashed_change_set(change_set_configs, false)?; - resolver.release_resource_group_cache(); + let (change_set, empty_module_write_set) = + session.finish_with_squashed_change_set(change_set_configs, false)?; + let prologue_session_change_set = + SystemSessionChangeSet::new(change_set.clone(), change_set_configs)?; + + // Prologue can never publish modules! When we move publishing outside MoveVM, we do not + // need to have this check here, as modules will only be visible in user session. + empty_module_write_set + .is_empty_or_invariant_violation() + .map_err(|e| { + e.with_message("Non-empty module write set in prologue session".to_string()) + .finish(Location::Undefined) + .into_vm_status() + })?; + resolver.release_resource_group_cache(); Ok(( - change_set.clone(), - UserSession::new(vm, txn_meta, resolver, change_set)?, + prologue_session_change_set, + UserSession::new(vm, txn_meta, resolver, change_set), )) } else { Ok(( - VMChangeSet::empty(), + SystemSessionChangeSet::empty(), UserSession::legacy_inherit_prologue_session(session), )) } diff --git a/aptos-move/aptos-vm/src/move_vm_ext/session/user_transaction_sessions/session_change_sets.rs b/aptos-move/aptos-vm/src/move_vm_ext/session/user_transaction_sessions/session_change_sets.rs new file mode 100644 index 0000000000000..38b4641bc8caf --- /dev/null +++ b/aptos-move/aptos-vm/src/move_vm_ext/session/user_transaction_sessions/session_change_sets.rs @@ -0,0 +1,115 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use aptos_types::{ + contract_event::ContractEvent, state_store::state_key::StateKey, write_set::WriteOpSize, +}; +use aptos_vm_types::{ + change_set::{ChangeSetInterface, VMChangeSet, WriteOpInfo}, + module_write_set::ModuleWriteSet, + resolver::ExecutorView, + storage::change_set_configs::ChangeSetConfigs, +}; +use move_binary_format::errors::PartialVMResult; +use move_core_types::vm_status::VMStatus; + +#[derive(Clone)] +pub 
struct UserSessionChangeSet { + change_set: VMChangeSet, + module_write_set: ModuleWriteSet, +} + +impl UserSessionChangeSet { + pub(crate) fn new( + change_set: VMChangeSet, + module_write_set: ModuleWriteSet, + change_set_configs: &ChangeSetConfigs, + ) -> Result { + let user_session_change_set = Self { + change_set, + module_write_set, + }; + change_set_configs.check_change_set(&user_session_change_set)?; + Ok(user_session_change_set) + } + + pub(crate) fn has_modules_published_to_special_address(&self) -> bool { + self.module_write_set.has_writes_to_special_address() + } + + pub(crate) fn unpack(self) -> (VMChangeSet, ModuleWriteSet) { + (self.change_set, self.module_write_set) + } +} + +impl ChangeSetInterface for UserSessionChangeSet { + fn num_write_ops(&self) -> usize { + self.change_set.num_write_ops() + self.module_write_set.num_write_ops() + } + + fn write_set_size_iter(&self) -> impl Iterator { + self.change_set + .write_set_size_iter() + .chain(self.module_write_set.write_set_size_iter()) + } + + fn write_op_info_iter_mut<'a>( + &'a mut self, + executor_view: &'a dyn ExecutorView, + ) -> impl Iterator> { + self.change_set + .write_op_info_iter_mut(executor_view) + .chain(self.module_write_set.write_op_info_iter_mut(executor_view)) + } + + fn events_iter(&self) -> impl Iterator { + self.change_set.events_iter() + } +} + +#[derive(Clone)] +pub struct SystemSessionChangeSet { + change_set: VMChangeSet, +} + +impl SystemSessionChangeSet { + pub(crate) fn new( + change_set: VMChangeSet, + change_set_configs: &ChangeSetConfigs, + ) -> Result { + let system_session_change_set = Self { change_set }; + change_set_configs.check_change_set(&system_session_change_set)?; + Ok(system_session_change_set) + } + + pub(crate) fn empty() -> Self { + Self { + change_set: VMChangeSet::empty(), + } + } + + pub(crate) fn unpack(self) -> VMChangeSet { + self.change_set + } +} + +impl ChangeSetInterface for SystemSessionChangeSet { + fn num_write_ops(&self) -> usize { + 
self.change_set.num_write_ops() + } + + fn write_set_size_iter(&self) -> impl Iterator { + self.change_set.write_set_size_iter() + } + + fn write_op_info_iter_mut<'a>( + &'a mut self, + executor_view: &'a dyn ExecutorView, + ) -> impl Iterator> { + self.change_set.write_op_info_iter_mut(executor_view) + } + + fn events_iter(&self) -> impl Iterator { + self.change_set.events_iter() + } +} diff --git a/aptos-move/aptos-vm/src/move_vm_ext/session/user_transaction_sessions/user.rs b/aptos-move/aptos-vm/src/move_vm_ext/session/user_transaction_sessions/user.rs index edccd9bde9ed0..ee018267145c1 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/session/user_transaction_sessions/user.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/session/user_transaction_sessions/user.rs @@ -2,7 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - move_vm_ext::{session::respawned_session::RespawnedSession, AptosMoveResolver, SessionId}, + move_vm_ext::{ + session::{ + respawned_session::RespawnedSession, + user_transaction_sessions::session_change_sets::UserSessionChangeSet, + }, + AptosMoveResolver, SessionId, + }, transaction_metadata::TransactionMetadata, AptosVM, }; @@ -23,7 +29,7 @@ impl<'r, 'l> UserSession<'r, 'l> { txn_meta: &'l TransactionMetadata, resolver: &'r impl AptosMoveResolver, prologue_change_set: VMChangeSet, - ) -> Result { + ) -> Self { let session_id = SessionId::txn_meta(txn_meta); let session = RespawnedSession::spawn( @@ -32,9 +38,9 @@ impl<'r, 'l> UserSession<'r, 'l> { resolver, prologue_change_set, Some(txn_meta.as_user_transaction_context()), - )?; + ); - Ok(Self { session }) + Self { session } } pub fn legacy_inherit_prologue_session(prologue_session: RespawnedSession<'r, 'l>) -> Self { @@ -43,8 +49,13 @@ impl<'r, 'l> UserSession<'r, 'l> { } } - pub fn finish(self, change_set_configs: &ChangeSetConfigs) -> Result { + pub fn finish( + self, + change_set_configs: &ChangeSetConfigs, + ) -> Result { let Self { session } = self; - 
session.finish_with_squashed_change_set(change_set_configs, false) + let (change_set, module_write_set) = + session.finish_with_squashed_change_set(change_set_configs, false)?; + UserSessionChangeSet::new(change_set, module_write_set, change_set_configs) } } diff --git a/aptos-move/aptos-vm/src/move_vm_ext/session/view_with_change_set.rs b/aptos-move/aptos-vm/src/move_vm_ext/session/view_with_change_set.rs index 6566d980006c4..50e94ddba816e 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/session/view_with_change_set.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/session/view_with_change_set.rs @@ -315,10 +315,7 @@ impl<'r> TModuleView for ExecutorViewWithChangeSet<'r> { type Key = StateKey; fn get_module_state_value(&self, state_key: &Self::Key) -> PartialVMResult> { - match self.change_set.module_write_set().get(state_key) { - Some(write_op) => Ok(write_op.as_state_value()), - None => self.base_executor_view.get_module_state_value(state_key), - } + self.base_executor_view.get_module_state_value(state_key) } } @@ -344,20 +341,12 @@ mod test { use aptos_aggregator::delta_change_set::{delta_add, serialize}; use aptos_language_e2e_tests::data_store::FakeDataStore; use aptos_types::{account_address::AccountAddress, write_set::WriteOp}; - use aptos_vm_types::{abstract_write_op::GroupWrite, check_change_set::CheckChangeSet}; + use aptos_vm_types::abstract_write_op::GroupWrite; use move_core_types::{ identifier::Identifier, language_storage::{StructTag, TypeTag}, }; - struct NoOpChangeSetChecker; - - impl CheckChangeSet for NoOpChangeSetChecker { - fn check_change_set(&self, _change_set: &VMChangeSet) -> PartialVMResult<()> { - Ok(()) - } - } - fn key(s: impl ToString) -> StateKey { StateKey::raw(s.to_string().as_bytes()) } @@ -423,7 +412,6 @@ mod test { fn test_change_set_state_view() { let mut state_view = FakeDataStore::default(); state_view.set_legacy(key("module_base"), serialize(&10)); - state_view.set_legacy(key("module_both"), serialize(&20)); 
state_view.set_legacy(key("resource_base"), serialize(&30)); state_view.set_legacy(key("resource_both"), serialize(&40)); @@ -444,11 +432,6 @@ mod test { (key("resource_write_set"), (write(90), None)), ]); - let module_write_set = BTreeMap::from([ - (key("module_both"), write(100)), - (key("module_write_set"), write(110)), - ]); - let aggregator_v1_write_set = BTreeMap::from([ (key("aggregator_both"), write(120)), (key("aggregator_write_set"), write(130)), @@ -494,14 +477,12 @@ mod test { let change_set = VMChangeSet::new_expanded( resource_write_set, resource_group_write_set, - module_write_set, aggregator_v1_write_set, aggregator_v1_delta_set, BTreeMap::new(), BTreeMap::new(), BTreeMap::new(), vec![], - &NoOpChangeSetChecker, ) .unwrap(); @@ -513,8 +494,6 @@ mod test { ); assert_eq!(read_module(&view, "module_base"), 10); - assert_eq!(read_module(&view, "module_both"), 100); - assert_eq!(read_module(&view, "module_write_set"), 110); assert_eq!(read_resource(&view, "resource_base"), 30); assert_eq!(read_resource(&view, "resource_both"), 80); diff --git a/aptos-move/aptos-vm/src/move_vm_ext/vm.rs b/aptos-move/aptos-vm/src/move_vm_ext/vm.rs index 6f70bbe796167..5a3356a9da240 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/vm.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/vm.rs @@ -40,13 +40,8 @@ impl GenesisMoveVM { let features = Features::default(); let timed_features = TimedFeaturesBuilder::enable_all().build(); - let pseudo_meter_vector_ty_to_ty_tag_construction = true; - let vm_config = aptos_prod_vm_config( - &features, - &timed_features, - pseudo_meter_vector_ty_to_ty_tag_construction, - aptos_default_ty_builder(&features), - ); + let vm_config = + aptos_prod_vm_config(&features, &timed_features, aptos_default_ty_builder()); // All genesis sessions run with unmetered gas meter, and here we set // the gas parameters for natives as zeros (because they do not matter). 
@@ -60,7 +55,7 @@ impl GenesisMoveVM { ); let vm = MoveVM::new_with_config( - aptos_natives_with_builder(&mut native_builder), + aptos_natives_with_builder(&mut native_builder, false), vm_config.clone(), ); @@ -105,6 +100,7 @@ impl MoveVmExt { gas_params: Result<&AptosGasParameters, &String>, env: Arc, gas_hook: Option>, + inject_create_signer_for_gov_sim: bool, resolver: &impl AptosMoveResolver, ) -> Self { // TODO(Gas): Right now, we have to use some dummy values for gas parameters if they are not found on-chain. @@ -113,8 +109,7 @@ impl MoveVmExt { // We should clean up the logic here once we get that refactored. let (native_gas_params, misc_gas_params, ty_builder) = match gas_params { Ok(gas_params) => { - let ty_builder = - aptos_prod_ty_builder(env.features(), gas_feature_version, gas_params); + let ty_builder = aptos_prod_ty_builder(gas_feature_version, gas_params); ( gas_params.natives.clone(), gas_params.vm.misc.clone(), @@ -122,7 +117,7 @@ impl MoveVmExt { ) }, Err(_) => { - let ty_builder = aptos_default_ty_builder(env.features()); + let ty_builder = aptos_default_ty_builder(); ( NativeGasParameters::zeros(), MiscGasParameters::zeros(), @@ -142,9 +137,10 @@ impl MoveVmExt { // TODO(George): Move gas configs to environment to avoid this clone! 
let mut vm_config = env.vm_config().clone(); - vm_config.pseudo_meter_vector_ty_to_ty_tag_construction = - gas_feature_version >= RELEASE_V1_16; vm_config.ty_builder = ty_builder; + vm_config.disallow_dispatch_for_native = env + .features() + .is_enabled(FeatureFlag::DISALLOW_USER_NATIVES); Self { inner: WarmVmCache::get_warm_vm( @@ -152,6 +148,7 @@ impl MoveVmExt { vm_config, resolver, env.features().is_enabled(FeatureFlag::VM_BINARY_FORMAT_V7), + inject_create_signer_for_gov_sim, ) .expect("should be able to create Move VM; check if there are duplicated natives"), env, @@ -164,17 +161,25 @@ impl MoveVmExt { env: Arc, resolver: &impl AptosMoveResolver, ) -> Self { - Self::new_impl(gas_feature_version, gas_params, env, None, resolver) + Self::new_impl(gas_feature_version, gas_params, env, None, false, resolver) } - pub fn new_with_gas_hook( + pub fn new_with_extended_options( gas_feature_version: u64, gas_params: Result<&AptosGasParameters, &String>, env: Arc, gas_hook: Option>, + inject_create_signer_for_gov_sim: bool, resolver: &impl AptosMoveResolver, ) -> Self { - Self::new_impl(gas_feature_version, gas_params, env, gas_hook, resolver) + Self::new_impl( + gas_feature_version, + gas_params, + env, + gas_hook, + inject_create_signer_for_gov_sim, + resolver, + ) } pub fn new_session<'r, R: AptosMoveResolver>( diff --git a/aptos-move/aptos-vm/src/move_vm_ext/warm_vm_cache.rs b/aptos-move/aptos-vm/src/move_vm_ext/warm_vm_cache.rs index d7f58bff2b542..5f7b47ca41edc 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/warm_vm_cache.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/warm_vm_cache.rs @@ -30,14 +30,25 @@ static WARM_VM_CACHE: Lazy = Lazy::new(|| WarmVmCache { cache: RwLock::new(HashMap::new()), }); +pub fn flush_warm_vm_cache() { + WARM_VM_CACHE.cache.write().clear(); +} + impl WarmVmCache { pub(crate) fn get_warm_vm( native_builder: SafeNativeBuilder, vm_config: VMConfig, resolver: &impl AptosMoveResolver, bin_v7_enabled: bool, + inject_create_signer_for_gov_sim: 
bool, ) -> VMResult { - WARM_VM_CACHE.get(native_builder, vm_config, resolver, bin_v7_enabled) + WARM_VM_CACHE.get( + native_builder, + vm_config, + resolver, + bin_v7_enabled, + inject_create_signer_for_gov_sim, + ) } fn get( @@ -46,11 +57,18 @@ impl WarmVmCache { vm_config: VMConfig, resolver: &impl AptosMoveResolver, bin_v7_enabled: bool, + inject_create_signer_for_gov_sim: bool, ) -> VMResult { let _timer = TIMER.timer_with(&["warm_vm_get"]); let id = { let _timer = TIMER.timer_with(&["get_warm_vm_id"]); - WarmVmId::new(&native_builder, &vm_config, resolver, bin_v7_enabled)? + WarmVmId::new( + &native_builder, + &vm_config, + resolver, + bin_v7_enabled, + inject_create_signer_for_gov_sim, + )? }; if let Some(vm) = self.cache.read().get(&id) { @@ -66,8 +84,10 @@ impl WarmVmCache { return Ok(vm.clone()); } - let vm = - MoveVM::new_with_config(aptos_natives_with_builder(&mut native_builder), vm_config); + let vm = MoveVM::new_with_config( + aptos_natives_with_builder(&mut native_builder, inject_create_signer_for_gov_sim), + vm_config, + ); Self::warm_vm_up(&vm, resolver); // Not using LruCache because its `::get()` requires &mut self @@ -102,6 +122,7 @@ struct WarmVmId { vm_config: Bytes, core_packages_registry: Option, bin_v7_enabled: bool, + inject_create_signer_for_gov_sim: bool, } impl WarmVmId { @@ -110,6 +131,7 @@ impl WarmVmId { vm_config: &VMConfig, resolver: &impl AptosMoveResolver, bin_v7_enabled: bool, + inject_create_signer_for_gov_sim: bool, ) -> VMResult { let natives = { let _timer = TIMER.timer_with(&["serialize_native_builder"]); @@ -120,6 +142,7 @@ impl WarmVmId { vm_config: Self::vm_config_bytes(vm_config), core_packages_registry: Self::core_packages_id_bytes(resolver)?, bin_v7_enabled, + inject_create_signer_for_gov_sim, }) } diff --git a/aptos-move/aptos-vm/src/natives.rs b/aptos-move/aptos-vm/src/natives.rs index b3732c97507ad..4fb5a39b11273 100644 --- a/aptos-move/aptos-vm/src/natives.rs +++ b/aptos-move/aptos-vm/src/natives.rs @@ -162,10 
+162,13 @@ pub fn aptos_natives( None, ); - aptos_natives_with_builder(&mut builder) + aptos_natives_with_builder(&mut builder, false) } -pub fn aptos_natives_with_builder(builder: &mut SafeNativeBuilder) -> NativeFunctionTable { +pub fn aptos_natives_with_builder( + builder: &mut SafeNativeBuilder, + inject_create_signer_for_gov_sim: bool, +) -> NativeFunctionTable { #[allow(unreachable_code)] aptos_move_stdlib::natives::all_natives(CORE_CODE_ADDRESS, builder) .into_iter() @@ -173,6 +176,7 @@ pub fn aptos_natives_with_builder(builder: &mut SafeNativeBuilder) -> NativeFunc .chain(aptos_framework::natives::all_natives( CORE_CODE_ADDRESS, builder, + inject_create_signer_for_gov_sim, )) .chain(aptos_table_natives::table_natives( CORE_CODE_ADDRESS, diff --git a/aptos-move/aptos-vm/src/sharded_block_executor/sharded_executor_service.rs b/aptos-move/aptos-vm/src/sharded_block_executor/sharded_executor_service.rs index a2d2e76e9e6e6..e968f2416d47c 100644 --- a/aptos-move/aptos-vm/src/sharded_block_executor/sharded_executor_service.rs +++ b/aptos-move/aptos-vm/src/sharded_block_executor/sharded_executor_service.rs @@ -135,7 +135,7 @@ impl ShardedExecutorService { ); }); s.spawn(move |_| { - let ret = BlockAptosVM::execute_block( + let ret = BlockAptosVM::execute_block_on_thread_pool( executor_thread_pool, &signature_verified_transactions, aggr_overridden_state_view.as_ref(), diff --git a/aptos-move/aptos-vm/src/testing.rs b/aptos-move/aptos-vm/src/testing.rs index 6a9c67fb07eb6..27edf50fa0d6c 100644 --- a/aptos-move/aptos-vm/src/testing.rs +++ b/aptos-move/aptos-vm/src/testing.rs @@ -5,6 +5,7 @@ use crate::AptosVM; #[cfg(any(test, feature = "testing"))] use crate::{ aptos_vm::get_or_vm_startup_failure, data_cache::AsMoveResolver, + move_vm_ext::session::user_transaction_sessions::session_change_sets::SystemSessionChangeSet, transaction_metadata::TransactionMetadata, }; #[cfg(any(test, feature = "testing"))] @@ -12,7 +13,7 @@ use aptos_types::{state_store::StateView, 
transaction::SignedTransaction}; #[cfg(any(test, feature = "testing"))] use aptos_vm_logging::log_schema::AdapterLogSchema; #[cfg(any(test, feature = "testing"))] -use aptos_vm_types::{change_set::VMChangeSet, output::VMOutput}; +use aptos_vm_types::output::VMOutput; use move_binary_format::errors::VMResult; #[cfg(any(test, feature = "testing"))] use move_core_types::vm_status::VMStatus; @@ -103,7 +104,7 @@ impl AptosVM { let resolver = state_view.as_move_resolver(); let storage = TraversalStorage::new(); self.failed_transaction_cleanup( - VMChangeSet::empty(), + SystemSessionChangeSet::empty(), error_vm_status, &mut gas_meter, &txn_data, diff --git a/aptos-move/aptos-vm/src/transaction_metadata.rs b/aptos-move/aptos-vm/src/transaction_metadata.rs index e73d53acc19c0..65358aee33e82 100644 --- a/aptos-move/aptos-vm/src/transaction_metadata.rs +++ b/aptos-move/aptos-vm/src/transaction_metadata.rs @@ -37,20 +37,29 @@ impl TransactionMetadata { pub fn new(txn: &SignedTransaction) -> Self { Self { sender: txn.sender(), - authentication_key: txn.authenticator().sender().authentication_key().to_vec(), + authentication_key: txn + .authenticator() + .sender() + .authentication_key() + .map_or_else(Vec::new, |auth_key| auth_key.to_vec()), secondary_signers: txn.authenticator().secondary_signer_addresses(), secondary_authentication_keys: txn .authenticator() .secondary_signers() .iter() - .map(|account_auth| account_auth.authentication_key().to_vec()) + .map(|account_auth| { + account_auth + .authentication_key() + .map_or_else(Vec::new, |auth_key| auth_key.to_vec()) + }) .collect(), sequence_number: txn.sequence_number(), fee_payer: txn.authenticator_ref().fee_payer_address(), - fee_payer_authentication_key: txn - .authenticator() - .fee_payer_signer() - .map(|signer| signer.authentication_key().to_vec()), + fee_payer_authentication_key: txn.authenticator().fee_payer_signer().map(|signer| { + signer + .authentication_key() + .map_or_else(Vec::new, |auth_key| 
auth_key.to_vec()) + }), max_gas_amount: txn.max_gas_amount().into(), gas_unit_price: txn.gas_unit_price().into(), transaction_size: (txn.raw_txn_bytes_len() as u64).into(), diff --git a/aptos-move/aptos-vm/src/transaction_validation.rs b/aptos-move/aptos-vm/src/transaction_validation.rs index c90f7bd8c43e0..229833b71f01b 100644 --- a/aptos-move/aptos-vm/src/transaction_validation.rs +++ b/aptos-move/aptos-vm/src/transaction_validation.rs @@ -40,6 +40,14 @@ pub static APTOS_TRANSACTION_VALIDATION: Lazy = multi_agent_prologue_name: Identifier::new("multi_agent_script_prologue").unwrap(), user_epilogue_name: Identifier::new("epilogue").unwrap(), user_epilogue_gas_payer_name: Identifier::new("epilogue_gas_payer").unwrap(), + fee_payer_prologue_extended_name: Identifier::new("fee_payer_script_prologue_extended") + .unwrap(), + script_prologue_extended_name: Identifier::new("script_prologue_extended").unwrap(), + multi_agent_prologue_extended_name: Identifier::new("multi_agent_script_prologue_extended") + .unwrap(), + user_epilogue_extended_name: Identifier::new("epilogue_extended").unwrap(), + user_epilogue_gas_payer_extended_name: Identifier::new("epilogue_gas_payer_extended") + .unwrap(), }); /// On-chain functions used to validate transactions @@ -52,6 +60,11 @@ pub struct TransactionValidation { pub multi_agent_prologue_name: Identifier, pub user_epilogue_name: Identifier, pub user_epilogue_gas_payer_name: Identifier, + pub fee_payer_prologue_extended_name: Identifier, + pub script_prologue_extended_name: Identifier, + pub multi_agent_prologue_extended_name: Identifier, + pub user_epilogue_extended_name: Identifier, + pub user_epilogue_gas_payer_extended_name: Identifier, } impl TransactionValidation { @@ -72,8 +85,10 @@ impl TransactionValidation { pub(crate) fn run_script_prologue( session: &mut SessionExt, txn_data: &TransactionMetadata, + features: &Features, log_context: &AdapterLogSchema, traversal_context: &mut TraversalContext, + is_simulation: bool, ) -> 
Result<(), VMStatus> { let txn_sequence_number = txn_data.sequence_number(); let txn_authentication_key = txn_data.authentication_key().to_vec(); @@ -91,48 +106,107 @@ pub(crate) fn run_script_prologue( txn_data.fee_payer(), txn_data.fee_payer_authentication_key.as_ref(), ) { - let args = vec![ - MoveValue::Signer(txn_data.sender), - MoveValue::U64(txn_sequence_number), - MoveValue::vector_u8(txn_authentication_key), - MoveValue::vector_address(txn_data.secondary_signers()), - MoveValue::Vector(secondary_auth_keys), - MoveValue::Address(fee_payer), - MoveValue::vector_u8(fee_payer_auth_key.to_vec()), - MoveValue::U64(txn_gas_price.into()), - MoveValue::U64(txn_max_gas_units.into()), - MoveValue::U64(txn_expiration_timestamp_secs), - MoveValue::U8(chain_id.id()), - ]; - (&APTOS_TRANSACTION_VALIDATION.fee_payer_prologue_name, args) + if features.is_transaction_simulation_enhancement_enabled() { + let args = vec![ + MoveValue::Signer(txn_data.sender), + MoveValue::U64(txn_sequence_number), + MoveValue::vector_u8(txn_authentication_key), + MoveValue::vector_address(txn_data.secondary_signers()), + MoveValue::Vector(secondary_auth_keys), + MoveValue::Address(fee_payer), + MoveValue::vector_u8(fee_payer_auth_key.to_vec()), + MoveValue::U64(txn_gas_price.into()), + MoveValue::U64(txn_max_gas_units.into()), + MoveValue::U64(txn_expiration_timestamp_secs), + MoveValue::U8(chain_id.id()), + MoveValue::Bool(is_simulation), + ]; + ( + &APTOS_TRANSACTION_VALIDATION.fee_payer_prologue_extended_name, + args, + ) + } else { + let args = vec![ + MoveValue::Signer(txn_data.sender), + MoveValue::U64(txn_sequence_number), + MoveValue::vector_u8(txn_authentication_key), + MoveValue::vector_address(txn_data.secondary_signers()), + MoveValue::Vector(secondary_auth_keys), + MoveValue::Address(fee_payer), + MoveValue::vector_u8(fee_payer_auth_key.to_vec()), + MoveValue::U64(txn_gas_price.into()), + MoveValue::U64(txn_max_gas_units.into()), + MoveValue::U64(txn_expiration_timestamp_secs), + 
MoveValue::U8(chain_id.id()), + ]; + (&APTOS_TRANSACTION_VALIDATION.fee_payer_prologue_name, args) + } } else if txn_data.is_multi_agent() { - let args = vec![ - MoveValue::Signer(txn_data.sender), - MoveValue::U64(txn_sequence_number), - MoveValue::vector_u8(txn_authentication_key), - MoveValue::vector_address(txn_data.secondary_signers()), - MoveValue::Vector(secondary_auth_keys), - MoveValue::U64(txn_gas_price.into()), - MoveValue::U64(txn_max_gas_units.into()), - MoveValue::U64(txn_expiration_timestamp_secs), - MoveValue::U8(chain_id.id()), - ]; - ( - &APTOS_TRANSACTION_VALIDATION.multi_agent_prologue_name, - args, - ) + if features.is_transaction_simulation_enhancement_enabled() { + let args = vec![ + MoveValue::Signer(txn_data.sender), + MoveValue::U64(txn_sequence_number), + MoveValue::vector_u8(txn_authentication_key), + MoveValue::vector_address(txn_data.secondary_signers()), + MoveValue::Vector(secondary_auth_keys), + MoveValue::U64(txn_gas_price.into()), + MoveValue::U64(txn_max_gas_units.into()), + MoveValue::U64(txn_expiration_timestamp_secs), + MoveValue::U8(chain_id.id()), + MoveValue::Bool(is_simulation), + ]; + ( + &APTOS_TRANSACTION_VALIDATION.multi_agent_prologue_extended_name, + args, + ) + } else { + let args = vec![ + MoveValue::Signer(txn_data.sender), + MoveValue::U64(txn_sequence_number), + MoveValue::vector_u8(txn_authentication_key), + MoveValue::vector_address(txn_data.secondary_signers()), + MoveValue::Vector(secondary_auth_keys), + MoveValue::U64(txn_gas_price.into()), + MoveValue::U64(txn_max_gas_units.into()), + MoveValue::U64(txn_expiration_timestamp_secs), + MoveValue::U8(chain_id.id()), + ]; + ( + &APTOS_TRANSACTION_VALIDATION.multi_agent_prologue_name, + args, + ) + } } else { - let args = vec![ - MoveValue::Signer(txn_data.sender), - MoveValue::U64(txn_sequence_number), - MoveValue::vector_u8(txn_authentication_key), - MoveValue::U64(txn_gas_price.into()), - MoveValue::U64(txn_max_gas_units.into()), - 
MoveValue::U64(txn_expiration_timestamp_secs), - MoveValue::U8(chain_id.id()), - MoveValue::vector_u8(txn_data.script_hash.clone()), - ]; - (&APTOS_TRANSACTION_VALIDATION.script_prologue_name, args) + #[allow(clippy::collapsible_else_if)] + if features.is_transaction_simulation_enhancement_enabled() { + let args = vec![ + MoveValue::Signer(txn_data.sender), + MoveValue::U64(txn_sequence_number), + MoveValue::vector_u8(txn_authentication_key), + MoveValue::U64(txn_gas_price.into()), + MoveValue::U64(txn_max_gas_units.into()), + MoveValue::U64(txn_expiration_timestamp_secs), + MoveValue::U8(chain_id.id()), + MoveValue::vector_u8(txn_data.script_hash.clone()), + MoveValue::Bool(is_simulation), + ]; + ( + &APTOS_TRANSACTION_VALIDATION.script_prologue_extended_name, + args, + ) + } else { + let args = vec![ + MoveValue::Signer(txn_data.sender), + MoveValue::U64(txn_sequence_number), + MoveValue::vector_u8(txn_authentication_key), + MoveValue::U64(txn_gas_price.into()), + MoveValue::U64(txn_max_gas_units.into()), + MoveValue::U64(txn_expiration_timestamp_secs), + MoveValue::U8(chain_id.id()), + MoveValue::vector_u8(txn_data.script_hash.clone()), + ]; + (&APTOS_TRANSACTION_VALIDATION.script_prologue_name, args) + } }; session .execute_function_bypass_visibility( @@ -198,6 +272,7 @@ fn run_epilogue( txn_data: &TransactionMetadata, features: &Features, traversal_context: &mut TraversalContext, + is_simulation: bool, ) -> VMResult<()> { let txn_gas_price = txn_data.gas_unit_price(); let txn_max_gas_units = txn_data.max_gas_amount(); @@ -206,18 +281,34 @@ fn run_epilogue( // accepted it, in which case the gas payer feature is enabled. 
if let Some(fee_payer) = txn_data.fee_payer() { let (func_name, args) = { - let args = vec![ - MoveValue::Signer(txn_data.sender), - MoveValue::Address(fee_payer), - MoveValue::U64(fee_statement.storage_fee_refund()), - MoveValue::U64(txn_gas_price.into()), - MoveValue::U64(txn_max_gas_units.into()), - MoveValue::U64(gas_remaining.into()), - ]; - ( - &APTOS_TRANSACTION_VALIDATION.user_epilogue_gas_payer_name, - args, - ) + if features.is_transaction_simulation_enhancement_enabled() { + let args = vec![ + MoveValue::Signer(txn_data.sender), + MoveValue::Address(fee_payer), + MoveValue::U64(fee_statement.storage_fee_refund()), + MoveValue::U64(txn_gas_price.into()), + MoveValue::U64(txn_max_gas_units.into()), + MoveValue::U64(gas_remaining.into()), + MoveValue::Bool(is_simulation), + ]; + ( + &APTOS_TRANSACTION_VALIDATION.user_epilogue_gas_payer_extended_name, + args, + ) + } else { + let args = vec![ + MoveValue::Signer(txn_data.sender), + MoveValue::Address(fee_payer), + MoveValue::U64(fee_statement.storage_fee_refund()), + MoveValue::U64(txn_gas_price.into()), + MoveValue::U64(txn_max_gas_units.into()), + MoveValue::U64(gas_remaining.into()), + ]; + ( + &APTOS_TRANSACTION_VALIDATION.user_epilogue_gas_payer_name, + args, + ) + } }; session.execute_function_bypass_visibility( &APTOS_TRANSACTION_VALIDATION.module_id(), @@ -230,14 +321,29 @@ fn run_epilogue( } else { // Regular tx, run the normal epilogue let (func_name, args) = { - let args = vec![ - MoveValue::Signer(txn_data.sender), - MoveValue::U64(fee_statement.storage_fee_refund()), - MoveValue::U64(txn_gas_price.into()), - MoveValue::U64(txn_max_gas_units.into()), - MoveValue::U64(gas_remaining.into()), - ]; - (&APTOS_TRANSACTION_VALIDATION.user_epilogue_name, args) + if features.is_transaction_simulation_enhancement_enabled() { + let args = vec![ + MoveValue::Signer(txn_data.sender), + MoveValue::U64(fee_statement.storage_fee_refund()), + MoveValue::U64(txn_gas_price.into()), + 
MoveValue::U64(txn_max_gas_units.into()), + MoveValue::U64(gas_remaining.into()), + MoveValue::Bool(is_simulation), + ]; + ( + &APTOS_TRANSACTION_VALIDATION.user_epilogue_extended_name, + args, + ) + } else { + let args = vec![ + MoveValue::Signer(txn_data.sender), + MoveValue::U64(fee_statement.storage_fee_refund()), + MoveValue::U64(txn_gas_price.into()), + MoveValue::U64(txn_max_gas_units.into()), + MoveValue::U64(gas_remaining.into()), + ]; + (&APTOS_TRANSACTION_VALIDATION.user_epilogue_name, args) + } }; session.execute_function_bypass_visibility( &APTOS_TRANSACTION_VALIDATION.module_id(), @@ -288,6 +394,7 @@ pub(crate) fn run_success_epilogue( txn_data: &TransactionMetadata, log_context: &AdapterLogSchema, traversal_context: &mut TraversalContext, + is_simulation: bool, ) -> Result<(), VMStatus> { fail_point!("move_adapter::run_success_epilogue", |_| { Err(VMStatus::error( @@ -303,6 +410,7 @@ pub(crate) fn run_success_epilogue( txn_data, features, traversal_context, + is_simulation, ) .or_else(|err| convert_epilogue_error(err, log_context)) } @@ -317,6 +425,7 @@ pub(crate) fn run_failure_epilogue( txn_data: &TransactionMetadata, log_context: &AdapterLogSchema, traversal_context: &mut TraversalContext, + is_simulation: bool, ) -> Result<(), VMStatus> { run_epilogue( session, @@ -325,6 +434,7 @@ pub(crate) fn run_failure_epilogue( txn_data, features, traversal_context, + is_simulation, ) .or_else(|e| { expect_only_successful_execution( diff --git a/aptos-move/aptos-vm/src/validator_txns/dkg.rs b/aptos-move/aptos-vm/src/validator_txns/dkg.rs index dd5f77dd174ff..a0a57cdb04bcc 100644 --- a/aptos-move/aptos-vm/src/validator_txns/dkg.rs +++ b/aptos-move/aptos-vm/src/validator_txns/dkg.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - aptos_vm::get_or_vm_startup_failure, + aptos_vm::{get_or_vm_startup_failure, get_system_transaction_output}, errors::expect_only_successful_execution, move_vm_ext::{AptosMoveResolver, SessionId}, 
system_module_names::{FINISH_WITH_DKG_RESULT, RECONFIGURATION_WITH_DKG_MODULE}, @@ -14,10 +14,9 @@ use crate::{ }; use aptos_types::{ dkg::{DKGState, DKGTrait, DKGTranscript, DefaultDKG}, - fee_statement::FeeStatement, move_utils::as_move_value::AsMoveValue, on_chain_config::{ConfigurationResource, OnChainConfig}, - transaction::{ExecutionStatus, TransactionStatus}, + transaction::TransactionStatus, }; use aptos_vm_logging::log_schema::AdapterLogSchema; use aptos_vm_types::output::VMOutput; @@ -121,10 +120,8 @@ impl AptosVM { }) .map_err(|r| Unexpected(r.unwrap_err()))?; - let output = crate::aptos_vm::get_system_transaction_output( + let output = get_system_transaction_output( session, - FeeStatement::zero(), - ExecutionStatus::Success, &get_or_vm_startup_failure(&self.storage_gas_params, log_context) .map_err(Unexpected)? .change_set_configs, diff --git a/aptos-move/aptos-vm/src/validator_txns/jwk.rs b/aptos-move/aptos-vm/src/validator_txns/jwk.rs index 9b994e911d723..1d6e80f1e5cb6 100644 --- a/aptos-move/aptos-vm/src/validator_txns/jwk.rs +++ b/aptos-move/aptos-vm/src/validator_txns/jwk.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - aptos_vm::get_or_vm_startup_failure, + aptos_vm::{get_or_vm_startup_failure, get_system_transaction_output}, errors::expect_only_successful_execution, move_vm_ext::{AptosMoveResolver, SessionId}, system_module_names::{JWKS_MODULE, UPSERT_INTO_OBSERVED_JWKS}, @@ -17,12 +17,11 @@ use crate::{ }; use aptos_logger::debug; use aptos_types::{ - fee_statement::FeeStatement, jwks, jwks::{Issuer, ObservedJWKs, ProviderJWKs, QuorumCertifiedUpdate}, move_utils::as_move_value::AsMoveValue, on_chain_config::{OnChainConfig, ValidatorSet}, - transaction::{ExecutionStatus, TransactionStatus}, + transaction::TransactionStatus, validator_verifier::ValidatorVerifier, }; use aptos_vm_logging::log_schema::AdapterLogSchema; @@ -151,10 +150,8 @@ impl AptosVM { }) .map_err(|r| Unexpected(r.unwrap_err()))?; - let output = 
crate::aptos_vm::get_system_transaction_output( + let output = get_system_transaction_output( session, - FeeStatement::zero(), - ExecutionStatus::Success, &get_or_vm_startup_failure(&self.storage_gas_params, log_context) .map_err(Unexpected)? .change_set_configs, diff --git a/aptos-move/aptos-vm/src/verifier/event_validation.rs b/aptos-move/aptos-vm/src/verifier/event_validation.rs index 6b1e2fb1e10e8..761a94bb2652d 100644 --- a/aptos-move/aptos-vm/src/verifier/event_validation.rs +++ b/aptos-move/aptos-vm/src/verifier/event_validation.rs @@ -5,7 +5,6 @@ use crate::move_vm_ext::SessionExt; use aptos_framework::RuntimeModuleMetadataV1; use move_binary_format::{ access::{ModuleAccess, ScriptAccess}, - deserializer::DeserializerConfig, errors::{Location, PartialVMError, VMError, VMResult}, file_format::{ Bytecode, CompiledScript, @@ -151,19 +150,7 @@ pub(crate) fn extract_event_metadata( Ok(event_structs) } -pub(crate) fn verify_no_event_emission_in_script( - script_code: &[u8], - config: &DeserializerConfig, -) -> VMResult<()> { - let script = match CompiledScript::deserialize_with_config(script_code, config) { - Ok(script) => script, - Err(err) => { - let msg = format!("[VM] deserializer for script returned error: {:?}", err); - return Err(PartialVMError::new(StatusCode::CODE_DESERIALIZATION_ERROR) - .with_message(msg) - .finish(Location::Script)); - }, - }; +pub(crate) fn verify_no_event_emission_in_compiled_script(script: &CompiledScript) -> VMResult<()> { for bc in &script.code().code { if let Bytecode::CallGeneric(index) = bc { let func_instantiation = &script.function_instantiation_at(*index); diff --git a/aptos-move/aptos-vm/src/verifier/mod.rs b/aptos-move/aptos-vm/src/verifier/mod.rs index 0240e6e673ff9..9feafff4b77eb 100644 --- a/aptos-move/aptos-vm/src/verifier/mod.rs +++ b/aptos-move/aptos-vm/src/verifier/mod.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 pub(crate) mod event_validation; pub(crate) mod module_init; +pub(crate) mod 
native_validation; pub(crate) mod randomness; pub(crate) mod resource_groups; pub mod transaction_arg_validation; diff --git a/aptos-move/aptos-vm/src/verifier/native_validation.rs b/aptos-move/aptos-vm/src/verifier/native_validation.rs new file mode 100644 index 0000000000000..161361b94de7a --- /dev/null +++ b/aptos-move/aptos-vm/src/verifier/native_validation.rs @@ -0,0 +1,28 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use move_binary_format::{ + access::ModuleAccess, + errors::{Location, PartialVMError, VMResult}, + CompiledModule, +}; +use move_core_types::vm_status::StatusCode; + +/// Validate that only system address can publish new natives. +pub(crate) fn validate_module_natives(modules: &[CompiledModule]) -> VMResult<()> { + for module in modules { + let module_address = module.self_addr(); + for def in module.function_defs() { + if def.is_native() && !module_address.is_special() { + return Err( + PartialVMError::new(StatusCode::USER_DEFINED_NATIVE_NOT_ALLOWED) + .with_message( + "Cannot publish native function to non-special address".to_string(), + ) + .finish(Location::Module(module.self_id())), + ); + } + } + } + Ok(()) +} diff --git a/aptos-move/aptos-vm/src/verifier/transaction_arg_validation.rs b/aptos-move/aptos-vm/src/verifier/transaction_arg_validation.rs index 2e60d1cefb188..b4be0a60880de 100644 --- a/aptos-move/aptos-vm/src/verifier/transaction_arg_validation.rs +++ b/aptos-move/aptos-vm/src/verifier/transaction_arg_validation.rs @@ -135,12 +135,7 @@ pub fn validate_combine_signer_and_txn_args( // Need to keep this here to ensure we return the historic correct error code for replay for ty in func.param_tys()[signer_param_cnt..].iter() { let subst_res = ty_builder.create_ty_with_subst(ty, func.ty_args()); - let ty = if ty_builder.is_legacy() { - subst_res.unwrap() - } else { - subst_res.map_err(|e| e.finish(Location::Undefined).into_vm_status())? 
- }; - + let ty = subst_res.map_err(|e| e.finish(Location::Undefined).into_vm_status())?; let valid = is_valid_txn_arg(session, &ty, allowed_structs); if !valid { return Err(VMStatus::error( @@ -235,12 +230,7 @@ pub(crate) fn construct_args( let ty_builder = session.get_ty_builder(); for (ty, arg) in types.iter().zip(args) { let subst_res = ty_builder.create_ty_with_subst(ty, ty_args); - let ty = if ty_builder.is_legacy() { - subst_res.unwrap() - } else { - subst_res.map_err(|e| e.finish(Location::Undefined).into_vm_status())? - }; - + let ty = subst_res.map_err(|e| e.finish(Location::Undefined).into_vm_status())?; let arg = construct_arg(session, &ty, allowed_structs, arg, &mut gas_meter, is_view)?; res_args.push(arg); } diff --git a/aptos-move/block-executor/src/executor.rs b/aptos-move/block-executor/src/executor.rs index 88bb0dd279dcd..cde73c85b7992 100644 --- a/aptos-move/block-executor/src/executor.rs +++ b/aptos-move/block-executor/src/executor.rs @@ -464,6 +464,7 @@ where shared_counter: &AtomicU32, executor: &E, block: &[T], + num_workers: usize, ) -> Result<(), PanicOr> { let mut block_limit_processor = shared_commit_state.acquire(); @@ -592,6 +593,7 @@ where block_limit_processor.finish_parallel_update_counters_and_log_info( txn_idx + 1, scheduler.num_txns(), + num_workers, ); // failpoint triggering error at the last committed transaction, @@ -756,6 +758,7 @@ where shared_counter: &AtomicU32, shared_commit_state: &ExplicitSyncWrapper>, final_results: &ExplicitSyncWrapper>, + num_workers: usize, ) -> Result<(), PanicOr> { // Make executor for each task. TODO: fast concurrent executor. 
let init_timer = VM_INIT_SECONDS.start_timer(); @@ -795,6 +798,7 @@ where shared_counter, &executor, block, + num_workers, )?; scheduler.queueing_commits_mark_done(); } @@ -883,7 +887,7 @@ where } let num_txns = signature_verified_block.len(); - let concurrency_level = self.config.local.concurrency_level.min(num_txns / 2).max(2); + let num_workers = self.config.local.concurrency_level.min(num_txns / 2).max(2); let shared_commit_state = ExplicitSyncWrapper::new(BlockGasLimitProcessor::new( self.config.onchain.block_gas_limit_type.clone(), @@ -906,7 +910,7 @@ where let timer = RAYON_EXECUTION_SECONDS.start_timer(); self.executor_thread_pool.scope(|s| { - for _ in 0..concurrency_level { + for _ in 0..num_workers { s.spawn(|_| { if let Err(err) = self.worker_loop( env, @@ -919,6 +923,7 @@ where &shared_counter, &shared_commit_state, &final_results, + num_workers, ) { // If there are multiple errors, they all get logged: // ModulePathReadWriteError and FatalVMError variant is logged at construction, diff --git a/aptos-move/block-executor/src/limit_processor.rs b/aptos-move/block-executor/src/limit_processor.rs index cbfc38d98a83b..b687da43e1e39 100644 --- a/aptos-move/block-executor/src/limit_processor.rs +++ b/aptos-move/block-executor/src/limit_processor.rs @@ -9,6 +9,7 @@ use aptos_types::{ transaction::{block_epilogue::BlockEndInfo, BlockExecutableTransaction as Transaction}, }; use claims::{assert_le, assert_none}; +use std::time::Instant; pub struct BlockGasLimitProcessor { block_gas_limit_type: BlockGasLimitType, @@ -18,6 +19,7 @@ pub struct BlockGasLimitProcessor { txn_fee_statements: Vec, txn_read_write_summaries: Vec>, module_rw_conflict: bool, + start_time: Instant, } impl BlockGasLimitProcessor { @@ -30,6 +32,7 @@ impl BlockGasLimitProcessor { txn_fee_statements: Vec::with_capacity(init_size), txn_read_write_summaries: Vec::with_capacity(init_size), module_rw_conflict: false, + start_time: Instant::now(), } } @@ -190,6 +193,7 @@ impl BlockGasLimitProcessor { 
is_parallel: bool, num_committed: u32, num_total: u32, + num_workers: usize, ) { let accumulated_effective_block_gas = self.get_effective_accumulated_block_gas(); let accumulated_approx_output_size = self.get_accumulated_approx_output_size(); @@ -216,11 +220,15 @@ impl BlockGasLimitProcessor { .block_gas_limit_type .block_output_limit() .map_or(false, |limit| accumulated_approx_output_size >= limit), + elapsed_ms = self.start_time.elapsed().as_millis(), + num_committed = num_committed, + num_total = num_total, + num_workers = num_workers, "[BlockSTM]: {} execution completed. {} out of {} txns committed", if is_parallel { - "Parallel" + format!("Parallel[{}]", num_workers) } else { - "Sequential" + "Sequential".to_string() }, num_committed, num_total, @@ -231,8 +239,9 @@ impl BlockGasLimitProcessor { &self, num_committed: u32, num_total: u32, + num_workers: usize, ) { - self.finish_update_counters_and_log_info(true, num_committed, num_total) + self.finish_update_counters_and_log_info(true, num_committed, num_total, num_workers) } pub(crate) fn finish_sequential_update_counters_and_log_info( @@ -240,7 +249,7 @@ impl BlockGasLimitProcessor { num_committed: u32, num_total: u32, ) { - self.finish_update_counters_and_log_info(false, num_committed, num_total) + self.finish_update_counters_and_log_info(false, num_committed, num_total, 1) } pub(crate) fn get_block_end_info(&self) -> BlockEndInfo { diff --git a/aptos-move/block-executor/src/proptest_types/types.rs b/aptos-move/block-executor/src/proptest_types/types.rs index 2e2fed2badd4e..f6d5de2dd1b39 100644 --- a/aptos-move/block-executor/src/proptest_types/types.rs +++ b/aptos-move/block-executor/src/proptest_types/types.rs @@ -10,7 +10,6 @@ use aptos_aggregator::{ }; use aptos_mvhashmap::types::TxnIndex; use aptos_types::{ - access_path::AccessPath, account_address::AccountAddress, contract_event::TransactionEvent, delayed_fields::PanicError, @@ -29,14 +28,13 @@ use aptos_types::{ use 
aptos_vm_types::resolver::{TExecutorView, TResourceGroupView}; use bytes::Bytes; use claims::{assert_ge, assert_le, assert_ok}; -use move_core_types::value::MoveTypeLayout; +use move_core_types::{identifier::IdentStr, value::MoveTypeLayout}; use move_vm_types::delayed_values::delayed_field_id::DelayedFieldID; use once_cell::sync::OnceCell; use proptest::{arbitrary::Arbitrary, collection::vec, prelude::*, proptest, sample::Index}; use proptest_derive::Arbitrary; use std::{ collections::{hash_map::DefaultHasher, BTreeMap, BTreeSet, HashMap, HashSet}, - convert::TryInto, fmt::Debug, hash::{Hash, Hasher}, marker::PhantomData, @@ -154,21 +152,12 @@ pub(crate) struct KeyType( ); impl ModulePath for KeyType { - fn module_path(&self) -> Option { - // Since K is generic, use its hash to assign addresses. - let mut hasher = DefaultHasher::new(); - self.0.hash(&mut hasher); - let mut hashed_address = vec![1u8; AccountAddress::LENGTH - 8]; - hashed_address.extend_from_slice(&hasher.finish().to_ne_bytes()); - - if self.1 { - Some(AccessPath { - address: AccountAddress::new(hashed_address.try_into().unwrap()), - path: b"/foo/b".to_vec(), - }) - } else { - None - } + fn is_module_path(&self) -> bool { + self.1 + } + + fn from_address_and_module_name(_address: &AccountAddress, _module_name: &IdentStr) -> Self { + unimplemented!() } } @@ -894,15 +883,16 @@ where for k in behavior.reads.iter() { // TODO: later test errors as well? (by fixing state_view behavior). // TODO: test aggregator reads. - match k.module_path() { - Some(_) => match view.get_module_bytes(k) { + if k.is_module_path() { + match view.get_module_bytes(k) { Ok(v) => read_results.push(v.map(Into::into)), Err(_) => read_results.push(None), - }, - None => match view.get_resource_bytes(k, None) { + } + } else { + match view.get_resource_bytes(k, None) { Ok(v) => read_results.push(v.map(Into::into)), Err(_) => read_results.push(None), - }, + } } } // Read from groups. 
@@ -1057,7 +1047,7 @@ where fn resource_write_set(&self) -> Vec<(K, Arc, Option>)> { self.writes .iter() - .filter(|(k, _)| k.module_path().is_none()) + .filter(|(k, _)| !k.is_module_path()) .cloned() .map(|(k, v)| (k, Arc::new(v), None)) .collect() @@ -1066,7 +1056,7 @@ where fn module_write_set(&self) -> BTreeMap { self.writes .iter() - .filter(|(k, _)| k.module_path().is_some()) + .filter(|(k, _)| k.is_module_path()) .cloned() .collect() } diff --git a/aptos-move/block-executor/src/view.rs b/aptos-move/block-executor/src/view.rs index f72a408749488..837a47d419ed9 100644 --- a/aptos-move/block-executor/src/view.rs +++ b/aptos-move/block-executor/src/view.rs @@ -1296,7 +1296,7 @@ impl<'a, T: Transaction, S: TStateView, X: Executable> LatestView< kind: ReadKind, ) -> PartialVMResult { debug_assert!( - state_key.module_path().is_none(), + !state_key.is_module_path(), "Reading a module {:?} using ResourceView", state_key, ); @@ -1529,7 +1529,7 @@ impl<'a, T: Transaction, S: TStateView, X: Executable> TModuleView fn get_module_state_value(&self, state_key: &Self::Key) -> PartialVMResult> { debug_assert!( - state_key.module_path().is_some(), + state_key.is_module_path(), "Reading a resource {:?} using ModuleView", state_key, ); diff --git a/aptos-move/e2e-benchmark/src/main.rs b/aptos-move/e2e-benchmark/src/main.rs index fc5600133bfb9..a00e511eaea89 100644 --- a/aptos-move/e2e-benchmark/src/main.rs +++ b/aptos-move/e2e-benchmark/src/main.rs @@ -123,8 +123,8 @@ fn main() { (29000, EntryPoints::InitializeVectorPicture { length: 30 * 1024, }), - (4510, EntryPoints::VectorPicture { length: 30 * 1024 }), - (4400, EntryPoints::VectorPictureRead { length: 30 * 1024 }), + (5900, EntryPoints::VectorPicture { length: 30 * 1024 }), + (5870, EntryPoints::VectorPictureRead { length: 30 * 1024 }), (33580, EntryPoints::SmartTablePicture { length: 30 * 1024, num_points_per_txn: 200, diff --git a/aptos-move/e2e-move-tests/Cargo.toml b/aptos-move/e2e-move-tests/Cargo.toml index 
6158f0f52a9f2..c8c59e87b4756 100644 --- a/aptos-move/e2e-move-tests/Cargo.toml +++ b/aptos-move/e2e-move-tests/Cargo.toml @@ -22,6 +22,7 @@ aptos-gas-profiling = { workspace = true } aptos-gas-schedule = { workspace = true, features = ["testing"] } aptos-language-e2e-tests = { workspace = true } aptos-package-builder = { workspace = true } +aptos-transaction-generator-lib = { workspace = true } aptos-types = { workspace = true } aptos-vm = { workspace = true, features = ["testing"] } bcs = { workspace = true } diff --git a/aptos-move/e2e-move-tests/src/aggregator_v2.rs b/aptos-move/e2e-move-tests/src/aggregator_v2.rs index da99d6c2f9c97..de0582a4268fc 100644 --- a/aptos-move/e2e-move-tests/src/aggregator_v2.rs +++ b/aptos-move/e2e-move-tests/src/aggregator_v2.rs @@ -77,17 +77,25 @@ fn initialize_harness( let mut harness = MoveHarness::new_with_executor(executor); // Reduce gas scaling, so that smaller differences in gas are caught in comparison testing. harness.modify_gas_scaling(1000); + + let common_features = vec![ + FeatureFlag::AGGREGATOR_V2_API, + FeatureFlag::NEW_ACCOUNTS_DEFAULT_TO_FA_APT_STORE, + FeatureFlag::OPERATIONS_DEFAULT_TO_FA_APT_STORE, + FeatureFlag::DEFAULT_TO_CONCURRENT_FUNGIBLE_BALANCE, + ]; + if aggregator_execution_enabled { harness.enable_features( - vec![ - FeatureFlag::AGGREGATOR_V2_API, + [common_features, vec![ FeatureFlag::AGGREGATOR_V2_DELAYED_FIELDS, FeatureFlag::RESOURCE_GROUPS_SPLIT_IN_VM_CHANGE_SET, - ], + ]] + .concat(), vec![], ); } else { - harness.enable_features(vec![FeatureFlag::AGGREGATOR_V2_API], vec![ + harness.enable_features(common_features, vec![ FeatureFlag::AGGREGATOR_V2_DELAYED_FIELDS, FeatureFlag::RESOURCE_GROUPS_SPLIT_IN_VM_CHANGE_SET, ]); diff --git a/aptos-move/e2e-move-tests/src/harness.rs b/aptos-move/e2e-move-tests/src/harness.rs index 16018d40d21d4..84ea350ba36ff 100644 --- a/aptos-move/e2e-move-tests/src/harness.rs +++ b/aptos-move/e2e-move-tests/src/harness.rs @@ -9,7 +9,7 @@ use aptos_gas_schedule::{ 
AptosGasParameters, FromOnChainGasSchedule, InitialGasSchedule, ToOnChainGasSchedule, }; use aptos_language_e2e_tests::{ - account::{Account, AccountData, TransactionBuilder}, + account::{Account, TransactionBuilder}, executor::FakeExecutor, }; use aptos_types::{ @@ -20,6 +20,7 @@ use aptos_types::{ }, chain_id::ChainId, contract_event::ContractEvent, + fee_statement::FeeStatement, move_utils::MemberId, on_chain_config::{FeatureFlag, GasScheduleV2, OnChainConfig}, state_store::{ @@ -31,6 +32,7 @@ use aptos_types::{ TransactionArgument, TransactionOutput, TransactionPayload, TransactionStatus, ViewFunctionOutput, }, + SupraCoinType, }; use claims::assert_ok; use move_core_types::{ @@ -153,8 +155,9 @@ impl MoveHarness { } pub fn store_and_fund_account(&mut self, acc: &Account, balance: u64, seq_num: u64) -> Account { - let data = AccountData::with_account(acc.clone(), balance, seq_num); - self.executor.add_account_data(&data); + let data = self + .executor + .store_and_fund_account(acc.clone(), balance, seq_num); self.txn_seq_no.insert(*acc.address(), seq_num); data.account().clone() } @@ -252,8 +255,8 @@ impl MoveHarness { account: &Account, payload: TransactionPayload, ) -> TransactionBuilder { - let on_chain_seq_no = self.sequence_number(account.address()); - let seq_no_ref = self.txn_seq_no.get_mut(account.address()).unwrap(); + let on_chain_seq_no = self.sequence_number_opt(account.address()).unwrap_or(0); + let seq_no_ref = self.txn_seq_no.entry(*account.address()).or_insert(0); let seq_no = std::cmp::max(on_chain_seq_no, *seq_no_ref); *seq_no_ref = seq_no + 1; account @@ -321,7 +324,7 @@ impl MoveHarness { &mut self, account: &Account, payload: TransactionPayload, - ) -> (TransactionGasLog, u64) { + ) -> (TransactionGasLog, u64, Option) { let txn = self.create_transaction_payload(account, payload); let (output, gas_log) = self .executor @@ -330,7 +333,11 @@ impl MoveHarness { if matches!(output.status(), TransactionStatus::Keep(_)) { 
self.executor.apply_write_set(output.write_set()); } - (gas_log, output.gas_used()) + ( + gas_log, + output.gas_used(), + output.try_extract_fee_statement().unwrap(), + ) } /// Creates a transaction which runs the specified entry point `fun`. Arguments need to be @@ -630,7 +637,7 @@ impl MoveHarness { &mut self, account: &Account, path: &Path, - ) -> (TransactionGasLog, u64) { + ) -> (TransactionGasLog, u64, Option) { let txn = self.create_publish_package(account, path, None, |_| {}); let (output, gas_log) = self .executor @@ -639,7 +646,11 @@ impl MoveHarness { if matches!(output.status(), TransactionStatus::Keep(_)) { self.executor.apply_write_set(output.write_set()); } - (gas_log, output.gas_used()) + ( + gas_log, + output.gas_used(), + output.try_extract_fee_statement().unwrap(), + ) } /// Runs transaction which publishes the Move Package. @@ -781,12 +792,15 @@ impl MoveHarness { } pub fn read_aptos_balance(&self, addr: &AccountAddress) -> u64 { - self.read_resource::(addr, CoinStoreResource::struct_tag()) - .map(|c| c.coin()) - .unwrap_or(0) + self.read_resource::>( + addr, + CoinStoreResource::::struct_tag(), + ) + .map(|c| c.coin()) + .unwrap_or(0) + self .read_resource_from_resource_group::( - &aptos_types::account_config::fungible_store::primary_store(addr), + &aptos_types::account_config::fungible_store::primary_apt_store(*addr), ObjectGroupResource::struct_tag(), FungibleStoreResource::struct_tag(), ) @@ -868,10 +882,14 @@ impl MoveHarness { self.override_one_gas_param("txn.max_transaction_size_in_bytes", 1000 * 1024); } - pub fn sequence_number(&self, addr: &AccountAddress) -> u64 { + pub fn sequence_number_opt(&self, addr: &AccountAddress) -> Option { self.read_resource::(addr, AccountResource::struct_tag()) - .unwrap() - .sequence_number() + .as_ref() + .map(AccountResource::sequence_number) + } + + pub fn sequence_number(&self, addr: &AccountAddress) -> u64 { + self.sequence_number_opt(addr).unwrap() } fn chain_id_is_mainnet(&self, addr: 
&AccountAddress) -> bool { diff --git a/aptos-move/e2e-move-tests/src/tests/access_path_test.rs b/aptos-move/e2e-move-tests/src/tests/access_path_test.rs index b4917624d5a61..40fd2e9f5d08c 100644 --- a/aptos-move/e2e-move-tests/src/tests/access_path_test.rs +++ b/aptos-move/e2e-move-tests/src/tests/access_path_test.rs @@ -90,6 +90,10 @@ fn access_path_panic() { ], }), }], + struct_variant_handles: vec![], + struct_variant_instantiations: vec![], + variant_field_handles: vec![], + variant_field_instantiations: vec![], }; let mut module_bytes = vec![]; diff --git a/aptos-move/e2e-move-tests/src/tests/aggregator_v2.data/pack/sources/runtime_checks.move b/aptos-move/e2e-move-tests/src/tests/aggregator_v2.data/pack/sources/runtime_checks.move index d48395ecc8cd0..733ffd764302f 100644 --- a/aptos-move/e2e-move-tests/src/tests/aggregator_v2.data/pack/sources/runtime_checks.move +++ b/aptos-move/e2e-move-tests/src/tests/aggregator_v2.data/pack/sources/runtime_checks.move @@ -118,6 +118,21 @@ module 0x1::runtime_checks { let _ = bcs::to_bytes(&a); } + public entry fun test_serialized_size_with_aggregators() { + let a = with_aggregator(); + let _ = bcs::serialized_size(&a); + } + + public entry fun test_serialized_size_with_snapshots() { + let a = with_snapshot(0); + let _ = bcs::serialized_size(&a); + } + + public entry fun test_serialized_size_with_derived_string_snapshots() { + let a = with_derived_string_snapshot(b"aaa"); + let _ = bcs::serialized_size(&a); + } + // // String utils: // - to_string diff --git a/aptos-move/e2e-move-tests/src/tests/aggregator_v2.rs b/aptos-move/e2e-move-tests/src/tests/aggregator_v2.rs index d24c681d7381d..8126c1d1d2357 100644 --- a/aptos-move/e2e-move-tests/src/tests/aggregator_v2.rs +++ b/aptos-move/e2e-move-tests/src/tests/aggregator_v2.rs @@ -61,7 +61,7 @@ pub(crate) fn setup( _setup(executor_mode, aggregator_execution_mode, txns, false) } -fn setup_allow_fallback( +pub(crate) fn setup_allow_fallback( executor_mode: ExecutorMode, 
aggregator_execution_mode: AggregatorMode, txns: usize, diff --git a/aptos-move/e2e-move-tests/src/tests/aggregator_v2_enums.rs b/aptos-move/e2e-move-tests/src/tests/aggregator_v2_enums.rs new file mode 100644 index 0000000000000..3d99bfa36a8b3 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/aggregator_v2_enums.rs @@ -0,0 +1,191 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + aggregator_v2::AggV2TestHarness, + tests::{aggregator_v2::AggregatorMode, common}, +}; +use aptos_framework::BuildOptions; +use aptos_language_e2e_tests::executor::ExecutorMode; +use aptos_package_builder::PackageBuilder; +use aptos_types::transaction::SignedTransaction; +use claims::{assert_ok, assert_some}; +use move_core_types::parser::parse_struct_tag; +use serde::{Deserialize, Serialize}; + +#[derive(Deserialize, Serialize)] +struct Integer { + value: u128, + max_value: u128, +} + +#[derive(Deserialize, Serialize)] +struct Aggregator { + value: u128, + max_value: u128, +} + +#[derive(Deserialize, Serialize)] +enum Counter { + Aggregator(Aggregator), + Integer(Integer), +} + +#[test] +fn test_aggregators_in_enums() { + let mut h = make_harness(155); + + // Create a large block, where a counter is incremented 150 times. During the + // test, we switch between parallel (aggregator) and non-parallel (integer) + // implementations. + let mut txns = vec![initialize(&mut h)]; + for _ in 0..50 { + txns.push(increment(&mut h)); + } + txns.push(switch(&mut h)); + for _ in 0..20 { + txns.push(increment(&mut h)); + } + txns.push(switch(&mut h)); + for _ in 0..50 { + txns.push(increment(&mut h)); + } + txns.push(switch(&mut h)); + for _ in 0..30 { + txns.push(increment(&mut h)); + } + txns.push(switch(&mut h)); + let outputs = h.run_block(txns); + + // All transactions must succeed. 
+ assert!(outputs.into_iter().all(|o| { + let execution_status = assert_ok!(o.status().as_kept_status()); + execution_status.is_success() + })); + + // Test the final value: it must be 150. + let counter = assert_some!(h.harness.read_resource::( + h.account.address(), + parse_struct_tag("0x1::enums_with_aggregators::Counter").unwrap(), + )); + let value = match counter { + Counter::Aggregator(aggregator) => aggregator.value, + Counter::Integer(_) => { + unreachable!("Counter has to be an aggregator after even number of switches") + }, + }; + assert_eq!(value, 150); +} + +fn make_harness(num_txns: usize) -> AggV2TestHarness { + let source = r" + module 0x1::enums_with_aggregators { + use supra_framework::aggregator_v2::{Self, Aggregator}; + + struct Integer has store, drop { + value: u128, + max_value: u128, + } + + fun add(integer: &mut Integer, value: u128) { + integer.value = integer.value + value; + } + + enum Counter has key, drop { + Aggregator { aggregator: Aggregator }, + Integer { integer: Integer }, + } + + public entry fun initialize(account: &signer, parallel: bool) { + let counter = if (parallel) { + let aggregator = aggregator_v2::create_aggregator(1000); + Counter::Aggregator { aggregator } + } else { + let integer = Integer { value: 0, max_value: 1000 }; + Counter::Integer { integer } + }; + move_to(account, counter); + } + + public entry fun increment(addr: address) acquires Counter { + let counter = borrow_global_mut(addr); + match (counter) { + Counter::Aggregator { aggregator } => { + aggregator_v2::add(aggregator, 1); + }, + Counter::Integer { integer } => { + add(integer, 1); + }, + } + } + + public entry fun switch(addr: address) acquires Counter { + let counter = borrow_global_mut(addr); + match (counter) { + Counter::Aggregator { aggregator } => { + let value = aggregator_v2::read(aggregator); + let integer = Integer { value, max_value: 1000 }; + *counter = Counter::Integer { integer }; + }, + Counter::Integer { integer } => { + let 
aggregator = aggregator_v2::create_aggregator(1000); + aggregator_v2::add(&mut aggregator, integer.value); + *counter = Counter::Aggregator { aggregator }; + }, + } + } + } + "; + + // Create a package with testing code. + let mut builder = PackageBuilder::new("enums_with_aggregators"); + builder.add_source("enums_with_aggregators.move", source); + builder.add_local_dep( + "SupraFramework", + &common::framework_dir_path("supra-framework").to_string_lossy(), + ); + let path = builder.write_to_temp().unwrap(); + + let mut h = crate::tests::aggregator_v2::setup_allow_fallback( + ExecutorMode::BothComparison, + AggregatorMode::BothComparison, + num_txns + 1, + ); + + // Publish the package to ensure subsequent tests can use that code. + let txn = h.harness.create_publish_package( + &h.account, + path.path(), + Some(BuildOptions::move_2()), + |_| {}, + ); + h.run_block(vec![txn]); + h +} + +fn initialize(h: &mut AggV2TestHarness) -> SignedTransaction { + h.harness.create_entry_function( + &h.account, + str::parse("0x1::enums_with_aggregators::initialize").unwrap(), + vec![], + vec![bcs::to_bytes(&true).unwrap()], + ) +} + +fn increment(h: &mut AggV2TestHarness) -> SignedTransaction { + h.harness.create_entry_function( + &h.account, + str::parse("0x1::enums_with_aggregators::increment").unwrap(), + vec![], + vec![bcs::to_bytes(h.account.address()).unwrap()], + ) +} + +fn switch(h: &mut AggV2TestHarness) -> SignedTransaction { + h.harness.create_entry_function( + &h.account, + str::parse("0x1::enums_with_aggregators::switch").unwrap(), + vec![], + vec![bcs::to_bytes(h.account.address()).unwrap()], + ) +} diff --git a/aptos-move/e2e-move-tests/src/tests/aggregator_v2_runtime_checks.rs b/aptos-move/e2e-move-tests/src/tests/aggregator_v2_runtime_checks.rs index 9f9cb001d8dc3..2fb694ac99c14 100644 --- a/aptos-move/e2e-move-tests/src/tests/aggregator_v2_runtime_checks.rs +++ b/aptos-move/e2e-move-tests/src/tests/aggregator_v2_runtime_checks.rs @@ -101,6 +101,21 @@ fn 
test_serialization() { }); } +#[test] +fn test_serialized_size() { + let func_names = vec![ + "0x1::runtime_checks::test_serialized_size_with_aggregators", + "0x1::runtime_checks::test_serialized_size_with_snapshots", + "0x1::runtime_checks::test_serialized_size_with_derived_string_snapshots", + ]; + + // Serialized size of delayed values is deterministic and fixed, so running + // these functions should succeed, unlike regular serialization. + run_entry_functions(func_names, |status: ExecutionStatus| { + assert_eq!(status, ExecutionStatus::Success); + }); +} + #[test] fn test_string_utils() { let func_names = vec![ diff --git a/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_native/Move.toml b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_native/Move.toml new file mode 100644 index 0000000000000..a2eaeab5a4874 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_native/Move.toml @@ -0,0 +1,7 @@ +[package] +name = "test_package" +version = "0.0.0" +upgrade_policy = "compatible" + +[dependencies] +SupraFramework = { local = "../../../../../framework/supra-framework" } diff --git a/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_native/sources/test.move b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_native/sources/test.move new file mode 100644 index 0000000000000..5a81004d6ed82 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_native/sources/test.move @@ -0,0 +1,3 @@ +module 0xcafe::test { + public entry native fun hello(s: &signer, value: u64); +} diff --git a/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_native_system/Move.toml b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_native_system/Move.toml new file mode 100644 index 0000000000000..a2eaeab5a4874 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_native_system/Move.toml @@ -0,0 +1,7 @@ +[package] +name = "test_package" 
+version = "0.0.0" +upgrade_policy = "compatible" + +[dependencies] +SupraFramework = { local = "../../../../../framework/supra-framework" } diff --git a/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_native_system/sources/test.move b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_native_system/sources/test.move new file mode 100644 index 0000000000000..e6c5adf4bb2e3 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_native_system/sources/test.move @@ -0,0 +1,3 @@ +module 0x1::test { + public entry native fun hello(s: &signer, value: u64); +} diff --git a/aptos-move/e2e-move-tests/src/tests/code_publishing.rs b/aptos-move/e2e-move-tests/src/tests/code_publishing.rs index 7aecb842bdc94..1aaf533ccdb91 100644 --- a/aptos-move/e2e-move-tests/src/tests/code_publishing.rs +++ b/aptos-move/e2e-move-tests/src/tests/code_publishing.rs @@ -91,6 +91,56 @@ fn code_publishing_upgrade_success_compat() { )); } +#[test] +fn code_publishing_disallow_native() { + let mut h = MoveHarness::new(); + let acc = h.new_account_at(AccountAddress::from_hex_literal("0xcafe").unwrap()); + + assert_vm_status!( + h.publish_package_cache_building( + &acc, + &common::test_dir_path("code_publishing.data/pack_native"), + ), + StatusCode::USER_DEFINED_NATIVE_NOT_ALLOWED + ); +} + +#[test] +fn code_publishing_disallow_native_entry_func() { + let mut h = MoveHarness::new(); + // Disable feature for now to publish the package. 
+ h.enable_features(vec![], vec![FeatureFlag::DISALLOW_USER_NATIVES]); + let acc = h.new_account_at(AccountAddress::from_hex_literal("0xcafe").unwrap()); + + assert_success!(h.publish_package_cache_building( + &acc, + &common::test_dir_path("code_publishing.data/pack_native"), + )); + + // Re-enable the flag to test the behavior for this entry native + h.enable_features(vec![FeatureFlag::DISALLOW_USER_NATIVES], vec![]); + assert_vm_status!( + h.run_entry_function( + &acc, + str::parse("0xcafe::test::hello").unwrap(), + vec![], + vec![] + ), + StatusCode::USER_DEFINED_NATIVE_NOT_ALLOWED + ); +} + +#[test] +fn code_publishing_allow_system_native() { + let mut h = MoveHarness::new(); + let acc = h.new_account_at(AccountAddress::from_hex_literal("0x1").unwrap()); + + assert_success!(h.publish_package_cache_building( + &acc, + &common::test_dir_path("code_publishing.data/pack_native_system"), + )); +} + #[test] fn code_publishing_upgrade_fail_compat() { let mut h = MoveHarness::new(); diff --git a/aptos-move/e2e-move-tests/src/tests/enum_upgrade.rs b/aptos-move/e2e-move-tests/src/tests/enum_upgrade.rs new file mode 100644 index 0000000000000..998fafd118eea --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/enum_upgrade.rs @@ -0,0 +1,112 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +//! 
Tests for enum type upgrade compatibility + +use crate::{assert_success, assert_vm_status, MoveHarness}; +use aptos_framework::BuildOptions; +use aptos_language_e2e_tests::account::Account; +use aptos_package_builder::PackageBuilder; +use aptos_types::{account_address::AccountAddress, transaction::TransactionStatus}; +use move_core_types::vm_status::StatusCode; + +#[test] +fn enum_upgrade() { + let mut h = MoveHarness::new(); + let acc = h.new_account_at(AccountAddress::from_hex_literal("0x815").unwrap()); + + // Initial publish + let result = publish( + &mut h, + &acc, + r#" + module 0x815::m { + enum Data { + V1{x: u64} + } + } + "#, + ); + assert_success!(result); + + // Add a compatible variant + let result = publish( + &mut h, + &acc, + r#" + module 0x815::m { + enum Data { + V1{x: u64}, + V2{x: u64, y: u8}, + } + } + "#, + ); + assert_success!(result); + + // Upgrade identity + let result = publish( + &mut h, + &acc, + r#" + module 0x815::m { + enum Data { + V1{x: u64}, + V2{x: u64, y: u8}, + } + } + "#, + ); + assert_success!(result); + + // Incompatible because of modification + let result = publish( + &mut h, + &acc, + r#" + module 0x815::m { + enum Data { + V1{x: u64, z: u32}, + V2{x: u64, y: u8}, + } + } + "#, + ); + assert_vm_status!(result, StatusCode::BACKWARD_INCOMPATIBLE_MODULE_UPDATE); + + // Incompatible because of removal + let result = publish( + &mut h, + &acc, + r#" + module 0x815::m { + enum Data { + V2{x: u64, y: u8}, + } + } + "#, + ); + assert_vm_status!(result, StatusCode::BACKWARD_INCOMPATIBLE_MODULE_UPDATE); + + // Incompatible because of renaming + let result = publish( + &mut h, + &acc, + r#" + module 0x815::m { + enum Data { + V1{x: u64}, + V2a{x: u64, y: u8}, + } + } + "#, + ); + assert_vm_status!(result, StatusCode::BACKWARD_INCOMPATIBLE_MODULE_UPDATE); +} + +fn publish(h: &mut MoveHarness, account: &Account, source: &str) -> TransactionStatus { + let mut builder = PackageBuilder::new("Package"); + builder.add_source("m.move", 
source); + let path = builder.write_to_temp().unwrap(); + h.publish_package_with_options(account, path.path(), BuildOptions::move_2()) +} diff --git a/aptos-move/e2e-move-tests/src/tests/federated_keyless_init_config.data/pack/Move.toml b/aptos-move/e2e-move-tests/src/tests/federated_keyless_init_config.data/pack/Move.toml new file mode 100644 index 0000000000000..a9fced9b0156c --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/federated_keyless_init_config.data/pack/Move.toml @@ -0,0 +1,6 @@ +[package] +name = 'FederatedKeylessInitConfig' +version = "0.0.0" + +[dependencies] +SupraFramework = { local = "../../../../../framework/supra-framework" } diff --git a/aptos-move/e2e-move-tests/src/tests/federated_keyless_init_config.data/pack/sources/main.move b/aptos-move/e2e-move-tests/src/tests/federated_keyless_init_config.data/pack/sources/main.move new file mode 100644 index 0000000000000..c53c134b3172d --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/federated_keyless_init_config.data/pack/sources/main.move @@ -0,0 +1,20 @@ +script { + use supra_framework::supra_governance; + use supra_framework::jwks; + use supra_framework::keyless_account; + + fun main(core_resources: &signer, max_exp_horizon_secs: u64) { + let fx = supra_governance::get_signer_testnet_only(core_resources, @supra_framework); + + keyless_account::update_max_exp_horizon_for_next_epoch(&fx, max_exp_horizon_secs); + + // remove all the JWKs in 0x1 (since we will be reusing the iss as a federated one; and we don't want the 0x1 JWKs to take priority over our federated JWKs) + let patches = vector[ + jwks::new_patch_remove_all(), + ]; + jwks::set_patches(&fx, patches); + + // sets the pending Configuration change to the max expiration horizon from above + supra_governance::force_end_epoch_test_only(core_resources); + } +} diff --git a/aptos-move/e2e-move-tests/src/tests/fee_payer.rs b/aptos-move/e2e-move-tests/src/tests/fee_payer.rs index 2a2242f97afc8..b858141a943bb 100644 --- 
a/aptos-move/e2e-move-tests/src/tests/fee_payer.rs +++ b/aptos-move/e2e-move-tests/src/tests/fee_payer.rs @@ -13,6 +13,7 @@ use aptos_types::{ move_utils::MemberId, on_chain_config::FeatureFlag, transaction::{EntryFunction, ExecutionStatus, Script, TransactionPayload, TransactionStatus}, + SupraCoinType, }; use aptos_vm_types::storage::StorageGasParameters; use move_core_types::{move_resource::MoveStructType, vm_status::StatusCode}; @@ -114,8 +115,10 @@ fn test_account_not_exist_with_fee_payer() { let alice = Account::new(); let bob = h.new_account_at(AccountAddress::from_hex_literal("0xb0b").unwrap()); - let alice_start = - h.read_resource::(alice.address(), CoinStoreResource::struct_tag()); + let alice_start = h.read_resource::>( + alice.address(), + CoinStoreResource::::struct_tag(), + ); assert!(alice_start.is_none()); let bob_start = h.read_aptos_balance(bob.address()); @@ -131,8 +134,10 @@ fn test_account_not_exist_with_fee_payer() { let output = h.run_raw(transaction); assert_success!(*output.status()); - let alice_after = - h.read_resource::(alice.address(), CoinStoreResource::struct_tag()); + let alice_after = h.read_resource::>( + alice.address(), + CoinStoreResource::::struct_tag(), + ); assert!(alice_after.is_none()); let bob_after = h.read_aptos_balance(bob.address()); @@ -152,8 +157,10 @@ fn test_account_not_exist_with_fee_payer_insufficient_gas() { let alice = Account::new(); let bob = h.new_account_at(AccountAddress::from_hex_literal("0xb0b").unwrap()); - let alice_start = - h.read_resource::(alice.address(), CoinStoreResource::struct_tag()); + let alice_start = h.read_resource::>( + alice.address(), + CoinStoreResource::::struct_tag(), + ); assert!(alice_start.is_none()); let bob_start = h.read_aptos_balance(bob.address()); @@ -172,8 +179,10 @@ fn test_account_not_exist_with_fee_payer_insufficient_gas() { &TransactionStatus::Discard(StatusCode::MAX_GAS_UNITS_BELOW_MIN_TRANSACTION_GAS_UNITS), )); - let alice_after = - 
h.read_resource::(alice.address(), CoinStoreResource::struct_tag()); + let alice_after = h.read_resource::>( + alice.address(), + CoinStoreResource::::struct_tag(), + ); assert!(alice_after.is_none()); let bob_after = h.read_aptos_balance(bob.address()); assert_eq!(bob_start, bob_after); @@ -192,8 +201,10 @@ fn test_account_not_exist_and_move_abort_with_fee_payer_create_account() { let alice = Account::new(); let bob = h.new_account_at(AccountAddress::from_hex_literal("0xb0b").unwrap()); - let alice_start = - h.read_resource::(alice.address(), CoinStoreResource::struct_tag()); + let alice_start = h.read_resource::>( + alice.address(), + CoinStoreResource::::struct_tag(), + ); assert!(alice_start.is_none()); let bob_start = h.read_aptos_balance(bob.address()); @@ -227,8 +238,10 @@ fn test_account_not_exist_and_move_abort_with_fee_payer_create_account() { assert!(output.gas_used() <= PRICING.new_account_upfront(GAS_UNIT_PRICE)); assert!(output.gas_used() > PRICING.new_account_min_abort(GAS_UNIT_PRICE)); - let alice_after = - h.read_resource::(alice.address(), CoinStoreResource::struct_tag()); + let alice_after = h.read_resource::>( + alice.address(), + CoinStoreResource::::struct_tag(), + ); assert!(alice_after.is_none()); let bob_after = h.read_aptos_balance(bob.address()); @@ -335,8 +348,10 @@ fn test_account_not_exist_with_fee_payer_without_create_account() { let alice = Account::new(); let bob = h.new_account_at(AccountAddress::from_hex_literal("0xb0b").unwrap()); - let alice_start = - h.read_resource::(alice.address(), CoinStoreResource::struct_tag()); + let alice_start = h.read_resource::>( + alice.address(), + CoinStoreResource::::struct_tag(), + ); assert!(alice_start.is_none()); let payload = aptos_stdlib::supra_account_set_allow_direct_coin_transfers(true); diff --git a/aptos-move/e2e-move-tests/src/tests/fungible_asset.rs b/aptos-move/e2e-move-tests/src/tests/fungible_asset.rs index 3d64a6838f69d..5956d104c480f 100644 --- 
a/aptos-move/e2e-move-tests/src/tests/fungible_asset.rs +++ b/aptos-move/e2e-move-tests/src/tests/fungible_asset.rs @@ -1,8 +1,16 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::{assert_success, tests::common, MoveHarness}; -use aptos_types::account_address::{self, AccountAddress}; +use crate::{assert_success, tests::common, BlockSplit, MoveHarness, SUCCESS}; +use aptos_cached_packages::aptos_stdlib::{supra_account_batch_transfer, supra_account_transfer}; +use aptos_language_e2e_tests::{ + account::Account, + executor::{ExecutorMode, FakeExecutor}, +}; +use aptos_types::{ + account_address::{self, AccountAddress}, + on_chain_config::FeatureFlag, +}; use move_core_types::{ identifier::Identifier, language_storage::{StructTag, TypeTag}, @@ -226,3 +234,58 @@ fn test_coin_to_fungible_asset_migration() { ) .is_some()); } + +/// Trigger speculative error in prologue, from accessing delayed field that was created later than +/// last committed index (so that read_last_commited_value fails speculatively) +/// +/// We do that by having an expensive transaction first (to make sure committed index isn't moved), +/// and then create some new aggregators (concurrent balances for new accounts), and then have them issue +/// transactions - so their balance is checked in prologue. 
+#[test] +fn test_prologue_speculation() { + let executor = FakeExecutor::from_head_genesis().set_executor_mode(ExecutorMode::ParallelOnly); + + let mut harness = MoveHarness::new_with_executor(executor); + harness.enable_features( + vec![ + FeatureFlag::NEW_ACCOUNTS_DEFAULT_TO_FA_APT_STORE, + FeatureFlag::OPERATIONS_DEFAULT_TO_FA_APT_STORE, + FeatureFlag::DEFAULT_TO_CONCURRENT_FUNGIBLE_BALANCE, + ], + vec![], + ); + let independent_account = harness.new_account_at(AccountAddress::random()); + + let sink_txn = harness.create_transaction_payload( + &independent_account, + supra_account_batch_transfer(vec![AccountAddress::random(); 50], vec![10_000_000_000; 50]), + ); + + let account = harness.new_account_at(AccountAddress::ONE); + let dst_1 = Account::new(); + let dst_2 = Account::new(); + let dst_3 = Account::new(); + + let fund_txn = harness.create_transaction_payload( + &account, + supra_account_batch_transfer( + vec![*dst_1.address(), *dst_2.address(), *dst_3.address()], + vec![10_000_000_000, 10_000_000_000, 10_000_000_000], + ), + ); + + let transfer_1_txn = + harness.create_transaction_payload(&dst_1, supra_account_transfer(*dst_2.address(), 1)); + let transfer_2_txn = + harness.create_transaction_payload(&dst_2, supra_account_transfer(*dst_3.address(), 1)); + let transfer_3_txn = + harness.create_transaction_payload(&dst_3, supra_account_transfer(*dst_1.address(), 1)); + + harness.run_block_in_parts_and_check(BlockSplit::Whole, vec![ + (SUCCESS, sink_txn), + (SUCCESS, fund_txn), + (SUCCESS, transfer_1_txn), + (SUCCESS, transfer_2_txn), + (SUCCESS, transfer_3_txn), + ]); +} diff --git a/aptos-move/e2e-move-tests/src/tests/gas.rs b/aptos-move/e2e-move-tests/src/tests/gas.rs index 42f718e104efb..4a32d5e0f7b02 100644 --- a/aptos-move/e2e-move-tests/src/tests/gas.rs +++ b/aptos-move/e2e-move-tests/src/tests/gas.rs @@ -13,14 +13,25 @@ use crate::{ }; use aptos_cached_packages::{aptos_stdlib, aptos_token_sdk_builder}; use aptos_crypto::{ed25519, PrivateKey, 
Uniform}; +use aptos_gas_algebra::GasQuantity; use aptos_gas_profiling::TransactionGasLog; +use aptos_language_e2e_tests::account::Account; +use aptos_transaction_generator_lib::{ + publishing::{ + module_simple::{LoopType, MultiSigConfig}, + publish_util::PackageHandler, + }, + EntryPoints, +}; use aptos_types::{ account_address::{default_stake_pool_address, AccountAddress}, account_config::CORE_CODE_ADDRESS, + fee_statement::FeeStatement, transaction::{EntryFunction, TransactionPayload}, vm::configs::set_paranoid_type_checks, }; use move_core_types::{identifier::Identifier, language_storage::ModuleId}; +use rand::{rngs::StdRng, SeedableRng}; use sha3::{Digest, Sha3_512}; use std::path::Path; @@ -68,6 +79,94 @@ fn save_profiling_results(name: &str, log: &TransactionGasLog) { .unwrap(); } +pub struct SummaryExeAndIO { + pub intrinsic_cost: f64, + pub execution_cost: f64, + pub read_cost: f64, + pub write_cost: f64, +} + +fn summarize_exe_and_io(log: TransactionGasLog) -> SummaryExeAndIO { + fn cast(gas: GasQuantity) -> f64 { + u64::from(gas) as f64 + } + + let scale = cast(log.exec_io.gas_scaling_factor); + + let aggregated = log.exec_io.aggregate_gas_events(); + + let execution = aggregated.ops.iter().map(|(_, _, v)| cast(*v)).sum::(); + let read = aggregated + .storage_reads + .iter() + .map(|(_, _, v)| cast(*v)) + .sum::(); + let write = aggregated + .storage_writes + .iter() + .map(|(_, _, v)| cast(*v)) + .sum::(); + SummaryExeAndIO { + intrinsic_cost: cast(log.exec_io.intrinsic_cost) / scale, + execution_cost: execution / scale, + read_cost: read / scale, + write_cost: write / scale, + } +} + +struct Runner { + pub harness: MoveHarness, + profile_gas: bool, +} + +impl Runner { + pub fn run(&mut self, function: &str, account: &Account, payload: TransactionPayload) { + if !self.profile_gas { + print_gas_cost(function, self.harness.evaluate_gas(account, payload)); + } else { + let (log, gas_used, fee_statement) = + self.harness.evaluate_gas_with_profiler(account, 
payload); + save_profiling_results(function, &log); + print_gas_cost_with_statement(function, gas_used, fee_statement); + } + } + + pub fn run_with_tps_estimate( + &mut self, + function: &str, + account: &Account, + payload: TransactionPayload, + tps: f64, + ) { + if !self.profile_gas { + print_gas_cost(function, self.harness.evaluate_gas(account, payload)); + } else { + let (log, gas_used, fee_statement) = + self.harness.evaluate_gas_with_profiler(account, payload); + save_profiling_results(function, &log); + print_gas_cost_with_statement_and_tps( + function, + gas_used, + fee_statement, + summarize_exe_and_io(log), + tps, + ); + } + } + + pub fn publish(&mut self, name: &str, account: &Account, path: &Path) { + if !self.profile_gas { + print_gas_cost(name, self.harness.evaluate_publish_gas(account, path)); + } else { + let (log, gas_used, fee_statement) = self + .harness + .evaluate_publish_gas_with_profiler(account, path); + save_profiling_results(name, &log); + print_gas_cost_with_statement(name, gas_used, fee_statement); + } + } +} + /// Run with `cargo test test_gas -- --nocapture` to see output. 
#[test] fn test_gas() { @@ -91,52 +190,34 @@ fn test_gas() { Err(_) => true, }; - let run = |harness: &mut MoveHarness, function, account, payload| { - if !profile_gas { - print_gas_cost(function, harness.evaluate_gas(account, payload)); - } else { - let (log, gas_used) = harness.evaluate_gas_with_profiler(account, payload); - save_profiling_results(function, &log); - print_gas_cost(function, gas_used); - } - }; - - let publish = |harness: &mut MoveHarness, name, account, path: &Path| { - if !profile_gas { - print_gas_cost(name, harness.evaluate_publish_gas(account, path)); - } else { - let (log, gas_used) = harness.evaluate_publish_gas_with_profiler(account, path); - save_profiling_results(name, &log); - print_gas_cost(name, gas_used); - } + let mut runner = Runner { + harness, + profile_gas, }; set_paranoid_type_checks(true); - run( - &mut harness, + runner.run( "Transfer", account_1, aptos_stdlib::aptos_coin_transfer(account_2_address, 1000), ); - run( - &mut harness, + runner.run( "2ndTransfer", account_1, aptos_stdlib::aptos_coin_transfer(account_2_address, 1000), ); - run( - &mut harness, + runner.run( "CreateAccount", account_1, aptos_stdlib::supra_account_create_account( AccountAddress::from_hex_literal("0xcafe1").unwrap(), ), ); - run( - &mut harness, + + runner.run( "CreateTransfer", account_1, aptos_stdlib::supra_account_transfer( @@ -145,35 +226,30 @@ fn test_gas() { ), ); - publish_object_token_example(&mut harness, account_1_address, account_1); - run( - &mut harness, + publish_object_token_example(&mut runner.harness, account_1_address, account_1); + runner.run( "MintTokenV2", account_1, create_mint_hero_payload(&account_1_address, SHORT_STR), ); - run( - &mut harness, + runner.run( "MutateTokenV2", account_1, create_set_hero_description_payload(&account_1_address, SHORT_STR), ); - publish_object_token_example(&mut harness, account_2_address, account_2); - run( - &mut harness, + publish_object_token_example(&mut runner.harness, account_2_address, 
account_2); + runner.run( "MintLargeTokenV2", account_2, create_mint_hero_payload(&account_2_address, LONG_STR), ); - run( - &mut harness, + runner.run( "MutateLargeTokenV2", account_2, create_set_hero_description_payload(&account_2_address, LONG_STR), ); - run( - &mut harness, + runner.run( "CreateStakePool", account_1, aptos_stdlib::staking_contract_create_staking_contract( @@ -187,48 +263,41 @@ fn test_gas() { let pool_address = default_stake_pool_address(account_1_address, account_2_address); let consensus_key = ed25519::PrivateKey::generate_for_testing(); let consensus_pubkey = consensus_key.public_key().to_bytes().to_vec(); - run( - &mut harness, + runner.run( "RotateConsensusKey", account_2, aptos_stdlib::stake_rotate_consensus_key(pool_address, consensus_pubkey), ); - run( - &mut harness, + runner.run( "JoinValidator100", account_2, aptos_stdlib::stake_join_validator_set(pool_address), ); - run( - &mut harness, + runner.run( "AddStake", account_1, aptos_stdlib::staking_contract_add_stake(account_2_address, 1000), ); - run( - &mut harness, + runner.run( "UnlockStake", account_1, aptos_stdlib::staking_contract_unlock_stake(account_2_address, 1000), ); - harness.fast_forward(7200); - harness.new_epoch(); - run( - &mut harness, + runner.harness.fast_forward(7200); + runner.harness.new_epoch(); + runner.run( "WithdrawStake", account_1, aptos_stdlib::staking_contract_distribute(account_1_address, account_2_address), ); - run( - &mut harness, + runner.run( "LeaveValidatorSet100", account_2, aptos_stdlib::stake_leave_validator_set(pool_address), ); let collection_name = "collection name".to_owned().into_bytes(); let token_name = "token name".to_owned().into_bytes(); - run( - &mut harness, + runner.run( "CreateCollection", account_1, aptos_token_sdk_builder::token_create_collection_script( @@ -239,8 +308,7 @@ fn test_gas() { vec![false, false, false], ), ); - run( - &mut harness, + runner.run( "CreateTokenFirstTime", account_1, 
aptos_token_sdk_builder::token_create_token_script( @@ -259,8 +327,7 @@ fn test_gas() { vec!["int".as_bytes().to_vec()], ), ); - run( - &mut harness, + runner.run( "MintTokenV1", account_1, aptos_token_sdk_builder::token_mint_script( @@ -270,8 +337,7 @@ fn test_gas() { 1, ), ); - run( - &mut harness, + runner.run( "MutateTokenV1", account_1, aptos_token_sdk_builder::token_mutate_token_properties( @@ -286,9 +352,8 @@ fn test_gas() { vec!["int".as_bytes().to_vec()], ), ); - run( - &mut harness, - "MutateTokenV12ndTime", + runner.run( + "MutateToken2ndTime", account_1, aptos_token_sdk_builder::token_mutate_token_properties( account_1_address, @@ -311,8 +376,7 @@ fn test_gas() { vals.push(format!("{}", i).as_bytes().to_vec()); typs.push("u64".as_bytes().to_vec()); } - run( - &mut harness, + runner.run( "MutateTokenAdd10NewProperties", account_1, aptos_token_sdk_builder::token_mutate_token_properties( @@ -327,8 +391,7 @@ fn test_gas() { typs.clone(), ), ); - run( - &mut harness, + runner.run( "MutateTokenMutate10ExistingProperties", account_1, aptos_token_sdk_builder::token_mutate_token_properties( @@ -344,51 +407,46 @@ fn test_gas() { ), ); - let publisher = &harness.new_account_at(AccountAddress::from_hex_literal("0xcafe").unwrap()); - publish( - &mut harness, + let publisher = &runner + .harness + .new_account_at(AccountAddress::from_hex_literal("0xcafe").unwrap()); + runner.publish( "PublishSmall", publisher, &test_dir_path("code_publishing.data/pack_initial"), ); - publish( - &mut harness, + runner.publish( "UpgradeSmall", publisher, &test_dir_path("code_publishing.data/pack_upgrade_compat"), ); - publish( - &mut harness, + let publisher = &runner.harness.aptos_framework_account(); + runner.publish( "PublishLarge", publisher, &test_dir_path("code_publishing.data/pack_large"), ); - publish( - &mut harness, + runner.publish( "UpgradeLarge", publisher, &test_dir_path("code_publishing.data/pack_large_upgrade"), ); - publish( - &mut harness, + runner.publish( 
"PublishDependencyChain-1", publisher, &test_dir_path("dependencies.data/p1"), ); - publish( - &mut harness, + runner.publish( "PublishDependencyChain-2", publisher, &test_dir_path("dependencies.data/p2"), ); - publish( - &mut harness, + runner.publish( "PublishDependencyChain-3", publisher, &test_dir_path("dependencies.data/p3"), ); - run( - &mut harness, + runner.run( "UseDependencyChain-1", publisher, TransactionPayload::EntryFunction(EntryFunction::new( @@ -401,8 +459,7 @@ fn test_gas() { vec![], )), ); - run( - &mut harness, + runner.run( "UseDependencyChain-2", publisher, TransactionPayload::EntryFunction(EntryFunction::new( @@ -415,8 +472,7 @@ fn test_gas() { vec![], )), ); - run( - &mut harness, + runner.run( "UseDependencyChain-3", publisher, TransactionPayload::EntryFunction(EntryFunction::new( @@ -437,12 +493,240 @@ fn dollar_cost(gas_units: u64, price: u64) -> f64 { pub fn print_gas_cost(function: &str, gas_units: u64) { println!( - "{:20} | {:8} | {:.6} | {:.6} | {:.6}", + "{:8} | {:.6} | {:.6} | {:.6} | {}", + gas_units, + dollar_cost(gas_units, 5), + dollar_cost(gas_units, 15), + dollar_cost(gas_units, 30), function, + ); +} + +pub fn print_gas_cost_with_statement( + function: &str, + gas_units: u64, + fee_statement: Option, +) { + println!( + "{:8} | {:.6} | {:.6} | {:.6} | {:8} | {:8} | {:8} | {}", gas_units, dollar_cost(gas_units, 5), dollar_cost(gas_units, 15), - dollar_cost(gas_units, 30) + dollar_cost(gas_units, 30), + fee_statement.unwrap().execution_gas_used() + fee_statement.unwrap().io_gas_used(), + fee_statement.unwrap().execution_gas_used(), + fee_statement.unwrap().io_gas_used(), + function, + ); +} + +pub fn print_gas_cost_with_statement_and_tps_header() { + println!( + "{:9} | {:9.6} | {:9.6} | {:9.6} | {:8} | {:8} | {:8} | {:8} | {:8} | {:8} | {:10}", + "gas units", + "$ at 5", + "$ at 15", + "$ at 30", + "exe+io g", + // "exe gas", + // "io gas", + "intrins", + "execut", + "read", + "write", + "gas / s", + "function", + ); +} + +pub 
fn print_gas_cost_with_statement_and_tps( + function: &str, + gas_units: u64, + fee_statement: Option<FeeStatement>, + summary: SummaryExeAndIO, + tps: f64, +) { + println!( + "{:9} | {:9.6} | {:9.6} | {:9.6} | {:8} | {:8.2} | {:8.2} | {:8.2} | {:8.2} | {:8.0} | {}", + gas_units, + dollar_cost(gas_units, 5), + dollar_cost(gas_units, 15), + dollar_cost(gas_units, 30), + fee_statement.unwrap().execution_gas_used() + fee_statement.unwrap().io_gas_used(), + // fee_statement.unwrap().execution_gas_used(), + // fee_statement.unwrap().io_gas_used(), + summary.intrinsic_cost, + summary.execution_cost, + summary.read_cost, + summary.write_cost, + (fee_statement.unwrap().execution_gas_used() + fee_statement.unwrap().io_gas_used()) as f64 + * tps, + function, + ); +} + +#[test] +#[ignore] +fn test_txn_generator_workloads_calibrate_gas() { + // Start with 100 validators. + let mut harness = MoveHarness::new_with_validators(100); + let account_1 = &harness.new_account_at(AccountAddress::from_hex_literal("0x121").unwrap()); + let account_2 = &harness.new_account_at(AccountAddress::from_hex_literal("0x122").unwrap()); + let account_2_address = *account_2.address(); + + // Use the gas profiler unless explicitly disabled by the user. + // + // This is to give us some basic code coverage on the gas profile. + let profile_gas = match std::env::var("PROFILE_GAS") { + Ok(s) => { + let s = s.to_lowercase(); + s != "0" && s != "false" && s != "no" + }, + Err(_) => true, + }; + + let mut runner = Runner { + harness, + profile_gas, + }; + + set_paranoid_type_checks(true); + + print_gas_cost_with_statement_and_tps_header(); + + let use_large_db_numbers = true; + + // Constants here are produced from running + // NUMBER_OF_EXECUTION_THREADS=1 testsuite/single_node_performance.py + // on a prod-spec'd machine.
+ let entry_points = vec![ + (2963., 4103., EntryPoints::Nop), + (2426., 3411., EntryPoints::BytesMakeOrChange { + data_length: Some(32), + }), + (2388., 3270., EntryPoints::IncGlobal), + (27., 28., EntryPoints::Loop { + loop_count: Some(100000), + loop_type: LoopType::NoOp, + }), + (44., 42., EntryPoints::Loop { + loop_count: Some(10000), + loop_type: LoopType::Arithmetic, + }), + // This is a cheap bcs (serializing vec), so not representative of what BCS native call should cost. + // (175., EntryPoints::Loop { loop_count: Some(1000), loop_type: LoopType::BCS { len: 1024 }}), + (666., 1031., EntryPoints::CreateObjects { + num_objects: 10, + object_payload_size: 0, + }), + (103., 108., EntryPoints::CreateObjects { + num_objects: 10, + object_payload_size: 10 * 1024, + }), + (93., 148., EntryPoints::CreateObjects { + num_objects: 100, + object_payload_size: 0, + }), + (43., 50., EntryPoints::CreateObjects { + num_objects: 100, + object_payload_size: 10 * 1024, + }), + (1605., 2100., EntryPoints::InitializeVectorPicture { + length: 40, + }), + (2850., 3400., EntryPoints::VectorPicture { length: 40 }), + (2900., 3480., EntryPoints::VectorPictureRead { length: 40 }), + (30., 31., EntryPoints::InitializeVectorPicture { + length: 30 * 1024, + }), + (169., 180., EntryPoints::VectorPicture { length: 30 * 1024 }), + (189., 200., EntryPoints::VectorPictureRead { + length: 30 * 1024, + }), + (22., 17.8, EntryPoints::SmartTablePicture { + length: 30 * 1024, + num_points_per_txn: 200, + }), + (3., 2.75, EntryPoints::SmartTablePicture { + length: 1024 * 1024, + num_points_per_txn: 1024, + }), + (1351., 1719., EntryPoints::TokenV1MintAndTransferFT), + ( + 971., + 1150., + EntryPoints::TokenV1MintAndTransferNFTSequential, + ), + (1077., 1274., EntryPoints::TokenV2AmbassadorMint { + numbered: true, + }), + ]; + + for (large_db_tps, small_db_tps, entry_point) in &entry_points { + if let MultiSigConfig::None = entry_point.multi_sig_additional_num() { + let publisher = 
runner.harness.new_account_with_key_pair(); + let user = runner.harness.new_account_with_key_pair(); + + let mut package_handler = PackageHandler::new(entry_point.package_name()); + let mut rng = StdRng::seed_from_u64(14); + let package = package_handler.pick_package(&mut rng, *publisher.address()); + runner + .harness + .run_transaction_payload(&publisher, package.publish_transaction_payload()); + if let Some(init_entry_point) = entry_point.initialize_entry_point() { + runner.harness.run_transaction_payload( + &publisher, + init_entry_point.create_payload( + package.get_module_id(init_entry_point.module_name()), + Some(&mut rng), + Some(publisher.address()), + ), + ); + } + + runner.run_with_tps_estimate( + &format!("entry_point_{entry_point:?}"), + &user, + entry_point.create_payload( + package.get_module_id(entry_point.module_name()), + Some(&mut rng), + Some(publisher.address()), + ), + if use_large_db_numbers { + *large_db_tps + } else { + *small_db_tps + }, + ); + } else { + println!("Skipping multisig {entry_point:?}"); + } + } + + runner.run_with_tps_estimate( + "Transfer", + account_1, + aptos_stdlib::aptos_coin_transfer(account_2_address, 1000), + if use_large_db_numbers { 2032. } else { 2791. }, + ); + + runner.run_with_tps_estimate( + "CreateAccount", + account_1, + aptos_stdlib::supra_account_create_account( + AccountAddress::from_hex_literal("0xcafe1").unwrap(), + ), + if use_large_db_numbers { 1583.0 } else { 2215. }, + ); + + let mut package_handler = PackageHandler::new("simple"); + let mut rng = StdRng::seed_from_u64(14); + let package = package_handler.pick_package(&mut rng, *account_1.address()); + runner.run_with_tps_estimate( + "PublishModule", + account_1, + package.publish_transaction_payload(), + if use_large_db_numbers { 138.0 } else { 148. 
}, ); } diff --git a/aptos-move/e2e-move-tests/src/tests/keyless_feature_gating.rs b/aptos-move/e2e-move-tests/src/tests/keyless_feature_gating.rs index 58fdd63b7589b..aac8a49250864 100644 --- a/aptos-move/e2e-move-tests/src/tests/keyless_feature_gating.rs +++ b/aptos-move/e2e-move-tests/src/tests/keyless_feature_gating.rs @@ -6,26 +6,35 @@ use aptos_cached_packages::aptos_stdlib; use aptos_crypto::{hash::CryptoHash, SigningKey}; use aptos_language_e2e_tests::account::{Account, AccountPublicKey, TransactionBuilder}; use aptos_types::{ + account_config::CORE_CODE_ADDRESS, + jwks::{rsa::RSA_JWK, secure_test_rsa_jwk}, keyless::{ test_utils::{ get_groth16_sig_and_pk_for_upgraded_vk, get_sample_esk, get_sample_groth16_sig_and_pk, get_sample_iss, get_sample_jwk, get_sample_openid_sig_and_pk, get_upgraded_vk, }, - Configuration, EphemeralCertificate, Groth16VerificationKey, KeylessPublicKey, - KeylessSignature, TransactionAndProof, + AnyKeylessPublicKey, Configuration, EphemeralCertificate, FederatedKeylessPublicKey, + Groth16VerificationKey, KeylessPublicKey, KeylessSignature, TransactionAndProof, }, on_chain_config::FeatureFlag, transaction::{ authenticator::{AnyPublicKey, AuthenticationKey, EphemeralSignature}, - Script, SignedTransaction, Transaction, TransactionStatus, + EntryFunction, Script, SignedTransaction, Transaction, TransactionStatus, }, }; use move_core_types::{ account_address::AccountAddress, + ident_str, + language_storage::ModuleId, transaction_argument::TransactionArgument, - vm_status::{StatusCode, StatusCode::FEATURE_UNDER_GATING}, + value::{serialize_values, MoveValue}, + vm_status::{ + StatusCode, + StatusCode::{FEATURE_UNDER_GATING, INVALID_SIGNATURE}, + }, }; +/// Initializes an Aptos VM and sets the keyless configuration via script (the VK is already set in genesis). 
fn init_feature_gating( enabled_features: Vec, disabled_features: Vec, @@ -89,7 +98,7 @@ fn test_rotate_vk() { // Old proof for old VK let (old_sig, pk) = get_sample_groth16_sig_and_pk(); - let account = create_keyless_account(&mut h, pk.clone()); + let account = create_keyless_account(&mut h, pk); let transaction = spend_keyless_account(&mut h, old_sig.clone(), &account, *recipient.address()); let output = h.run_raw(transaction); @@ -203,11 +212,117 @@ fn test_feature_gating_with_zk_off() { test_feature_gating(&mut h, &recipient, get_sample_openid_sig_and_pk, false); } +/// Creates a federated keyless account associated with a JWK addr. First, ensures TXN validation +/// for this account fails because the JWKs are not installed at that JWK addr. Second, installs the +/// JWK at this address and ensures TXN validation now succeeds. +#[test] +fn test_federated_keyless_at_jwk_addr() { + let mut h = MoveHarness::new_with_features( + vec![ + FeatureFlag::CRYPTOGRAPHY_ALGEBRA_NATIVES, + FeatureFlag::BN254_STRUCTURES, + FeatureFlag::KEYLESS_ACCOUNTS, + FeatureFlag::FEDERATED_KEYLESS, + ], + vec![], + ); + + let jwk_addr = AccountAddress::from_hex_literal("0xadd").unwrap(); + + // Step 1: Make sure TXN validation fails if JWKs are not installed at jwk_addr. + let (sig, pk) = get_sample_groth16_sig_and_pk(); + let sender = create_federated_keyless_account(&mut h, jwk_addr, pk); + let recipient = h.new_account_at(AccountAddress::from_hex_literal("0xb0b").unwrap()); + let txn = spend_keyless_account(&mut h, sig.clone(), &sender, *recipient.address()); + let output = h.run_raw(txn); + + match output.status() { + TransactionStatus::Discard(status) => { + assert_eq!( + *status, INVALID_SIGNATURE, + "Expected TransactionStatus::Discard to be INVALID_SIGNATURE, but got: {:?}", + status + ) + }, + _ => panic!( + "Expected TransactionStatus::Discard, got: {:?}", + output.status() + ), + } + + // Step 1: Make sure TXN validation succeeds once JWKs are installed at jwk_addr. 
+ let iss = get_sample_iss(); + let jwk = get_sample_jwk(); + let _core_resources = install_federated_jwks_and_set_keyless_config(&mut h, jwk_addr, iss, jwk); + + let txn = spend_keyless_account(&mut h, sig, &sender, *recipient.address()); + let output = h.run_raw(txn); + + assert_success!( + output.status().clone(), + "Expected TransactionStatus::Keep(ExecutionStatus::Success), but got: {:?}", + output.status() + ); +} + +/// Tests that the default JWKs at 0x1 work as an override when the JWKs at jwk_addr don't work. +#[test] +fn test_federated_keyless_override_at_0x1() { + let mut h = MoveHarness::new_with_features( + vec![ + FeatureFlag::CRYPTOGRAPHY_ALGEBRA_NATIVES, + FeatureFlag::BN254_STRUCTURES, + FeatureFlag::KEYLESS_ACCOUNTS, + FeatureFlag::FEDERATED_KEYLESS, + ], + vec![], + ); + + let jwk_addr = AccountAddress::from_hex_literal("0xadd").unwrap(); + let iss = get_sample_iss(); + let jwk = secure_test_rsa_jwk(); // this will be the wrong JWK + let _core_resources = install_federated_jwks_and_set_keyless_config(&mut h, jwk_addr, iss, jwk); + + // Step 1: Make sure the TXN does not validate, since the wrong JWK is installed at JWK addr + let (sig, pk) = get_sample_groth16_sig_and_pk(); + let sender = create_federated_keyless_account(&mut h, jwk_addr, pk); + let recipient = h.new_account_at(AccountAddress::from_hex_literal("0xb0b").unwrap()); + let txn = spend_keyless_account(&mut h, sig.clone(), &sender, *recipient.address()); + let output = h.run_raw(txn); + + match output.status() { + TransactionStatus::Discard(status) => { + assert_eq!( + *status, INVALID_SIGNATURE, + "Expected TransactionStatus::Discard to be INVALID_SIGNATURE, but got: {:?}", + status + ) + }, + _ => panic!( + "Expected TransactionStatus::Discard, got: {:?}", + output.status() + ), + } + + // Step 2: Install the correct JWK at 0x1 and resubmit the TXN; it should now validate + run_jwk_and_config_script(&mut h); + let txn = spend_keyless_account(&mut h, sig, &sender, 
*recipient.address()); + let output = h.run_raw(txn); + + assert_success!( + output.status().clone(), + "Expected TransactionStatus::Keep(ExecutionStatus::Success), but got: {:?}", + output.status() + ); +} + fn create_keyless_account(h: &mut MoveHarness, pk: KeylessPublicKey) -> Account { - let apk = AnyPublicKey::keyless(pk.clone()); - let addr = AuthenticationKey::any_key(apk.clone()).account_address(); + let addr = AuthenticationKey::any_key(AnyPublicKey::keyless(pk.clone())).account_address(); let account = h.store_and_fund_account( - &Account::new_from_addr(addr, AccountPublicKey::Keyless(pk)), + &Account::new_from_addr( + addr, + AccountPublicKey::AnyPublicKey(AnyPublicKey::Keyless { public_key: pk }), + ), 100000000, 0, ); @@ -251,8 +366,12 @@ fn spend_keyless_account( } sig.ephemeral_signature = EphemeralSignature::ed25519(esk.sign(&txn_and_zkp).unwrap()); - let transaction = - SignedTransaction::new_keyless(raw_txn, account.pubkey.as_keyless().unwrap(), sig); + let transaction = match account.pubkey.as_keyless().unwrap() { + AnyKeylessPublicKey::Normal(pk) => SignedTransaction::new_keyless(raw_txn, pk, sig), + AnyKeylessPublicKey::Federated(pk) => { + SignedTransaction::new_federated_keyless(raw_txn, pk, sig) + }, + }; println!( "Submitted TXN hash: {}", Transaction::UserTransaction(transaction.clone()).hash() @@ -260,6 +379,29 @@ fn spend_keyless_account( transaction } +fn create_federated_keyless_account( + h: &mut MoveHarness, + jwk_addr: AccountAddress, + pk: KeylessPublicKey, +) -> Account { + let fed_pk = FederatedKeylessPublicKey { jwk_addr, pk }; + let addr = AuthenticationKey::any_key(AnyPublicKey::federated_keyless(fed_pk.clone())) + .account_address(); + let account = h.store_and_fund_account( + &Account::new_from_addr( + addr, + AccountPublicKey::AnyPublicKey(AnyPublicKey::FederatedKeyless { public_key: fed_pk }), + ), + 100000000, + 0, + ); + + println!("Actual address: {}", addr.to_hex()); + println!("Account address: {}", 
account.address().to_hex()); + + account +} + /// Creates and funds a new account at `pk` and sends coins to `recipient`. fn create_and_spend_keyless_account( h: &mut MoveHarness, @@ -267,11 +409,12 @@ fn create_and_spend_keyless_account( pk: KeylessPublicKey, recipient: AccountAddress, ) -> SignedTransaction { - let account = create_keyless_account(h, pk.clone()); + let account = create_keyless_account(h, pk); spend_keyless_account(h, sig, &account, recipient) } +/// Sets the keyless configuration (Note: the VK is already set in genesis.) fn run_jwk_and_config_script(h: &mut MoveHarness) -> Account { let core_resources = h.new_account_at(AccountAddress::from_hex_literal("0xA550C18").unwrap()); @@ -305,13 +448,94 @@ fn run_jwk_and_config_script(h: &mut MoveHarness) -> Account { .sign(); // NOTE: We cannot write the Configuration and Groth16Verification key via MoveHarness::set_resource - // because it does not (yet) work with resource groups. + // because it does not (yet) work with resource groups. This is okay, because the VK will be + // there from genesis. assert_success!(h.run(txn)); core_resources } +/// Sets the keyless configuration and installs the sample RSA JWK as a federated JWK +/// (Note: the VK is already set in genesis.) 
+fn install_federated_jwks_and_set_keyless_config( + h: &mut MoveHarness, + jwk_owner: AccountAddress, + iss: String, + jwk: RSA_JWK, +) -> Account { + let core_resources = h.new_account_at(AccountAddress::from_hex_literal("0xA550C18").unwrap()); + + federated_keyless_init_config(h, core_resources.clone()); + + federated_keyless_install_jwk(h, jwk_owner, iss, jwk); + + core_resources +} + +fn federated_keyless_init_config(h: &mut MoveHarness, core_resources: Account) { + let package = build_package( + common::test_dir_path("federated_keyless_init_config.data/pack"), + aptos_framework::BuildOptions::default(), + ) + .expect("building package must succeed"); + + let txn = h.create_publish_built_package(&core_resources, &package, |_| {}); + assert_success!(h.run(txn)); + + let script = package.extract_script_code()[0].clone(); + + let config = Configuration::new_for_testing(); + + let txn = TransactionBuilder::new(core_resources.clone()) + .script(Script::new(script, vec![], vec![TransactionArgument::U64( + config.max_exp_horizon_secs, + )])) + .sequence_number(h.sequence_number(core_resources.address())) + .max_gas_amount(1_000_000) + .gas_unit_price(1) + .sign(); + + // NOTE: We cannot write the Configuration and Groth16Verification key via MoveHarness::set_resource + // because it does not (yet) work with resource groups. This is okay, because the VK will be + // there from genesis. 
+ + assert_success!(h.run(txn)); +} + +fn federated_keyless_install_jwk( + h: &mut MoveHarness, + jwk_owner: AccountAddress, + iss: String, + jwk: RSA_JWK, +) { + let jwk_owner_account = h.new_account_at(jwk_owner); + + let txn = TransactionBuilder::new(jwk_owner_account.clone()) + .entry_function(EntryFunction::new( + ModuleId::new(CORE_CODE_ADDRESS, ident_str!("jwks").to_owned()), + ident_str!("update_federated_jwk_set").to_owned(), + vec![], + serialize_values(&vec![ + MoveValue::vector_u8(iss.into_bytes()), + MoveValue::Vector(vec![MoveValue::vector_u8(jwk.kid.into_bytes())]), + MoveValue::Vector(vec![MoveValue::vector_u8(jwk.alg.into_bytes())]), + MoveValue::Vector(vec![MoveValue::vector_u8(jwk.e.into_bytes())]), + MoveValue::Vector(vec![MoveValue::vector_u8(jwk.n.into_bytes())]), + ]), + )) + .sequence_number(h.sequence_number(jwk_owner_account.address())) + .max_gas_amount(1_000_000) + .gas_unit_price(1) + .sign(); + + // NOTE: We cannot write the Configuration and Groth16Verification key via MoveHarness::set_resource + // because it does not (yet) work with resource groups. This is okay, because the VK will be + // there from genesis. 
+ + assert_success!(h.run(txn)); +} + fn run_upgrade_vk_script(h: &mut MoveHarness, core_resources: Account, vk: Groth16VerificationKey) { let package = build_package( common::test_dir_path("keyless_new_vk.data/pack"), diff --git a/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/Move.toml b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/Move.toml new file mode 100644 index 0000000000000..82befc6b5c703 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/Move.toml @@ -0,0 +1,9 @@ +[package] +name = "LargePackageExample" +version = "0.0.0" +upgrade_policy = "compatible" +[dependencies] +SupraFramework = { local = "../../../../../framework/supra-framework" } + +[addresses] +large_package_example = "_" diff --git a/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/eight.move b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/eight.move new file mode 100644 index 0000000000000..e3decbd981fc9 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/eight.move @@ -0,0 +1 @@ +module large_package_example::eight {} diff --git a/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/five.move b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/five.move new file mode 100644 index 0000000000000..caf0559b7a40e --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/five.move @@ -0,0 +1 @@ +module large_package_example::five {} diff --git a/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/four.move 
b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/four.move new file mode 100644 index 0000000000000..ca2b7e66c7207 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/four.move @@ -0,0 +1 @@ +module large_package_example::four {} diff --git a/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/one.move b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/one.move new file mode 100644 index 0000000000000..faf2db53b59c0 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/one.move @@ -0,0 +1 @@ +module large_package_example::one {} diff --git a/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/seven.move b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/seven.move new file mode 100644 index 0000000000000..d38cd0b0bf74c --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/seven.move @@ -0,0 +1 @@ +module large_package_example::seven {} diff --git a/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/six.move b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/six.move new file mode 100644 index 0000000000000..5373b207c4fad --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/six.move @@ -0,0 +1 @@ +module large_package_example::six {} diff --git a/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/three.move 
b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/three.move new file mode 100644 index 0000000000000..4aa308268d450 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/three.move @@ -0,0 +1 @@ +module large_package_example::three {} diff --git a/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/two.move b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/two.move new file mode 100644 index 0000000000000..4197c8ac592dc --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/two.move @@ -0,0 +1 @@ +module large_package_example::two {} diff --git a/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/zero.move b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/zero.move new file mode 100644 index 0000000000000..bc516399bb3ed --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.data/large_pack_upgrade_incompat/sources/zero.move @@ -0,0 +1 @@ +module large_package_example::zero {} diff --git a/aptos-move/e2e-move-tests/src/tests/large_package_publishing.rs b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.rs new file mode 100644 index 0000000000000..2476ff308c81b --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/large_package_publishing.rs @@ -0,0 +1,418 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{assert_move_abort, assert_success, assert_vm_status, tests::common, MoveHarness}; +use aptos_framework::{ + chunked_publish::{ + chunk_package_and_create_payloads, PublishType, LARGE_PACKAGES_MODULE_ADDRESS, + }, + natives::{ + code::{PackageMetadata, PackageRegistry, UpgradePolicy}, + 
object_code_deployment::ManagingRefs, + }, + BuildOptions, BuiltPackage, +}; +use aptos_language_e2e_tests::account::Account; +use aptos_types::{ + object_address::create_object_code_deployment_address, + transaction::{AbortInfo, TransactionPayload, TransactionStatus}, +}; +use move_core_types::{ + account_address::AccountAddress, parser::parse_struct_tag, vm_status::StatusCode, +}; +use serde::{Deserialize, Serialize}; +use std::{collections::BTreeMap, option::Option, path::Path}; + +/// Number of transactions needed for staging code chunks before publishing to accounts or objects +/// This is used to derive object address for testing object code deployment feature +const NUMBER_OF_TRANSACTIONS_FOR_STAGING: u64 = 2; + +/// Mimics `0xcafe::eight::State` +#[derive(Serialize, Deserialize)] +struct State { + value: u64, +} + +struct LargePackageTestContext { + harness: MoveHarness, + account: Account, // used for testing account code deployment for large packages + object_address: AccountAddress, // used for testing object code deployment for large packages +} + +impl LargePackageTestContext { + /// Create a new test context with initialized accounts and published `large_packages.move` module. 
+ fn new() -> Self { + let mut harness = MoveHarness::new(); + let admin_account = harness.new_account_at( + AccountAddress::from_hex_literal(LARGE_PACKAGES_MODULE_ADDRESS).unwrap(), + ); + let account = harness.new_account_at(AccountAddress::from_hex_literal("0xcafe").unwrap()); + let sequence_number = harness.sequence_number(account.address()); + let object_address = create_object_code_deployment_address( + *account.address(), + sequence_number + NUMBER_OF_TRANSACTIONS_FOR_STAGING + 1, + ); + + // publish `large_packages.move` module + let build_option = Self::get_named_addresses_build_options(vec![( + String::from("large_packages"), + AccountAddress::from_hex_literal(LARGE_PACKAGES_MODULE_ADDRESS).unwrap(), + )]); + + let txn = harness.create_publish_package( + &admin_account, + &common::test_dir_path("../../../move-examples/large_packages"), + Some(build_option), + |_| {}, + ); + assert_success!(harness.run(txn)); + + LargePackageTestContext { + harness, + account, + object_address, + } + } + + fn get_named_addresses_build_options( + named_addresses: Vec<(String, AccountAddress)>, + ) -> BuildOptions { + let mut build_options = BuildOptions::default(); + let mut map = BTreeMap::new(); + for (k, v) in named_addresses { + map.insert(k, v); + } + build_options.named_addresses = map; + + build_options + } + + /// Publish a large package by creating and running the necessary transactions. 
+ fn publish_large_package( + &mut self, + account: &Account, + path: &Path, + patch_metadata: impl FnMut(&mut PackageMetadata), + publish_type: PublishType, + ) -> Vec { + let deploy_address = match publish_type { + PublishType::AccountDeploy => AccountAddress::from_hex_literal("0xcafe").unwrap(), + PublishType::ObjectDeploy | PublishType::ObjectUpgrade => self.object_address, + }; + + let build_options = Self::get_named_addresses_build_options(vec![( + String::from("large_package_example"), + deploy_address, + )]); + let payloads = self.create_publish_large_package_from_path( + path, + Some(build_options), + patch_metadata, + publish_type, + ); + payloads + .into_iter() + .map(|payload| { + let signed_tx = self + .harness + .create_transaction_without_sign(account, payload) + .sign(); + self.harness.run(signed_tx) + }) + .collect() + } + + /// Create transactions for publishing a large package. + fn create_publish_large_package_from_path( + &mut self, + path: &Path, + options: Option, + mut patch_metadata: impl FnMut(&mut PackageMetadata), + publish_type: PublishType, + ) -> Vec { + let package = BuiltPackage::build(path.to_owned(), options.unwrap()) + .expect("package build must succeed"); + let package_code = package.extract_code(); + let mut metadata = package + .extract_metadata() + .expect("extracting package metadata must succeed"); + patch_metadata(&mut metadata); + let metadata_serialized = bcs::to_bytes(&metadata).expect("Failed deserializing metadata"); + chunk_package_and_create_payloads( + metadata_serialized, + package_code, + publish_type, + Some(self.object_address), + ) + } +} + +#[test] +fn large_package_publishing_basic() { + let mut context = LargePackageTestContext::new(); + let acc = context.account.clone(); + + // Test transactions for publishing the large package are successful + let tx_statuses = context.publish_large_package( + &acc, + &common::test_dir_path("../../../move-examples/large_packages/large_package_example"), + |_| {}, + 
PublishType::AccountDeploy, + ); + for tx_status in tx_statuses.into_iter() { + assert_success!(tx_status); + } + + // Validate metadata + let registry = context + .harness + .read_resource::( + acc.address(), + parse_struct_tag("0x1::code::PackageRegistry").unwrap(), + ) + .unwrap(); + assert_eq!(registry.packages.len(), 1); + assert_eq!(registry.packages[0].name, "LargePackageExample"); + assert_eq!(registry.packages[0].modules.len(), 9); // `LargePackageExample` package includes 9 modules + + // Validate code loaded as expected. + assert_success!(context.harness.run_entry_function( + &acc, + str::parse("0xcafe::eight::hello").unwrap(), + vec![], + vec![bcs::to_bytes::(&42).unwrap()] + )); + let state = context + .harness + .read_resource::( + acc.address(), + parse_struct_tag("0xcafe::eight::State").unwrap(), + ) + .unwrap(); + assert_eq!(state.value, 42); +} + +#[test] +fn large_package_upgrade_success_compat() { + let mut context = LargePackageTestContext::new(); + let acc = context.account.clone(); + + // Initial version + let tx_statuses = context.publish_large_package( + &acc, + &common::test_dir_path("../../../move-examples/large_packages/large_package_example"), + |_| {}, + PublishType::AccountDeploy, + ); + for tx_status in tx_statuses.into_iter() { + assert_success!(tx_status); + } + + // Upgrade to compatible version + let tx_statuses = context.publish_large_package( + &acc, + &common::test_dir_path("../../../move-examples/large_packages/large_package_example"), // upgrade with the same package + |_| {}, + PublishType::AccountDeploy, + ); + for tx_status in tx_statuses.into_iter() { + assert_success!(tx_status); + } +} + +#[test] +fn large_package_upgrade_fail_compat() { + let mut context = LargePackageTestContext::new(); + let acc = context.account.clone(); + + // Initial version + let tx_statuses = context.publish_large_package( + &acc, + &common::test_dir_path("../../../move-examples/large_packages/large_package_example"), + |_| {}, + 
PublishType::AccountDeploy, + ); + for tx_status in tx_statuses.into_iter() { + assert_success!(tx_status); + } + + // Upgrade to incompatible version should fail + // Staging metadata and code should pass, and the final publishing transaction should fail + let mut tx_statuses = context.publish_large_package( + &acc, + &common::test_dir_path("large_package_publishing.data/large_pack_upgrade_incompat"), + |_| {}, + PublishType::AccountDeploy, + ); + + let last_tx_status = tx_statuses.pop().unwrap(); // transaction for publishing + + for tx_status in tx_statuses.into_iter() { + assert_success!(tx_status); + } + assert_vm_status!( + last_tx_status, + StatusCode::BACKWARD_INCOMPATIBLE_MODULE_UPDATE + ); +} + +#[test] +fn large_package_upgrade_fail_immutable() { + let mut context = LargePackageTestContext::new(); + let acc = context.account.clone(); + + // Initial version (immutable package) + let tx_statuses = context.publish_large_package( + &acc, + &common::test_dir_path("../../../move-examples/large_packages/large_package_example"), + |metadata| metadata.upgrade_policy = UpgradePolicy::immutable(), + PublishType::AccountDeploy, + ); + + for tx_status in tx_statuses.into_iter() { + assert_success!(tx_status); + } + + // Upgrading immutable package should fail + // Staging metadata and code should pass, and the final publishing transaction should fail + let mut tx_statuses = context.publish_large_package( + &acc, + &common::test_dir_path("../../../move-examples/large_packages/large_package_example"), + |_| {}, + PublishType::AccountDeploy, + ); + let last_tx_status = tx_statuses.pop().unwrap(); // transaction for publishing + for tx_status in tx_statuses.into_iter() { + assert_success!(tx_status); + } + let abort_info = Some(AbortInfo { + reason_name: "EUPGRADE_IMMUTABLE".to_string(), + description: "Cannot upgrade an immutable package".to_string(), + }); + assert_move_abort!(last_tx_status, abort_info); +} + +#[test] +fn 
large_package_upgrade_fail_overlapping_module() { + let mut context = LargePackageTestContext::new(); + let acc = context.account.clone(); + + // Initial version + let tx_statuses = context.publish_large_package( + &acc, + &common::test_dir_path("../../../move-examples/large_packages/large_package_example"), + |_| {}, + PublishType::AccountDeploy, + ); + for tx_status in tx_statuses.into_iter() { + assert_success!(tx_status); + } + + // Publishing the same package with different name should fail + // Staging metadata and code should pass, and the final publishing transaction should fail + let mut tx_statuses = context.publish_large_package( + &acc, + &common::test_dir_path("../../../move-examples/large_packages/large_package_example"), + |metadata| metadata.name = "other_large_pack".to_string(), + PublishType::AccountDeploy, + ); + + let last_tx_status = tx_statuses.pop().unwrap(); // transaction for publishing + + for tx_status in tx_statuses.into_iter() { + assert_success!(tx_status); + } + let abort_info = Some(AbortInfo { + reason_name: "EMODULE_NAME_CLASH".to_string(), + description: "Package contains duplicate module names with existing modules publised in other packages on this address".to_string(), + }); + assert_move_abort!(last_tx_status, abort_info); +} + +#[test] +fn large_package_object_code_deployment_basic() { + let mut context = LargePackageTestContext::new(); + let acc = context.account.clone(); + + // Test transactions for publishing the large package are successful + let tx_statuses = context.publish_large_package( + &acc, + &common::test_dir_path("../../../move-examples/large_packages/large_package_example"), + |_| {}, + PublishType::ObjectDeploy, + ); + for tx_status in tx_statuses.into_iter() { + assert_success!(tx_status); + } + + // Validate metadata + let registry = context + .harness + .read_resource::( + &context.object_address, + parse_struct_tag("0x1::code::PackageRegistry").unwrap(), + ) + .unwrap(); + 
assert_eq!(registry.packages.len(), 1); + assert_eq!(registry.packages[0].name, "LargePackageExample"); + assert_eq!(registry.packages[0].modules.len(), 9); + + let code_object: ManagingRefs = context + .harness + .read_resource_from_resource_group( + &context.object_address, + parse_struct_tag("0x1::object::ObjectGroup").unwrap(), + parse_struct_tag("0x1::object_code_deployment::ManagingRefs").unwrap(), + ) + .unwrap(); + // Verify the object created owns the `ManagingRefs` + assert_eq!(code_object, ManagingRefs::new(context.object_address)); + + let module_address = context.object_address.to_string(); + + // Validate code loaded as expected. + assert_success!(context.harness.run_entry_function( + &acc, + str::parse(&format!("{}::eight::hello", module_address)).unwrap(), + vec![], + vec![bcs::to_bytes::(&42).unwrap()] + )); + + let state = context + .harness + .read_resource::( + acc.address(), + parse_struct_tag(&format!("{}::eight::State", module_address)).unwrap(), + ) + .unwrap(); + + assert_eq!(state.value, 42); +} + +#[test] +fn large_package_object_code_deployment_upgrade_success_compat() { + let mut context = LargePackageTestContext::new(); + let acc = context.account.clone(); + + // Initial version + let tx_statuses = context.publish_large_package( + &acc, + &common::test_dir_path("../../../move-examples/large_packages/large_package_example"), + |_| {}, + PublishType::ObjectDeploy, + ); + for tx_status in tx_statuses.into_iter() { + assert_success!(tx_status); + } + + // Upgrade to compatible version + let tx_statuses = context.publish_large_package( + &acc, + &common::test_dir_path("../../../move-examples/large_packages/large_package_example"), // upgrade with the same package + |_| {}, + PublishType::ObjectUpgrade, + ); + for tx_status in tx_statuses.into_iter() { + assert_success!(tx_status); + } +} diff --git a/aptos-move/e2e-move-tests/src/tests/lazy_natives.rs b/aptos-move/e2e-move-tests/src/tests/lazy_natives.rs index 6910f79b48754..4e78b63085c02 
100644 --- a/aptos-move/e2e-move-tests/src/tests/lazy_natives.rs +++ b/aptos-move/e2e-move-tests/src/tests/lazy_natives.rs @@ -3,14 +3,15 @@ use crate::{assert_success, assert_vm_status, MoveHarness}; use aptos_package_builder::PackageBuilder; -use aptos_types::account_address::AccountAddress; +use aptos_types::{account_address::AccountAddress, on_chain_config::FeatureFlag}; use move_core_types::vm_status::StatusCode; #[test] fn lazy_natives() { let mut h = MoveHarness::new(); let acc = h.new_account_at(AccountAddress::from_hex_literal("0xcafe").unwrap()); - + // Set flag to publish the package. + h.enable_features(vec![], vec![FeatureFlag::DISALLOW_USER_NATIVES]); let mut builder = PackageBuilder::new("LazyNatives"); builder.add_source( "test", @@ -28,6 +29,7 @@ module 0xcafe::test { // Should be able to publish with unbound native. assert_success!(h.publish_package(&acc, dir.path())); + h.enable_features(vec![], vec![FeatureFlag::DISALLOW_USER_NATIVES]); // Should be able to call nothing entry assert_success!(h.run_entry_function( &acc, diff --git a/aptos-move/e2e-move-tests/src/tests/metadata.rs b/aptos-move/e2e-move-tests/src/tests/metadata.rs index 7c499fe762bcd..b60a2d6f584bf 100644 --- a/aptos-move/e2e-move-tests/src/tests/metadata.rs +++ b/aptos-move/e2e-move-tests/src/tests/metadata.rs @@ -14,7 +14,7 @@ use aptos_package_builder::PackageBuilder; use aptos_types::{ chain_id::ChainId, on_chain_config::{FeatureFlag, OnChainConfig}, - transaction::TransactionStatus, + transaction::{Script, TransactionPayload, TransactionStatus}, }; use move_binary_format::CompiledModule; use move_core_types::{ @@ -151,7 +151,7 @@ fn test_duplicate_compilation_metadata_entries() { }; let result = test_compilation_metadata_with_changes( duplicate_compilation_metatdata, - CompilerVersion::V2_0, + CompilerVersion::V2_1, ); assert_vm_status!(result, StatusCode::CONSTRAINT_NOT_SATISFIED); let result = test_compilation_metadata_with_changes( @@ -230,7 +230,7 @@ fn 
test_compilation_metadata_internal( let path = builder.write_to_temp().unwrap(); let compiler_version = if v2_flag { - CompilerVersion::V2_0 + CompilerVersion::V2_1 } else { CompilerVersion::V1 }; @@ -269,6 +269,118 @@ fn test_compilation_metadata_internal( } } +fn test_compilation_metadata_script_internal( + mainnet_flag: bool, + v2_flag: bool, + feature_enabled: bool, +) -> TransactionStatus { + let mut h = MoveHarness::new(); + if feature_enabled { + h.enable_features( + vec![FeatureFlag::REJECT_UNSTABLE_BYTECODE_FOR_SCRIPT], + vec![], + ); + } else { + h.enable_features(vec![], vec![ + FeatureFlag::REJECT_UNSTABLE_BYTECODE_FOR_SCRIPT, + ]); + } + let account = h.new_account_at(AccountAddress::from_hex_literal("0xf00d").unwrap()); + let mut builder = PackageBuilder::new("Package"); + builder.add_source( + "m.move", + r#" + script { + fun main() { } + } + "#, + ); + let path = builder.write_to_temp().unwrap(); + + let compiler_version = if v2_flag { + CompilerVersion::V2_1 + } else { + CompilerVersion::V1 + }; + let package = build_package_with_compiler_version( + path.path().to_path_buf(), + BuildOptions::default(), + compiler_version, + ) + .expect("building package must succeed"); + + let code = package.extract_script_code().into_iter().next().unwrap(); + + let script = TransactionPayload::Script(Script::new(code, vec![], vec![])); + + if mainnet_flag { + h.set_resource( + CORE_CODE_ADDRESS, + ChainId::struct_tag(), + &ChainId::mainnet().id(), + ); + h.run_transaction_payload_mainnet(&account, script) + } else { + h.run_transaction_payload(&account, script) + } +} + +#[test] +fn test_compilation_metadata_for_script() { + let mut enable_check = true; + // run compiler v2 code to mainnet + assert_vm_status!( + test_compilation_metadata_script_internal(true, true, enable_check), + StatusCode::UNSTABLE_BYTECODE_REJECTED + ); + // run compiler v1 code to mainnet + assert_success!(test_compilation_metadata_script_internal( + true, + false, + enable_check + )); + // 
run compiler v2 code to test + assert_success!(test_compilation_metadata_script_internal( + false, + true, + enable_check + )); + // run compiler v1 code to test + assert_success!(test_compilation_metadata_script_internal( + false, + false, + enable_check + )); + + enable_check = false; + // run compiler v2 code to mainnet + // success because the feature flag is turned off + assert_success!(test_compilation_metadata_script_internal( + true, + true, + enable_check + ),); + // run compiler v1 code to mainnet + assert_success!(test_compilation_metadata_script_internal( + true, + false, + enable_check + )); + // run compiler v2 code to test + // success because the feature flag is turned off + assert_success!(test_compilation_metadata_script_internal( + false, + true, + enable_check + ),); + // run compiler v1 code to test + assert_success!(test_compilation_metadata_script_internal( + false, + false, + enable_check + )); +} + #[test] fn test_compilation_metadata() { let mut enable_check = true; diff --git a/aptos-move/e2e-move-tests/src/tests/mod.rs b/aptos-move/e2e-move-tests/src/tests/mod.rs index c407d74e82e47..33c8ac97e7ea0 100644 --- a/aptos-move/e2e-move-tests/src/tests/mod.rs +++ b/aptos-move/e2e-move-tests/src/tests/mod.rs @@ -5,6 +5,7 @@ mod access_path_test; mod account; mod aggregator; mod aggregator_v2; +mod aggregator_v2_enums; mod aggregator_v2_events; mod aggregator_v2_runtime_checks; mod attributes; @@ -13,6 +14,7 @@ mod code_publishing; mod common; mod constructor_args; mod dependencies; +mod enum_upgrade; mod error_map; mod fee_payer; mod fungible_asset; @@ -22,6 +24,7 @@ mod governance_updates; mod infinite_loop; mod init_module; mod keyless_feature_gating; +mod large_package_publishing; mod lazy_natives; mod max_loop_depth; mod memory_quota; @@ -29,6 +32,7 @@ mod metadata; mod mint_nft; mod missing_gas_parameter; mod module_event; +mod move_feature_gating; mod new_integer_types; mod nft_dao; mod object_code_deployment; @@ -45,6 +49,7 @@ mod stake; 
mod state_metadata; mod storage_refund; mod string_args; +mod test_self; mod token_event_store; mod token_objects; mod transaction_context; diff --git a/aptos-move/e2e-move-tests/src/tests/move_feature_gating.rs b/aptos-move/e2e-move-tests/src/tests/move_feature_gating.rs new file mode 100644 index 0000000000000..0445fcc1a05ee --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/move_feature_gating.rs @@ -0,0 +1,78 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +//! Tests for gating of Move language features + +// Note: this module uses parameterized tests via the +// [`rstest` crate](https://crates.io/crates/rstest) +// to test for multiple feature combinations. + +use crate::{assert_success, assert_vm_status, MoveHarness}; +use aptos_framework::BuildOptions; +use aptos_package_builder::PackageBuilder; +use aptos_types::{account_address::AccountAddress, on_chain_config::FeatureFlag}; +use move_core_types::vm_status::StatusCode; +use rstest::rstest; + +#[rstest(enabled, disabled, + case(vec![], vec![FeatureFlag::ENABLE_ENUM_TYPES]), + case(vec![FeatureFlag::ENABLE_ENUM_TYPES], vec![]), +)] +fn enum_types(enabled: Vec, disabled: Vec) { + let positive_test = !enabled.is_empty(); + let mut h = MoveHarness::new_with_features(enabled, disabled); + let acc = h.new_account_at(AccountAddress::from_hex_literal("0x815").unwrap()); + + let mut builder = PackageBuilder::new("Package"); + let source = r#" + module 0x815::m { + enum E { Black, White } + fun dark(e: E): bool { + match (e) { + Black => true, + White => false + } + } + } + "#; + builder.add_source("m.move", source); + let path = builder.write_to_temp().unwrap(); + let result = h.publish_package_with_options(&acc, path.path(), BuildOptions::move_2()); + if positive_test { + assert_success!(result); + } else { + assert_vm_status!(result, StatusCode::FEATURE_NOT_ENABLED) + } +} + +#[rstest(enabled, disabled, + case(vec![], vec![FeatureFlag::ENABLE_RESOURCE_ACCESS_CONTROL]), + 
case(vec![FeatureFlag::ENABLE_RESOURCE_ACCESS_CONTROL], vec![]), +)] +fn resource_access_control(enabled: Vec, disabled: Vec) { + let positive_test = !enabled.is_empty(); + let mut h = MoveHarness::new_with_features(enabled, disabled); + let acc = h.new_account_at(AccountAddress::from_hex_literal("0x815").unwrap()); + + let mut builder = PackageBuilder::new("Package"); + let source = r#" + module 0x815::m { + struct R has key, copy {} + fun read(a: address): R reads R { + *borrow_global(a) + } + } + "#; + builder.add_source("m.move", source); + let path = builder.write_to_temp().unwrap(); + let result = h.publish_package_with_options( + &acc, + path.path(), + BuildOptions::move_2().with_experiment("gen-access-specifiers"), + ); + if positive_test { + assert_success!(result); + } else { + assert_vm_status!(result, StatusCode::FEATURE_NOT_ENABLED); + } +} diff --git a/aptos-move/e2e-move-tests/src/tests/script_with_object_param.data/pack/Move.toml b/aptos-move/e2e-move-tests/src/tests/script_with_object_param.data/pack/Move.toml new file mode 100644 index 0000000000000..23081e7b177d5 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/script_with_object_param.data/pack/Move.toml @@ -0,0 +1,10 @@ +[package] +name = "test" +version = "0.0.0" + +[addresses] +example_addr = "_" + +[dependencies] +SupraFramework = { local = "../../../../../framework/supra-framework" } +ManagedFungibleAsset = { local = "../../../../../move-examples/fungible_asset/managed_fungible_asset" } \ No newline at end of file diff --git a/aptos-move/e2e-move-tests/src/tests/script_with_object_param.data/pack/sources/test.move b/aptos-move/e2e-move-tests/src/tests/script_with_object_param.data/pack/sources/test.move new file mode 100644 index 0000000000000..90448fadb3644 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/script_with_object_param.data/pack/sources/test.move @@ -0,0 +1,15 @@ +script { + use supra_framework::fungible_asset::Metadata; + use supra_framework::object::Object; + use 
example_addr::managed_fungible_asset::transfer_between_primary_stores; + + fun main( + admin: &signer, + asset: Object, + from: vector
, + to: vector
, + amounts: vector, + ) { + transfer_between_primary_stores(admin, asset, from, to, amounts); + } +} diff --git a/aptos-move/e2e-move-tests/src/tests/scripts.rs b/aptos-move/e2e-move-tests/src/tests/scripts.rs index fa8c8d392420e..40395dba22ea4 100644 --- a/aptos-move/e2e-move-tests/src/tests/scripts.rs +++ b/aptos-move/e2e-move-tests/src/tests/scripts.rs @@ -5,10 +5,118 @@ use crate::{assert_success, build_package, tests::common, MoveHarness}; use aptos_language_e2e_tests::account::TransactionBuilder; use aptos_types::{ account_address::AccountAddress, + on_chain_config::FeatureFlag, transaction::{Script, TransactionArgument}, }; use move_core_types::language_storage::TypeTag; +#[test] +fn test_script_with_object_parameter() { + let mut h = MoveHarness::new(); + + let alice = h.new_account_at(AccountAddress::from_hex_literal("0xcafe").unwrap()); + let bob = h.new_account_at(AccountAddress::from_hex_literal("0xface").unwrap()); + let root = h.aptos_framework_account(); + + let mut build_options = aptos_framework::BuildOptions::default(); + build_options + .named_addresses + .insert("example_addr".to_string(), *alice.address()); + + let result = h.publish_package_with_options( + &alice, + &common::test_dir_path("../../../move-examples/fungible_asset/managed_fungible_asset"), + build_options.clone(), + ); + + assert_success!(result); + let result = h.publish_package_with_options( + &alice, + &common::test_dir_path("../../../move-examples/fungible_asset/managed_fungible_token"), + build_options.clone(), + ); + assert_success!(result); + + assert_success!(h.run_entry_function( + &root, + str::parse(&format!( + "0x{}::coin::create_coin_conversion_map", + (*root.address()).to_hex() + )) + .unwrap(), + vec![], + vec![], + )); + + let metadata = h + .execute_view_function( + str::parse(&format!( + "0x{}::managed_fungible_token::get_metadata", + (*alice.address()).to_hex() + )) + .unwrap(), + vec![], + vec![], + ) + .values + .unwrap() + .pop() + .unwrap(); + let metadata 
= bcs::from_bytes::(metadata.as_slice()).unwrap(); + + let result = h.run_entry_function( + &alice, + str::parse(&format!( + "0x{}::managed_fungible_asset::mint_to_primary_stores", + (*alice.address()).to_hex() + )) + .unwrap(), + vec![], + vec![ + bcs::to_bytes(&metadata).unwrap(), + bcs::to_bytes(&vec![alice.address()]).unwrap(), + bcs::to_bytes(&vec![100u64]).unwrap(), // amount + ], + ); + assert_success!(result); + + let package = build_package( + common::test_dir_path("script_with_object_param.data/pack"), + build_options, + ) + .expect("building package must succeed"); + + let code = package.extract_script_code().into_iter().next().unwrap(); + let script = Script::new(code, vec![], vec![ + TransactionArgument::Serialized(bcs::to_bytes(&metadata).unwrap()), + TransactionArgument::Serialized(bcs::to_bytes(&vec![alice.address()]).unwrap()), + TransactionArgument::Serialized(bcs::to_bytes(&vec![bob.address()]).unwrap()), + TransactionArgument::Serialized(bcs::to_bytes(&vec![30u64]).unwrap()), + ]); + + let txn = TransactionBuilder::new(alice.clone()) + .script(script.clone()) + .sequence_number(13) + .max_gas_amount(1_000_000) + .gas_unit_price(1) + .sign(); + + let status = h.run(txn); + assert_success!(status); + + h.enable_features(vec![], vec![FeatureFlag::ALLOW_SERIALIZED_SCRIPT_ARGS]); + + let txn = TransactionBuilder::new(alice.clone()) + .script(script.clone()) + .sequence_number(14) + .max_gas_amount(1_000_000) + .gas_unit_price(1) + .sign(); + + let status = h.run(txn); + assert!(status.is_discarded()); +} + #[test] fn test_script_with_type_parameter() { let mut h = MoveHarness::new(); diff --git a/aptos-move/e2e-move-tests/src/tests/smart_data_structures_self.data/Move.toml b/aptos-move/e2e-move-tests/src/tests/smart_data_structures_self.data/Move.toml new file mode 100644 index 0000000000000..afd9cf30408aa --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/smart_data_structures_self.data/Move.toml @@ -0,0 +1,7 @@ +[package] +name = 
"TestSmartDataStructures" +version = "0.0.0" + +[dependencies] +SupraFramework = { local = "../../../../framework/supra-framework" } +AptosStdlib = { local = "../../../../framework/aptos-stdlib" } diff --git a/aptos-move/e2e-move-tests/src/tests/smart_data_structures_self.data/sources/test.move b/aptos-move/e2e-move-tests/src/tests/smart_data_structures_self.data/sources/test.move new file mode 100644 index 0000000000000..4aa01d51c3c13 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/smart_data_structures_self.data/sources/test.move @@ -0,0 +1,103 @@ +module 0xcafe::test { + use aptos_std::smart_vector::{Self, SmartVector}; + use aptos_std::smart_table::{Self, SmartTable}; + use aptos_std::table::{Self, Table}; + use std::vector; + use std::signer; + + struct SmartVectorStore has key { + v: SmartVector + } + + struct VectorStore has key { + v: vector + } + + struct SmartTableStore has key { + t: SmartTable + } + + struct TableStore has key { + t: Table + } + + public entry fun create_smart_vector(acct: &signer) { + let v = smart_vector::empty(); + let i: u64 = 0; + while (i < 5000) { + (&mut v).push_back(i); + i = i + 1; + }; + move_to(acct, SmartVectorStore { v }); + } + + public entry fun update_smart_vector(acct: &signer) acquires SmartVectorStore { + let v = &mut borrow_global_mut(signer::address_of(acct)).v; + v.push_back(5000); + } + + public entry fun read_smart_vector(acct: &signer) acquires SmartVectorStore { + let v = &borrow_global(signer::address_of(acct)).v; + v.borrow(2000); + } + + public entry fun create_vector(acct: &signer) { + let v = vector::empty(); + let i: u64 = 0; + while (i < 5000) { + (&mut v).push_back(i); + i = i + 1; + }; + move_to(acct, VectorStore { v }); + } + + public entry fun update_vector(acct: &signer) acquires VectorStore { + let v = &mut borrow_global_mut(signer::address_of(acct)).v; + v.push_back(5000); + } + + public entry fun read_vector(acct: &signer) acquires VectorStore { + let v = 
&borrow_global(signer::address_of(acct)).v; + v.borrow(2000); + } + + public entry fun create_smart_table(acct: &signer) { + let t = smart_table::new(); + let i: u64 = 0; + while (i < 1000) { + (&mut t).add(i, i); + i = i + 1; + }; + move_to(acct, SmartTableStore { t }); + } + + public entry fun update_smart_table(acct: &signer) acquires SmartTableStore { + let t = &mut borrow_global_mut(signer::address_of(acct)).t; + t.add(1001, 1001); + } + + public entry fun read_smart_table(acct: &signer) acquires SmartTableStore { + let t = &borrow_global(signer::address_of(acct)).t; + t.borrow(500); + } + + public entry fun create_table(acct: &signer) { + let t = table::new(); + let i: u64 = 0; + while (i < 1000) { + t.add(i, i); + i = i + 1; + }; + move_to(acct, TableStore { t }); + } + + public entry fun update_table(acct: &signer) acquires TableStore { + let t = &mut borrow_global_mut(signer::address_of(acct)).t; + t.add(1001, 1001); + } + + public entry fun read_table(acct: &signer) acquires TableStore { + let t = &borrow_global(signer::address_of(acct)).t; + t.borrow(500); + } +} diff --git a/aptos-move/e2e-move-tests/src/tests/test_self.rs b/aptos-move/e2e-move-tests/src/tests/test_self.rs new file mode 100644 index 0000000000000..14fce50377f08 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/test_self.rs @@ -0,0 +1,18 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{assert_success, tests::common, MoveHarness}; +use aptos_framework::BuildOptions; +use aptos_types::account_address::AccountAddress; + +#[test] +fn test_self() { + let mut h = MoveHarness::new(); + // Load the code + let acc = h.new_account_at(AccountAddress::from_hex_literal("0xcafe").unwrap()); + assert_success!(h.publish_package_with_options( + &acc, + &common::test_dir_path("smart_data_structures_self.data"), + BuildOptions::move_2() + )); +} diff --git a/aptos-move/e2e-move-tests/src/tests/vm.rs b/aptos-move/e2e-move-tests/src/tests/vm.rs index 
635061e2b9ffc..cd939ed97180f 100644 --- a/aptos-move/e2e-move-tests/src/tests/vm.rs +++ b/aptos-move/e2e-move-tests/src/tests/vm.rs @@ -49,7 +49,6 @@ fn failed_transaction_cleanup_charges_gas(status_code: StatusCode) { status_code ); let write_set: Vec<(&StateKey, &WriteOp)> = output - .change_set() .concrete_write_set_iter() .map(|(k, v)| (k, assert_some!(v))) .collect(); diff --git a/aptos-move/e2e-tests/Cargo.toml b/aptos-move/e2e-tests/Cargo.toml index 859110fee4302..d19a2b61d2a94 100644 --- a/aptos-move/e2e-tests/Cargo.toml +++ b/aptos-move/e2e-tests/Cargo.toml @@ -51,6 +51,9 @@ rand = { workspace = true } rayon = { workspace = true } serde = { workspace = true } +[dev-dependencies] +claims = { workspace = true } + [features] default = [] fuzzing = [] diff --git a/aptos-move/e2e-tests/src/account.rs b/aptos-move/e2e-tests/src/account.rs index 0a3c5d8ad9c2f..ad42de936b390 100644 --- a/aptos-move/e2e-tests/src/account.rs +++ b/aptos-move/e2e-tests/src/account.rs @@ -10,16 +10,21 @@ use aptos_keygen::KeyGen; use aptos_types::{ access_path::AccessPath, account_address::AccountAddress, - account_config::{self, AccountResource, CoinStoreResource}, + account_config::{ + self, primary_apt_store, AccountResource, CoinStoreResource, + ConcurrentFungibleBalanceResource, FungibleStoreResource, MigrationFlag, + ObjectCoreResource, ObjectGroupResource, + }, chain_id::ChainId, event::{EventHandle, EventKey}, - keyless::KeylessPublicKey, + keyless::AnyKeylessPublicKey, state_store::state_key::StateKey, transaction::{ authenticator::{AnyPublicKey, AuthenticationKey}, EntryFunction, RawTransaction, Script, SignedTransaction, TransactionPayload, }, write_set::{WriteOp, WriteSet, WriteSetMut}, + SupraCoinType, }; use aptos_vm_genesis::GENESIS_KEYPAIR; use move_core_types::move_resource::MoveStructType; @@ -30,27 +35,38 @@ pub const DEFAULT_EXPIRATION_TIME: u64 = 4_000_000; #[derive(Debug, Clone, Eq, PartialEq)] pub enum AccountPublicKey { Ed25519(Ed25519PublicKey), - 
Keyless(KeylessPublicKey), + AnyPublicKey(AnyPublicKey), } impl AccountPublicKey { pub fn to_bytes(&self) -> Vec { match self { AccountPublicKey::Ed25519(pk) => pk.to_bytes().to_vec(), - AccountPublicKey::Keyless(pk) => pk.to_bytes(), + AccountPublicKey::AnyPublicKey(pk) => pk.to_bytes().to_vec(), } } pub fn as_ed25519(&self) -> Option { match self { AccountPublicKey::Ed25519(pk) => Some(pk.clone()), - AccountPublicKey::Keyless(_) => None, + AccountPublicKey::AnyPublicKey(pk) => match pk { + AnyPublicKey::Ed25519 { public_key } => Some(public_key.clone()), + _ => None, + }, } } - pub fn as_keyless(&self) -> Option { + pub fn as_keyless(&self) -> Option { match self { - AccountPublicKey::Keyless(pk) => Some(pk.clone()), + AccountPublicKey::AnyPublicKey(pk) => match pk { + AnyPublicKey::Keyless { public_key } => { + Some(AnyKeylessPublicKey::Normal(public_key.clone())) + }, + AnyPublicKey::FederatedKeyless { public_key } => { + Some(AnyKeylessPublicKey::Federated(public_key.clone())) + }, + _ => None, + }, AccountPublicKey::Ed25519(_) => None, } } @@ -66,7 +82,8 @@ impl AccountPublicKey { pub struct Account { addr: AccountAddress, /// The current private key for this account. - /// TODO: When `pubkey` is of type `AccountPublicKey::Keyless`, this will be undefined. + /// TODO: Refactor appropriately since, for example, when `pubkey` is of type + /// `AccountPublicKey::AnyPublicKey::Keyless`, this `privkey` field will be undefined. pub privkey: Ed25519PrivateKey, /// The current public key for this account. pub pubkey: AccountPublicKey, @@ -171,8 +188,11 @@ impl Account { /// /// Use this to retrieve or publish the Account CoinStore blob. 
pub fn make_coin_store_access_path(&self) -> AccessPath { - AccessPath::resource_access_path(self.addr, CoinStoreResource::struct_tag()) - .expect("access path in test") + AccessPath::resource_access_path( + self.addr, + CoinStoreResource::::struct_tag(), + ) + .expect("access path in test") } /// Changes the keys for this account to the provided ones. @@ -187,9 +207,7 @@ impl Account { pub fn auth_key(&self) -> Vec { match &self.pubkey { AccountPublicKey::Ed25519(pk) => AuthenticationKey::ed25519(pk), - AccountPublicKey::Keyless(pk) => { - AuthenticationKey::any_key(AnyPublicKey::keyless(pk.clone())) - }, + AccountPublicKey::AnyPublicKey(pk) => AuthenticationKey::any_key(pk.clone()), } .to_vec() } @@ -381,7 +399,7 @@ impl CoinStore { /// Returns the Move Value for the account's CoinStore pub fn to_bytes(&self) -> Vec { - let coin_store = CoinStoreResource::new( + let coin_store = CoinStoreResource::::new( self.coin, self.frozen, self.deposit_events.clone(), @@ -391,6 +409,77 @@ impl CoinStore { } } +/// Struct that represents an account FungibleStore resource for tests. 
+#[derive(Clone, Debug, Eq, PartialEq)] +pub struct FungibleStore { + pub owner: AccountAddress, + pub metadata: AccountAddress, + pub balance: u64, + pub frozen: bool, + pub concurrent_balance: bool, +} + +impl FungibleStore { + pub fn new( + owner: AccountAddress, + metadata: AccountAddress, + balance: u64, + frozen: bool, + concurrent_balance: bool, + ) -> Self { + Self { + owner, + metadata, + balance, + frozen, + concurrent_balance, + } + } + + /// Retrieve the balance inside of this + pub fn balance(&self) -> u64 { + self.balance + } + + pub fn to_bytes(&self) -> Vec { + let primary_store_object_address = primary_apt_store(self.owner); + let mut object_group = ObjectGroupResource::default(); + object_group.insert( + ObjectCoreResource::struct_tag(), + bcs::to_bytes(&ObjectCoreResource::new( + self.owner, + false, + new_event_handle(0, primary_store_object_address), + )) + .unwrap(), + ); + object_group.insert( + FungibleStoreResource::struct_tag(), + bcs::to_bytes(&FungibleStoreResource::new( + self.metadata, + if self.concurrent_balance { + 0 + } else { + self.balance + }, + self.frozen, + )) + .unwrap(), + ); + if self.concurrent_balance { + object_group.insert( + ConcurrentFungibleBalanceResource::struct_tag(), + bcs::to_bytes(&ConcurrentFungibleBalanceResource::new(self.balance)).unwrap(), + ); + } + object_group.insert( + MigrationFlag::struct_tag(), + bcs::to_bytes(&MigrationFlag::default()).unwrap(), + ); + bcs::to_bytes(&object_group).unwrap() + } +} + //--------------------------------------------------------------------------- // Account resource representation //--------------------------------------------------------------------------- @@ -404,7 +493,8 @@ pub struct AccountData { sequence_number: u64, coin_register_events: EventHandle, key_rotation_events: EventHandle, - coin_store: CoinStore, + coin_store: Option, + fungible_store: Option, } fn new_event_handle(count: u64, address: AccountAddress) -> EventHandle { @@ -416,7 +506,7 @@ impl 
AccountData { /// /// This constructor is non-deterministic and should not be used against golden file. pub fn new(balance: u64, sequence_number: u64) -> Self { - Self::with_account(Account::new(), balance, sequence_number) + Self::with_account(Account::new(), balance, sequence_number, false, false) } pub fn increment_sequence_number(&mut self) { @@ -427,12 +517,33 @@ impl AccountData { /// /// Most tests will want to use this constructor. pub fn new_from_seed(seed: &mut KeyGen, balance: u64, sequence_number: u64) -> Self { - Self::with_account(Account::new_from_seed(seed), balance, sequence_number) + Self::with_account( + Account::new_from_seed(seed), + balance, + sequence_number, + false, + false, + ) } /// Creates a new `AccountData` with the provided account. - pub fn with_account(account: Account, balance: u64, sequence_number: u64) -> Self { - Self::with_account_and_event_counts(account, balance, sequence_number, 0, 0) + pub fn with_account( + account: Account, + balance: u64, + sequence_number: u64, + use_fa_apt: bool, + use_concurrent_balance: bool, + ) -> Self { + if use_fa_apt { + Self::with_account_and_fungible_store( + account, + balance, + sequence_number, + use_concurrent_balance, + ) + } else { + Self::with_account_and_event_counts(account, balance, sequence_number, 0, 0) + } } /// Creates a new `AccountData` with the provided account. @@ -443,7 +554,7 @@ impl AccountData { sequence_number: u64, ) -> Self { let account = Account::with_keypair(privkey, pubkey); - Self::with_account(account, balance, sequence_number) + Self::with_account(account, balance, sequence_number, false, false) } /// Creates a new `AccountData` with custom parameters. 
@@ -457,11 +568,36 @@ impl AccountData { let addr = *account.address(); Self { account, - coin_store: CoinStore::new( + coin_store: Some(CoinStore::new( balance, new_event_handle(received_events_count, addr), new_event_handle(sent_events_count, addr), - ), + )), + fungible_store: None, + sequence_number, + coin_register_events: new_event_handle(0, addr), + key_rotation_events: new_event_handle(1, addr), + } + } + + /// Creates a new `AccountData` with custom parameters. + pub fn with_account_and_fungible_store( + account: Account, + fungible_balance: u64, + sequence_number: u64, + use_concurrent_balance: bool, + ) -> Self { + let addr = *account.address(); + Self { + account, + coin_store: None, + fungible_store: Some(FungibleStore::new( + addr, + AccountAddress::TEN, + fungible_balance, + false, + use_concurrent_balance, + )), sequence_number, coin_register_events: new_event_handle(0, addr), key_rotation_events: new_event_handle(1, addr), @@ -501,16 +637,30 @@ impl AccountData { /// Creates a writeset that contains the account data and can be patched to the storage /// directly. 
pub fn to_writeset(&self) -> WriteSet { - let write_set = vec![ - ( - StateKey::resource_typed::(self.address()).unwrap(), - WriteOp::legacy_modification(self.to_bytes().into()), - ), - ( - StateKey::resource_typed::(self.address()).unwrap(), - WriteOp::legacy_modification(self.coin_store.to_bytes().into()), - ), - ]; + let mut write_set = vec![( + StateKey::resource_typed::(self.address()).unwrap(), + WriteOp::legacy_modification(self.to_bytes().into()), + )]; + + if let Some(coin_store) = &self.coin_store { + write_set.push(( + StateKey::resource_typed::>(self.address()) + .unwrap(), + WriteOp::legacy_modification(coin_store.to_bytes().into()), + )); + } + + if let Some(fungible_store) = &self.fungible_store { + let primary_store_object_address = primary_apt_store(*self.address()); + + write_set.push(( + StateKey::resource_group( + &primary_store_object_address, + &ObjectGroupResource::struct_tag(), + ), + WriteOp::legacy_modification(fungible_store.to_bytes().into()), + )); + } WriteSetMut::new(write_set).freeze().unwrap() } @@ -534,8 +684,12 @@ impl AccountData { } /// Returns the initial balance. - pub fn balance(&self) -> u64 { - self.coin_store.coin() + pub fn coin_balance(&self) -> Option { + self.coin_store.as_ref().map(CoinStore::coin) + } + + pub fn fungible_balance(&self) -> Option { + self.fungible_store.as_ref().map(FungibleStore::balance) } /// Returns the initial sequence number. @@ -545,21 +699,21 @@ impl AccountData { /// Returns the unique key for this sent events stream. pub fn sent_events_key(&self) -> &EventKey { - self.coin_store.withdraw_events.key() + self.coin_store.as_ref().unwrap().withdraw_events.key() } /// Returns the initial sent events count. pub fn sent_events_count(&self) -> u64 { - self.coin_store.withdraw_events.count() + self.coin_store.as_ref().unwrap().withdraw_events.count() } /// Returns the unique key for this received events stream. 
pub fn received_events_key(&self) -> &EventKey { - self.coin_store.deposit_events.key() + self.coin_store.as_ref().unwrap().deposit_events.key() } /// Returns the initial received events count. pub fn received_events_count(&self) -> u64 { - self.coin_store.deposit_events.count() + self.coin_store.as_ref().unwrap().deposit_events.count() } } diff --git a/aptos-move/e2e-tests/src/account_universe.rs b/aptos-move/e2e-tests/src/account_universe.rs index 2283c7392c8f5..a2ec9a1ab7883 100644 --- a/aptos-move/e2e-tests/src/account_universe.rs +++ b/aptos-move/e2e-tests/src/account_universe.rs @@ -128,7 +128,7 @@ pub struct AccountCurrent { impl AccountCurrent { fn new(initial_data: AccountData) -> Self { - let balance = initial_data.balance(); + let balance = initial_data.coin_balance().unwrap(); let sequence_number = initial_data.sequence_number(); let sent_events_count = initial_data.sent_events_count(); let received_events_count = initial_data.received_events_count(); @@ -401,7 +401,7 @@ pub fn assert_accounts_match( .read_account_resource(account.account()) .expect("account resource must exist"); let coin_store_resource = executor - .read_coin_store_resource(account.account()) + .read_apt_coin_store_resource(account.account()) .expect("account balance resource must exist"); let auth_key = account.account().auth_key(); prop_assert_eq!( diff --git a/aptos-move/e2e-tests/src/account_universe/create_account.rs b/aptos-move/e2e-tests/src/account_universe/create_account.rs index c5a760fe056e0..db2b1f57228b7 100644 --- a/aptos-move/e2e-tests/src/account_universe/create_account.rs +++ b/aptos-move/e2e-tests/src/account_universe/create_account.rs @@ -59,6 +59,8 @@ impl AUTransactionGen for CreateAccountGen { self.new_account.clone(), self.amount, 0, + false, + false, )); } else { gas_used = 0; diff --git a/aptos-move/e2e-tests/src/data_store.rs b/aptos-move/e2e-tests/src/data_store.rs index 7113f7b704b4b..e995016eea149 100644 --- a/aptos-move/e2e-tests/src/data_store.rs +++ 
b/aptos-move/e2e-tests/src/data_store.rs @@ -7,12 +7,14 @@ use crate::account::AccountData; use aptos_types::{ account_config::CoinInfoResource, + on_chain_config::{Features, OnChainConfig}, state_store::{ errors::StateviewError, in_memory_state_view::InMemoryStateView, state_key::StateKey, state_storage_usage::StateStorageUsage, state_value::StateValue, TStateView, }, transaction::ChangeSet, write_set::{TransactionWrite, WriteSet}, + SupraCoinType, }; use aptos_vm_genesis::{ generate_genesis_change_set_for_mainnet, generate_genesis_change_set_for_testing, @@ -104,7 +106,7 @@ impl FakeDataStore { /// Adds CoinInfo to this data store. pub fn add_coin_info(&mut self) { - let coin_info = CoinInfoResource::random(u128::MAX); + let coin_info = CoinInfoResource::::random(u128::MAX); let write_set = coin_info.to_writeset(0).expect("access path in test"); self.add_write_set(&write_set) } @@ -118,6 +120,14 @@ impl FakeDataStore { StateValue::new_legacy(blob.into()), ); } + + pub fn set_features(&mut self, features: Features) { + let bytes = bcs::to_bytes(&features).expect("Features should always be serializable"); + self.set( + StateKey::resource(Features::address(), &Features::struct_tag()).unwrap(), + StateValue::new_legacy(bytes.into()), + ); + } } // This is used by the `execute_block` API. 
@@ -140,3 +150,38 @@ impl TStateView for FakeDataStore { InMemoryStateView::new(self.state_data.clone()) } } + +#[cfg(test)] +mod test { + use super::*; + use aptos_types::on_chain_config::{FeatureFlag, Features}; + use claims::*; + + #[test] + fn test_features_can_be_set() { + let mut data_store = FakeDataStore::default(); + assert_none!(Features::fetch_config(&data_store)); + + data_store.set_features(Features::default()); + let features = assert_some!(Features::fetch_config(&data_store)); + assert_eq!(features, Features::default()) + } + + #[test] + fn test_features_can_be_reset() { + use claims::*; + + let mut data_store = FakeDataStore::default(); + data_store.add_write_set(GENESIS_CHANGE_SET_HEAD.write_set()); + + // Reset the feature. + let mut features = assert_some!(Features::fetch_config(&data_store)); + assert!(features.is_enabled(FeatureFlag::STORAGE_SLOT_METADATA)); + features.disable(FeatureFlag::STORAGE_SLOT_METADATA); + data_store.set_features(features.clone()); + + let reset_features = assert_some!(Features::fetch_config(&data_store)); + assert!(!reset_features.is_enabled(FeatureFlag::STORAGE_SLOT_METADATA)); + assert_eq!(reset_features, features) + } +} diff --git a/aptos-move/e2e-tests/src/executor.rs b/aptos-move/e2e-tests/src/executor.rs index 61799b7946f51..8728538028c8d 100644 --- a/aptos-move/e2e-tests/src/executor.rs +++ b/aptos-move/e2e-tests/src/executor.rs @@ -24,8 +24,8 @@ use aptos_gas_schedule::{AptosGasParameters, InitialGasSchedule, LATEST_GAS_FEAT use aptos_keygen::KeyGen; use aptos_types::{ account_config::{ - new_block_event_key, AccountResource, CoinInfoResource, CoinStoreResource, NewBlockEvent, - CORE_CODE_ADDRESS, + new_block_event_key, AccountResource, CoinInfoResource, CoinStoreResource, + ConcurrentSupply, NewBlockEvent, ObjectGroupResource, CORE_CODE_ADDRESS, }, block_executor::config::{ BlockExecutorConfig, BlockExecutorConfigFromOnchain, BlockExecutorLocalConfig, @@ -40,19 +40,19 @@ use aptos_types::{ 
signature_verified_transaction::{ into_signature_verified_block, SignatureVerifiedTransaction, }, - BlockOutput, EntryFunction, ExecutionStatus, SignedTransaction, Transaction, - TransactionOutput, TransactionPayload, TransactionStatus, VMValidatorResult, - ViewFunctionOutput, + BlockOutput, ExecutionStatus, SignedTransaction, Transaction, TransactionOutput, + TransactionPayload, TransactionStatus, VMValidatorResult, ViewFunctionOutput, }, vm_status::VMStatus, - write_set::WriteSet, + write_set::{WriteOp, WriteSet, WriteSetMut}, + SupraCoinType, CoinType, }; use aptos_vm::{ block_executor::{AptosTransactionOutput, BlockAptosVM}, data_cache::AsMoveResolver, - gas::{get_gas_parameters, make_prod_gas_meter}, - move_vm_ext::{AptosMoveResolver, MoveVmExt, SessionId}, - verifier, AptosVM, VMValidator, + gas::make_prod_gas_meter, + move_vm_ext::{MoveVmExt, SessionId}, + AptosVM, VMValidator, }; use aptos_vm_genesis::{generate_genesis_change_set_for_testing_with_count, GenesisOptions}; use aptos_vm_logging::log_schema::AdapterLogSchema; @@ -64,14 +64,14 @@ use bytes::Bytes; use move_core_types::{ account_address::AccountAddress, identifier::Identifier, - language_storage::{ModuleId, TypeTag}, - move_resource::MoveResource, + language_storage::{ModuleId, StructTag, TypeTag}, + move_resource::{MoveResource, MoveStructType}, }; use move_vm_runtime::module_traversal::{TraversalContext, TraversalStorage}; use move_vm_types::gas::UnmeteredGasMeter; use serde::Serialize; use std::{ - collections::BTreeSet, + collections::{BTreeMap, BTreeSet}, env, fs::{self, OpenOptions}, io::Write, @@ -368,8 +368,30 @@ impl FakeExecutor { pub fn new_account_data_at(&mut self, addr: AccountAddress) -> AccountData { // The below will use the genesis keypair but that should be fine. let acc = Account::new_genesis_account(addr); + + // Mint the account 10M Aptos coins (with 8 decimals). 
+ self.store_and_fund_account(acc, 1_000_000_000_000_000, 0) + } + + pub fn store_and_fund_account( + &mut self, + account: Account, + balance: u64, + seq_num: u64, + ) -> AccountData { + let features = Features::fetch_config(&self.data_store).unwrap_or_default(); + let use_fa_balance = features.is_enabled(FeatureFlag::NEW_ACCOUNTS_DEFAULT_TO_FA_APT_STORE); + let use_concurrent_balance = + features.is_enabled(FeatureFlag::DEFAULT_TO_CONCURRENT_FUNGIBLE_BALANCE); + // Mint the account 10M Aptos coins (with 8 decimals). - let data = AccountData::with_account(acc, 1_000_000_000_000_000, 0); + let data = AccountData::with_account( + account, + balance, + seq_num, + use_fa_balance, + use_concurrent_balance, + ); self.add_account_data(&data); data } @@ -386,20 +408,60 @@ impl FakeExecutor { /// Adds an account to this executor's data store. pub fn add_account_data(&mut self, account_data: &AccountData) { self.data_store.add_account_data(account_data); - let new_added_supply = account_data.balance(); // When a new account data with balance is initialized. The total_supply should be updated // correspondingly to be consistent with the global state. // if new_added_supply = 0, it is a noop. 
- if new_added_supply != 0 { - let coin_info_resource = self - .read_coin_info_resource() - .expect("coin info must exist in data store"); - let old_supply = self.read_coin_supply().unwrap(); - self.data_store.add_write_set( - &coin_info_resource - .to_writeset(old_supply + (new_added_supply as u128)) + + if let Some(new_added_supply) = account_data.coin_balance() { + if new_added_supply != 0 { + let coin_info_resource = self + .read_apt_coin_info_resource() + .expect("coin info must exist in data store"); + let old_supply = self.read_coin_supply().unwrap(); + self.data_store.add_write_set( + &coin_info_resource + .to_writeset(old_supply + (new_added_supply as u128)) + .unwrap(), + ) + } + } + + if let Some(new_added_supply) = account_data.fungible_balance() { + if new_added_supply != 0 { + let mut fa_resource_group = self + .read_resource_group::(&AccountAddress::TEN) + .expect("resource group must exist in data store"); + let mut supply = bcs::from_bytes::( + fa_resource_group + .group + .get(&ConcurrentSupply::struct_tag()) + .unwrap(), + ) + .unwrap(); + supply + .current + .set(supply.current.get() + new_added_supply as u128); + fa_resource_group + .group + .insert( + ConcurrentSupply::struct_tag(), + bcs::to_bytes(&supply).unwrap(), + ) + .unwrap(); + self.data_store.add_write_set( + &WriteSetMut::new(vec![( + StateKey::resource_group( + &AccountAddress::TEN, + &ObjectGroupResource::struct_tag(), + ), + WriteOp::legacy_modification( + bcs::to_bytes(&fa_resource_group).unwrap().into(), + ), + )]) + .freeze() .unwrap(), - ) + ) + } } } @@ -430,6 +492,37 @@ impl FakeExecutor { bcs::from_bytes(&data_blob).ok() } + pub fn read_resource_group(&self, addr: &AccountAddress) -> Option { + let data_blob = TStateView::get_state_value_bytes( + &self.data_store, + &StateKey::resource_group(addr, &T::struct_tag()), + ) + .expect("account must exist in data store") + .unwrap_or_else(|| panic!("Can't fetch {} resource group for {}", T::STRUCT_NAME, addr)); + 
bcs::from_bytes(&data_blob).ok() + } + + pub fn read_resource_from_group( + &self, + addr: &AccountAddress, + resource_group_tag: &StructTag, + ) -> Option { + let bytes_opt = TStateView::get_state_value_bytes( + &self.data_store, + &StateKey::resource_group(addr, resource_group_tag), + ) + .expect("account must exist in data store"); + + let group: Option>> = bytes_opt + .map(|bytes| bcs::from_bytes(&bytes)) + .transpose() + .unwrap(); + group + .and_then(|g| g.get(&T::struct_tag()).map(|b| bcs::from_bytes(b))) + .transpose() + .unwrap() + } + /// Reads the resource `Value` for an account under the given address from /// this executor's data store. pub fn read_account_resource_at_address( @@ -440,8 +533,11 @@ impl FakeExecutor { } /// Reads the CoinStore resource value for an account from this executor's data store. - pub fn read_coin_store_resource(&self, account: &Account) -> Option { - self.read_coin_store_resource_at_address(account.address()) + pub fn read_apt_coin_store_resource( + &self, + account: &Account, + ) -> Option> { + self.read_apt_coin_store_resource_at_address(account.address()) } /// Reads supply from CoinInfo resource value from this executor's data store. @@ -463,16 +559,16 @@ impl FakeExecutor { } /// Reads the CoinInfo resource value from this executor's data store. - pub fn read_coin_info_resource(&self) -> Option { - self.read_resource(&AccountAddress::ONE) + pub fn read_apt_coin_info_resource(&self) -> Option> { + self.read_resource(&SupraCoinType::coin_info_address()) } /// Reads the CoinStore resource value for an account under the given address from this executor's /// data store. 
- pub fn read_coin_store_resource_at_address( + pub fn read_apt_coin_store_resource_at_address( &self, addr: &AccountAddress, - ) -> Option { + ) -> Option> { self.read_resource(addr) } @@ -533,7 +629,7 @@ impl FakeExecutor { }, onchain: onchain_config, }; - BlockAptosVM::execute_block::< + BlockAptosVM::execute_block_on_thread_pool::< _, NoOpTransactionCommitHook, >( @@ -978,13 +1074,14 @@ impl FakeExecutor { let resolver = self.data_store.as_move_resolver(); // TODO(Gas): we probably want to switch to non-zero costs in the future - let vm = MoveVmExt::new_with_gas_hook( + let vm = MoveVmExt::new_with_extended_options( LATEST_GAS_FEATURE_VERSION, Ok(&AptosGasParameters::zeros()), self.env.clone(), Some(Arc::new(move |expression| { a2.lock().unwrap().push(expression); })), + false, &resolver, ); let mut session = vm.new_session(&resolver, SessionId::void(), None); @@ -1016,14 +1113,14 @@ impl FakeExecutor { println!("Should error, but ignoring for now... {}", err); } } - let change_set = session + let (change_set, module_write_set) = session .finish(&ChangeSetConfigs::unlimited_at_gas_feature_version( LATEST_GAS_FEATURE_VERSION, )) .expect("Failed to generate txn effects"); change_set - .try_into_storage_change_set() - .expect("Failed to convert to ChangeSet") + .try_combine_into_storage_change_set(module_write_set) + .expect("Failed to convert to storage ChangeSet") .into_inner() }; self.data_store.add_write_set(&write_set); @@ -1072,14 +1169,14 @@ impl FakeExecutor { e.into_vm_status() ) }); - let change_set = session + let (change_set, module_write_set) = session .finish(&ChangeSetConfigs::unlimited_at_gas_feature_version( LATEST_GAS_FEATURE_VERSION, )) .expect("Failed to generate txn effects"); change_set - .try_into_storage_change_set() - .expect("Failed to convert to ChangeSet") + .try_combine_into_storage_change_set(module_write_set) + .expect("Failed to convert to storage ChangeSet") .into_inner() }; self.data_store.add_write_set(&write_set); @@ -1096,72 
+1193,6 @@ impl FakeExecutor { self.exec_module(&Self::module(module_name), function_name, type_params, args) } - pub fn try_exec_entry_with_state_view( - &mut self, - senders: Vec, - entry_fn: &EntryFunction, - state_view: &impl AptosMoveResolver, - features: Features, - ) -> Result<(WriteSet, Vec), VMStatus> { - let (gas_params, storage_gas_params, gas_feature_version) = - get_gas_parameters(&features, state_view); - - let are_struct_constructors_enabled = features.is_enabled(FeatureFlag::STRUCT_CONSTRUCTORS); - let env = self - .env - .as_ref() - .clone() - .with_features_for_testing(features); - - let vm = MoveVmExt::new( - LATEST_GAS_FEATURE_VERSION, - gas_params.as_ref(), - env, - state_view, - ); - - let mut session = vm.new_session(state_view, SessionId::void(), None); - let func = - session.load_function(entry_fn.module(), entry_fn.function(), entry_fn.ty_args())?; - let args = verifier::transaction_arg_validation::validate_combine_signer_and_txn_args( - &mut session, - senders, - entry_fn.args().to_vec(), - &func, - are_struct_constructors_enabled, - )?; - - let mut gas_meter = make_prod_gas_meter( - gas_feature_version, - gas_params.unwrap().clone().vm, - storage_gas_params.unwrap(), - false, - 10_000_000_000_000.into(), - ); - - let storage = TraversalStorage::new(); - session - .execute_entry_function( - func, - args, - &mut gas_meter, - &mut TraversalContext::new(&storage), - ) - .map_err(|e| e.into_vm_status())?; - - let mut change_set = session - .finish(&ChangeSetConfigs::unlimited_at_gas_feature_version( - LATEST_GAS_FEATURE_VERSION, - )) - .expect("Failed to generate txn effects"); - change_set.try_materialize_aggregator_v1_delta_set(state_view)?; - let (write_set, events) = change_set - .try_into_storage_change_set() - .expect("Failed to convert to ChangeSet") - .into_inner(); - Ok((write_set, events)) - } - pub fn try_exec( &mut self, module_name: &str, @@ -1190,15 +1221,14 @@ impl FakeExecutor { ) .map_err(|e| e.into_vm_status())?; - let 
change_set = session + let (change_set, module_write_set) = session .finish(&ChangeSetConfigs::unlimited_at_gas_feature_version( LATEST_GAS_FEATURE_VERSION, )) .expect("Failed to generate txn effects"); - // TODO: Support deltas in fake executor. let (write_set, events) = change_set - .try_into_storage_change_set() - .expect("Failed to convert to ChangeSet") + .try_combine_into_storage_change_set(module_write_set) + .expect("Failed to convert to storage ChangeSet") .into_inner(); Ok((write_set, events)) } diff --git a/aptos-move/e2e-testsuite/src/tests/create_account.rs b/aptos-move/e2e-testsuite/src/tests/create_account.rs index 0a16e65d97ccc..0d2a48056c5bb 100644 --- a/aptos-move/e2e-testsuite/src/tests/create_account.rs +++ b/aptos-move/e2e-testsuite/src/tests/create_account.rs @@ -35,7 +35,7 @@ fn create_account() { .expect("sender must exist"); let updated_receiver_balance = executor - .read_coin_store_resource(&new_account) + .read_apt_coin_store_resource(&new_account) .expect("receiver balance must exist"); assert_eq!(initial_amount, updated_receiver_balance.coin()); assert_eq!(1, updated_sender.sequence_number()); diff --git a/aptos-move/e2e-testsuite/src/tests/peer_to_peer.rs b/aptos-move/e2e-testsuite/src/tests/peer_to_peer.rs index dfb5884450d11..bee9a68986d47 100644 --- a/aptos-move/e2e-testsuite/src/tests/peer_to_peer.rs +++ b/aptos-move/e2e-testsuite/src/tests/peer_to_peer.rs @@ -40,10 +40,10 @@ fn single_peer_to_peer_with_event() { .read_account_resource(sender.account()) .expect("sender must exist"); let updated_sender_balance = executor - .read_coin_store_resource(sender.account()) + .read_apt_coin_store_resource(sender.account()) .expect("sender balance must exist"); let updated_receiver_balance = executor - .read_coin_store_resource(receiver.account()) + .read_apt_coin_store_resource(receiver.account()) .expect("receiver balance must exist"); assert_eq!(receiver_balance, updated_receiver_balance.coin()); assert_eq!(sender_balance, 
updated_sender_balance.coin()); @@ -102,10 +102,10 @@ fn few_peer_to_peer_with_event() { } let original_sender_balance = executor - .read_coin_store_resource(sender.account()) + .read_apt_coin_store_resource(sender.account()) .expect("sender balance must exist"); let original_receiver_balance = executor - .read_coin_store_resource(receiver.account()) + .read_apt_coin_store_resource(receiver.account()) .expect("receiver balcne must exist"); executor.apply_write_set(txn_output.write_set()); @@ -116,10 +116,10 @@ fn few_peer_to_peer_with_event() { .read_account_resource(sender.account()) .expect("sender must exist"); let updated_sender_balance = executor - .read_coin_store_resource(sender.account()) + .read_apt_coin_store_resource(sender.account()) .expect("sender balance must exist"); let updated_receiver_balance = executor - .read_coin_store_resource(receiver.account()) + .read_apt_coin_store_resource(receiver.account()) .expect("receiver balance must exist"); assert_eq!(receiver_balance, updated_receiver_balance.coin()); assert_eq!(sender_balance, updated_sender_balance.coin()); @@ -249,12 +249,12 @@ pub(crate) fn check_and_apply_transfer_output( .read_account_resource(sender) .expect("sender must exist"); let sender_balance = executor - .read_coin_store_resource(sender) + .read_apt_coin_store_resource(sender) .expect("sender balance must exist"); let sender_initial_balance = sender_balance.coin(); let sender_seq_num = sender_resource.sequence_number(); let receiver_initial_balance = executor - .read_coin_store_resource(receiver) + .read_apt_coin_store_resource(receiver) .expect("receiver balance must exist") .coin(); @@ -269,10 +269,10 @@ pub(crate) fn check_and_apply_transfer_output( .read_account_resource(sender) .expect("sender must exist"); let updated_sender_balance = executor - .read_coin_store_resource(sender) + .read_apt_coin_store_resource(sender) .expect("sender balance must exist"); let updated_receiver_balance = executor - 
.read_coin_store_resource(receiver) + .read_apt_coin_store_resource(receiver) .expect("receiver balance must exist"); assert_eq!(receiver_balance, updated_receiver_balance.coin()); assert_eq!(sender_balance, updated_sender_balance.coin()); diff --git a/aptos-move/e2e-testsuite/src/tests/scripts.rs b/aptos-move/e2e-testsuite/src/tests/scripts.rs index 881f35dd674fd..413b7c845fb0c 100644 --- a/aptos-move/e2e-testsuite/src/tests/scripts.rs +++ b/aptos-move/e2e-testsuite/src/tests/scripts.rs @@ -63,7 +63,7 @@ fn script_code_unverifiable() { .read_account_resource(sender.account()) .expect("sender must exist"); let updated_sender_balance = executor - .read_coin_store_resource(sender.account()) + .read_apt_coin_store_resource(sender.account()) .expect("sender balance must exist"); assert_eq!(balance, updated_sender_balance.coin()); assert_eq!(11, updated_sender.sequence_number()); @@ -142,7 +142,7 @@ fn script_none_existing_module_dep() { .read_account_resource(sender.account()) .expect("sender must exist"); let updated_sender_balance = executor - .read_coin_store_resource(sender.account()) + .read_apt_coin_store_resource(sender.account()) .expect("sender balance must exist"); assert_eq!(balance, updated_sender_balance.coin()); assert_eq!(11, updated_sender.sequence_number()); @@ -221,7 +221,7 @@ fn script_non_existing_function_dep() { .read_account_resource(sender.account()) .expect("sender must exist"); let updated_sender_balance = executor - .read_coin_store_resource(sender.account()) + .read_apt_coin_store_resource(sender.account()) .expect("sender balance must exist"); assert_eq!(balance, updated_sender_balance.coin()); assert_eq!(11, updated_sender.sequence_number()); @@ -301,7 +301,7 @@ fn script_bad_sig_function_dep() { .read_account_resource(sender.account()) .expect("sender must exist"); let updated_sender_balance = executor - .read_coin_store_resource(sender.account()) + .read_apt_coin_store_resource(sender.account()) .expect("sender balance must exist"); 
assert_eq!(balance, updated_sender_balance.coin()); assert_eq!(11, updated_sender.sequence_number()); @@ -367,7 +367,7 @@ fn script_type_argument_module_does_not_exist() { .read_account_resource(sender.account()) .expect("sender must exist"); let updated_sender_balance = executor - .read_coin_store_resource(sender.account()) + .read_apt_coin_store_resource(sender.account()) .expect("sender balance must exist"); assert_eq!(balance, updated_sender_balance.coin()); assert_eq!(11, updated_sender.sequence_number()); @@ -435,7 +435,7 @@ fn script_nested_type_argument_module_does_not_exist() { .read_account_resource(sender.account()) .expect("sender must exist"); let updated_sender_balance = executor - .read_coin_store_resource(sender.account()) + .read_apt_coin_store_resource(sender.account()) .expect("sender balance must exist"); assert_eq!(balance, updated_sender_balance.coin()); assert_eq!(11, updated_sender.sequence_number()); @@ -516,7 +516,7 @@ fn forbid_script_emitting_events() { .read_account_resource(sender.account()) .expect("sender must exist"); let updated_sender_balance = executor - .read_coin_store_resource(sender.account()) + .read_apt_coin_store_resource(sender.account()) .expect("sender balance must exist"); assert_eq!(balance, updated_sender_balance.coin()); assert_eq!(11, updated_sender.sequence_number()); diff --git a/aptos-move/framework/Cargo.toml b/aptos-move/framework/Cargo.toml index 3d5bfd0f98501..fb7d4cb4d4495 100644 --- a/aptos-move/framework/Cargo.toml +++ b/aptos-move/framework/Cargo.toml @@ -52,6 +52,7 @@ move-binary-format = { workspace = true } move-cli = { workspace = true } move-command-line-common = { workspace = true } move-compiler = { workspace = true } +move-compiler-v2 = { workspace = true } move-core-types = { workspace = true } move-docgen = { workspace = true } move-model = { workspace = true } diff --git a/aptos-move/framework/aptos-stdlib/doc/any.md b/aptos-move/framework/aptos-stdlib/doc/any.md index 
e8198a8d9bdbd..1a4704525137b 100644 --- a/aptos-move/framework/aptos-stdlib/doc/any.md +++ b/aptos-move/framework/aptos-stdlib/doc/any.md @@ -120,7 +120,7 @@ also required from T. Unpack a value from the Any representation. This aborts if the value has not the expected type T. -
public fun unpack<T>(x: any::Any): T
+
public fun unpack<T>(self: any::Any): T
 
@@ -129,9 +129,9 @@ Unpack a value from the Any repres Implementation -
public fun unpack<T>(x: Any): T {
-    assert!(type_info::type_name<T>() == x.type_name, error::invalid_argument(ETYPE_MISMATCH));
-    from_bytes<T>(x.data)
+
public fun unpack<T>(self: Any): T {
+    assert!(type_info::type_name<T>() == self.type_name, error::invalid_argument(ETYPE_MISMATCH));
+    from_bytes<T>(self.data)
 }
 
@@ -146,7 +146,7 @@ Unpack a value from the Any repres Returns the type name of this Any -
public fun type_name(x: &any::Any): &string::String
+
public fun type_name(self: &any::Any): &string::String
 
@@ -155,8 +155,8 @@ Returns the type name of this Any Implementation -
public fun type_name(x: &Any): &String {
-    &x.type_name
+
public fun type_name(self: &Any): &String {
+    &self.type_name
 }
 
@@ -195,14 +195,14 @@ Returns the type name of this Any ### Function `unpack` -
public fun unpack<T>(x: any::Any): T
+
public fun unpack<T>(self: any::Any): T
 
include UnpackAbortsIf<T>;
-ensures result == from_bcs::deserialize<T>(x.data);
+ensures result == from_bcs::deserialize<T>(self.data);
 
@@ -212,9 +212,9 @@ Returns the type name of this Any
schema UnpackAbortsIf<T> {
-    x: Any;
-    aborts_if type_info::type_name<T>() != x.type_name;
-    aborts_if !from_bcs::deserializable<T>(x.data);
+    self: Any;
+    aborts_if type_info::type_name<T>() != self.type_name;
+    aborts_if !from_bcs::deserializable<T>(self.data);
 }
 
@@ -225,9 +225,9 @@ Returns the type name of this Any
schema UnpackRequirement<T> {
-    x: Any;
-    requires type_info::type_name<T>() == x.type_name;
-    requires from_bcs::deserializable<T>(x.data);
+    self: Any;
+    requires type_info::type_name<T>() == self.type_name;
+    requires from_bcs::deserializable<T>(self.data);
 }
 
@@ -238,14 +238,14 @@ Returns the type name of this Any ### Function `type_name` -
public fun type_name(x: &any::Any): &string::String
+
public fun type_name(self: &any::Any): &string::String
 
aborts_if false;
-ensures result == x.type_name;
+ensures result == self.type_name;
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/big_vector.md b/aptos-move/framework/aptos-stdlib/doc/big_vector.md index cd3c8996c6564..7d785a5b730a1 100644 --- a/aptos-move/framework/aptos-stdlib/doc/big_vector.md +++ b/aptos-move/framework/aptos-stdlib/doc/big_vector.md @@ -197,11 +197,11 @@ Create a vector of length 1 containing the passed in element. ## Function `destroy_empty` -Destroy the vector v. -Aborts if v is not empty. +Destroy the vector self. +Aborts if self is not empty. -
public fun destroy_empty<T>(v: big_vector::BigVector<T>)
+
public fun destroy_empty<T>(self: big_vector::BigVector<T>)
 
@@ -210,9 +210,9 @@ Aborts if v is not empty. Implementation -
public fun destroy_empty<T>(v: BigVector<T>) {
-    assert!(is_empty(&v), error::invalid_argument(EVECTOR_NOT_EMPTY));
-    let BigVector { buckets, end_index: _, bucket_size: _ } = v;
+
public fun destroy_empty<T>(self: BigVector<T>) {
+    assert!(is_empty(&self), error::invalid_argument(EVECTOR_NOT_EMPTY));
+    let BigVector { buckets, end_index: _, bucket_size: _ } = self;
     table_with_length::destroy_empty(buckets);
 }
 
@@ -225,10 +225,10 @@ Aborts if v is not empty. ## Function `destroy` -Destroy the vector v if T has drop +Destroy the vector self if T has drop -
public fun destroy<T: drop>(v: big_vector::BigVector<T>)
+
public fun destroy<T: drop>(self: big_vector::BigVector<T>)
 
@@ -237,8 +237,8 @@ Destroy the vector v if T has drop Implementation -
public fun destroy<T: drop>(v: BigVector<T>) {
-    let BigVector { buckets, end_index, bucket_size: _ } = v;
+
public fun destroy<T: drop>(self: BigVector<T>) {
+    let BigVector { buckets, end_index, bucket_size: _ } = self;
     let i = 0;
     while (end_index > 0) {
         let num_elements = vector::length(&table_with_length::remove(&mut buckets, i));
@@ -257,11 +257,11 @@ Destroy the vector v if T has drop
 
 ## Function `borrow`
 
-Acquire an immutable reference to the ith element of the vector v.
+Acquire an immutable reference to the ith element of the vector self.
 Aborts if i is out of bounds.
 
 
-
public fun borrow<T>(v: &big_vector::BigVector<T>, i: u64): &T
+
public fun borrow<T>(self: &big_vector::BigVector<T>, i: u64): &T
 
@@ -270,9 +270,9 @@ Aborts if i is out of bounds. Implementation -
public fun borrow<T>(v: &BigVector<T>, i: u64): &T {
-    assert!(i < length(v), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
-    vector::borrow(table_with_length::borrow(&v.buckets, i / v.bucket_size), i % v.bucket_size)
+
public fun borrow<T>(self: &BigVector<T>, i: u64): &T {
+    assert!(i < length(self), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
+    vector::borrow(table_with_length::borrow(&self.buckets, i / self.bucket_size), i % self.bucket_size)
 }
 
@@ -284,11 +284,11 @@ Aborts if i is out of bounds. ## Function `borrow_mut` -Return a mutable reference to the ith element in the vector v. +Return a mutable reference to the ith element in the vector self. Aborts if i is out of bounds. -
public fun borrow_mut<T>(v: &mut big_vector::BigVector<T>, i: u64): &mut T
+
public fun borrow_mut<T>(self: &mut big_vector::BigVector<T>, i: u64): &mut T
 
@@ -297,9 +297,9 @@ Aborts if i is out of bounds. Implementation -
public fun borrow_mut<T>(v: &mut BigVector<T>, i: u64): &mut T {
-    assert!(i < length(v), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
-    vector::borrow_mut(table_with_length::borrow_mut(&mut v.buckets, i / v.bucket_size), i % v.bucket_size)
+
public fun borrow_mut<T>(self: &mut BigVector<T>, i: u64): &mut T {
+    assert!(i < length(self), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
+    vector::borrow_mut(table_with_length::borrow_mut(&mut self.buckets, i / self.bucket_size), i % self.bucket_size)
 }
 
@@ -311,12 +311,12 @@ Aborts if i is out of bounds. ## Function `append` -Empty and destroy the other vector, and push each of the elements in the other vector onto the lhs vector in the +Empty and destroy the other vector, and push each of the elements in the other vector onto the self vector in the same order as they occurred in other. Disclaimer: This function is costly. Use it at your own discretion. -
public fun append<T: store>(lhs: &mut big_vector::BigVector<T>, other: big_vector::BigVector<T>)
+
public fun append<T: store>(self: &mut big_vector::BigVector<T>, other: big_vector::BigVector<T>)
 
@@ -325,16 +325,16 @@ Disclaimer: This function is costly. Use it at your own discretion. Implementation -
public fun append<T: store>(lhs: &mut BigVector<T>, other: BigVector<T>) {
+
public fun append<T: store>(self: &mut BigVector<T>, other: BigVector<T>) {
     let other_len = length(&other);
     let half_other_len = other_len / 2;
     let i = 0;
     while (i < half_other_len) {
-        push_back(lhs, swap_remove(&mut other, i));
+        push_back(self, swap_remove(&mut other, i));
         i = i + 1;
     };
     while (i < other_len) {
-        push_back(lhs, pop_back(&mut other));
+        push_back(self, pop_back(&mut other));
         i = i + 1;
     };
     destroy_empty(other);
@@ -349,11 +349,11 @@ Disclaimer: This function is costly. Use it at your own discretion.
 
 ## Function `push_back`
 
-Add element val to the end of the vector v. It grows the buckets when the current buckets are full.
+Add element val to the end of the vector self. It grows the buckets when the current buckets are full.
 This operation will cost more gas when it adds new bucket.
 
 
-
public fun push_back<T: store>(v: &mut big_vector::BigVector<T>, val: T)
+
public fun push_back<T: store>(self: &mut big_vector::BigVector<T>, val: T)
 
@@ -362,15 +362,15 @@ This operation will cost more gas when it adds new bucket. Implementation -
public fun push_back<T: store>(v: &mut BigVector<T>, val: T) {
-    let num_buckets = table_with_length::length(&v.buckets);
-    if (v.end_index == num_buckets * v.bucket_size) {
-        table_with_length::add(&mut v.buckets, num_buckets, vector::empty());
-        vector::push_back(table_with_length::borrow_mut(&mut v.buckets, num_buckets), val);
+
public fun push_back<T: store>(self: &mut BigVector<T>, val: T) {
+    let num_buckets = table_with_length::length(&self.buckets);
+    if (self.end_index == num_buckets * self.bucket_size) {
+        table_with_length::add(&mut self.buckets, num_buckets, vector::empty());
+        vector::push_back(table_with_length::borrow_mut(&mut self.buckets, num_buckets), val);
     } else {
-        vector::push_back(table_with_length::borrow_mut(&mut v.buckets, num_buckets - 1), val);
+        vector::push_back(table_with_length::borrow_mut(&mut self.buckets, num_buckets - 1), val);
     };
-    v.end_index = v.end_index + 1;
+    self.end_index = self.end_index + 1;
 }
 
@@ -382,12 +382,12 @@ This operation will cost more gas when it adds new bucket. ## Function `pop_back` -Pop an element from the end of vector v. It doesn't shrink the buckets even if they're empty. +Pop an element from the end of vector self. It doesn't shrink the buckets even if they're empty. Call shrink_to_fit explicity to deallocate empty buckets. -Aborts if v is empty. +Aborts if self is empty. -
public fun pop_back<T>(v: &mut big_vector::BigVector<T>): T
+
public fun pop_back<T>(self: &mut big_vector::BigVector<T>): T
 
@@ -396,17 +396,17 @@ Aborts if v is empty. Implementation -
public fun pop_back<T>(v: &mut BigVector<T>): T {
-    assert!(!is_empty(v), error::invalid_state(EVECTOR_EMPTY));
-    let num_buckets = table_with_length::length(&v.buckets);
-    let last_bucket = table_with_length::borrow_mut(&mut v.buckets, num_buckets - 1);
+
public fun pop_back<T>(self: &mut BigVector<T>): T {
+    assert!(!is_empty(self), error::invalid_state(EVECTOR_EMPTY));
+    let num_buckets = table_with_length::length(&self.buckets);
+    let last_bucket = table_with_length::borrow_mut(&mut self.buckets, num_buckets - 1);
     let val = vector::pop_back(last_bucket);
     // Shrink the table if the last vector is empty.
     if (vector::is_empty(last_bucket)) {
         move last_bucket;
-        vector::destroy_empty(table_with_length::remove(&mut v.buckets, num_buckets - 1));
+        vector::destroy_empty(table_with_length::remove(&mut self.buckets, num_buckets - 1));
     };
-    v.end_index = v.end_index - 1;
+    self.end_index = self.end_index - 1;
     val
 }
 
@@ -419,12 +419,12 @@ Aborts if v is empty. ## Function `remove` -Remove the element at index i in the vector v and return the owned value that was previously stored at i in v. +Remove the element at index i in the vector v and return the owned value that was previously stored at i in self. All elements occurring at indices greater than i will be shifted down by 1. Will abort if i is out of bounds. Disclaimer: This function is costly. Use it at your own discretion. -
public fun remove<T>(v: &mut big_vector::BigVector<T>, i: u64): T
+
public fun remove<T>(self: &mut big_vector::BigVector<T>, i: u64): T
 
@@ -433,28 +433,28 @@ Disclaimer: This function is costly. Use it at your own discretion. Implementation -
public fun remove<T>(v: &mut BigVector<T>, i: u64): T {
-    let len = length(v);
+
public fun remove<T>(self: &mut BigVector<T>, i: u64): T {
+    let len = length(self);
     assert!(i < len, error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
-    let num_buckets = table_with_length::length(&v.buckets);
-    let cur_bucket_index = i / v.bucket_size + 1;
-    let cur_bucket = table_with_length::borrow_mut(&mut v.buckets, cur_bucket_index - 1);
-    let res = vector::remove(cur_bucket, i % v.bucket_size);
-    v.end_index = v.end_index - 1;
+    let num_buckets = table_with_length::length(&self.buckets);
+    let cur_bucket_index = i / self.bucket_size + 1;
+    let cur_bucket = table_with_length::borrow_mut(&mut self.buckets, cur_bucket_index - 1);
+    let res = vector::remove(cur_bucket, i % self.bucket_size);
+    self.end_index = self.end_index - 1;
     move cur_bucket;
     while ({
         spec {
             invariant cur_bucket_index <= num_buckets;
-            invariant table_with_length::spec_len(v.buckets) == num_buckets;
+            invariant table_with_length::spec_len(self.buckets) == num_buckets;
         };
         (cur_bucket_index < num_buckets)
     }) {
         // remove one element from the start of current vector
-        let cur_bucket = table_with_length::borrow_mut(&mut v.buckets, cur_bucket_index);
+        let cur_bucket = table_with_length::borrow_mut(&mut self.buckets, cur_bucket_index);
         let t = vector::remove(cur_bucket, 0);
         move cur_bucket;
         // and put it at the end of the last one
-        let prev_bucket = table_with_length::borrow_mut(&mut v.buckets, cur_bucket_index - 1);
+        let prev_bucket = table_with_length::borrow_mut(&mut self.buckets, cur_bucket_index - 1);
         vector::push_back(prev_bucket, t);
         cur_bucket_index = cur_bucket_index + 1;
     };
@@ -463,10 +463,10 @@ Disclaimer: This function is costly. Use it at your own discretion.
     };
 
     // Shrink the table if the last vector is empty.
-    let last_bucket = table_with_length::borrow_mut(&mut v.buckets, num_buckets - 1);
+    let last_bucket = table_with_length::borrow_mut(&mut self.buckets, num_buckets - 1);
     if (vector::is_empty(last_bucket)) {
         move last_bucket;
-        vector::destroy_empty(table_with_length::remove(&mut v.buckets, num_buckets - 1));
+        vector::destroy_empty(table_with_length::remove(&mut self.buckets, num_buckets - 1));
     };
 
     res
@@ -481,12 +481,12 @@ Disclaimer: This function is costly. Use it at your own discretion.
 
 ## Function `swap_remove`
 
-Swap the ith element of the vector v with the last element and then pop the vector.
+Swap the ith element of the vector self with the last element and then pop the vector.
 This is O(1), but does not preserve ordering of elements in the vector.
 Aborts if i is out of bounds.
 
 
-
public fun swap_remove<T>(v: &mut big_vector::BigVector<T>, i: u64): T
+
public fun swap_remove<T>(self: &mut big_vector::BigVector<T>, i: u64): T
 
@@ -495,20 +495,20 @@ Aborts if i is out of bounds. Implementation -
public fun swap_remove<T>(v: &mut BigVector<T>, i: u64): T {
-    assert!(i < length(v), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
-    let last_val = pop_back(v);
+
public fun swap_remove<T>(self: &mut BigVector<T>, i: u64): T {
+    assert!(i < length(self), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
+    let last_val = pop_back(self);
     // if the requested value is the last one, return it
-    if (v.end_index == i) {
+    if (self.end_index == i) {
         return last_val
     };
     // because the lack of mem::swap, here we swap remove the requested value from the bucket
     // and append the last_val to the bucket then swap the last bucket val back
-    let bucket = table_with_length::borrow_mut(&mut v.buckets, i / v.bucket_size);
+    let bucket = table_with_length::borrow_mut(&mut self.buckets, i / self.bucket_size);
     let bucket_len = vector::length(bucket);
-    let val = vector::swap_remove(bucket, i % v.bucket_size);
+    let val = vector::swap_remove(bucket, i % self.bucket_size);
     vector::push_back(bucket, last_val);
-    vector::swap(bucket, i % v.bucket_size, bucket_len - 1);
+    vector::swap(bucket, i % self.bucket_size, bucket_len - 1);
     val
 }
 
@@ -521,11 +521,11 @@ Aborts if i is out of bounds. ## Function `swap` -Swap the elements at the i'th and j'th indices in the vector v. Will abort if either of i or j are out of bounds -for v. +Swap the elements at the i'th and j'th indices in the vector self. Will abort if either of i or j are out of bounds +for self. -
public fun swap<T>(v: &mut big_vector::BigVector<T>, i: u64, j: u64)
+
public fun swap<T>(self: &mut big_vector::BigVector<T>, i: u64, j: u64)
 
@@ -534,19 +534,19 @@ for v. Implementation -
public fun swap<T>(v: &mut BigVector<T>, i: u64, j: u64) {
-    assert!(i < length(v) && j < length(v), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
-    let i_bucket_index = i / v.bucket_size;
-    let j_bucket_index = j / v.bucket_size;
-    let i_vector_index = i % v.bucket_size;
-    let j_vector_index = j % v.bucket_size;
+
public fun swap<T>(self: &mut BigVector<T>, i: u64, j: u64) {
+    assert!(i < length(self) && j < length(self), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
+    let i_bucket_index = i / self.bucket_size;
+    let j_bucket_index = j / self.bucket_size;
+    let i_vector_index = i % self.bucket_size;
+    let j_vector_index = j % self.bucket_size;
     if (i_bucket_index == j_bucket_index) {
-        vector::swap(table_with_length::borrow_mut(&mut v.buckets, i_bucket_index), i_vector_index, j_vector_index);
+        vector::swap(table_with_length::borrow_mut(&mut self.buckets, i_bucket_index), i_vector_index, j_vector_index);
         return
     };
     // If i and j are in different buckets, take the buckets out first for easy mutation.
-    let bucket_i = table_with_length::remove(&mut v.buckets, i_bucket_index);
-    let bucket_j = table_with_length::remove(&mut v.buckets, j_bucket_index);
+    let bucket_i = table_with_length::remove(&mut self.buckets, i_bucket_index);
+    let bucket_j = table_with_length::remove(&mut self.buckets, j_bucket_index);
     // Get the elements from buckets by calling `swap_remove`.
     let element_i = vector::swap_remove(&mut bucket_i, i_vector_index);
     let element_j = vector::swap_remove(&mut bucket_j, j_vector_index);
@@ -559,8 +559,8 @@ for v.
     vector::swap(&mut bucket_i, i_vector_index, last_index_in_bucket_i);
     vector::swap(&mut bucket_j, j_vector_index, last_index_in_bucket_j);
     // Add back the buckets.
-    table_with_length::add(&mut v.buckets, i_bucket_index, bucket_i);
-    table_with_length::add(&mut v.buckets, j_bucket_index, bucket_j);
+    table_with_length::add(&mut self.buckets, i_bucket_index, bucket_i);
+    table_with_length::add(&mut self.buckets, j_bucket_index, bucket_j);
 }
 
@@ -572,11 +572,11 @@ for v. ## Function `reverse` -Reverse the order of the elements in the vector v in-place. +Reverse the order of the elements in the vector self in-place. Disclaimer: This function is costly. Use it at your own discretion. -
public fun reverse<T>(v: &mut big_vector::BigVector<T>)
+
public fun reverse<T>(self: &mut big_vector::BigVector<T>)
 
@@ -585,17 +585,17 @@ Disclaimer: This function is costly. Use it at your own discretion. Implementation -
public fun reverse<T>(v: &mut BigVector<T>) {
+
public fun reverse<T>(self: &mut BigVector<T>) {
     let new_buckets = vector[];
     let push_bucket = vector[];
-    let num_buckets = table_with_length::length(&v.buckets);
+    let num_buckets = table_with_length::length(&self.buckets);
     let num_buckets_left = num_buckets;
 
     while (num_buckets_left > 0) {
-        let pop_bucket = table_with_length::remove(&mut v.buckets, num_buckets_left - 1);
+        let pop_bucket = table_with_length::remove(&mut self.buckets, num_buckets_left - 1);
         vector::for_each_reverse(pop_bucket, |val| {
             vector::push_back(&mut push_bucket, val);
-            if (vector::length(&push_bucket) == v.bucket_size) {
+            if (vector::length(&push_bucket) == self.bucket_size) {
                 vector::push_back(&mut new_buckets, push_bucket);
                 push_bucket = vector[];
             };
@@ -611,9 +611,9 @@ Disclaimer: This function is costly. Use it at your own discretion.
 
     vector::reverse(&mut new_buckets);
     let i = 0;
-    assert!(table_with_length::length(&v.buckets) == 0, 0);
+    assert!(table_with_length::length(&self.buckets) == 0, 0);
     while (i < num_buckets) {
-        table_with_length::add(&mut v.buckets, i, vector::pop_back(&mut new_buckets));
+        table_with_length::add(&mut self.buckets, i, vector::pop_back(&mut new_buckets));
         i = i + 1;
     };
     vector::destroy_empty(new_buckets);
@@ -628,12 +628,12 @@ Disclaimer: This function is costly. Use it at your own discretion.
 
 ## Function `index_of`
 
-Return the index of the first occurrence of an element in v that is equal to e. Returns (true, index) if such an
+Return the index of the first occurrence of an element in self that is equal to e. Returns (true, index) if such an
 element was found, and (false, 0) otherwise.
 Disclaimer: This function is costly. Use it at your own discretion.
 
 
-
public fun index_of<T>(v: &big_vector::BigVector<T>, val: &T): (bool, u64)
+
public fun index_of<T>(self: &big_vector::BigVector<T>, val: &T): (bool, u64)
 
@@ -642,14 +642,14 @@ Disclaimer: This function is costly. Use it at your own discretion. Implementation -
public fun index_of<T>(v: &BigVector<T>, val: &T): (bool, u64) {
-    let num_buckets = table_with_length::length(&v.buckets);
+
public fun index_of<T>(self: &BigVector<T>, val: &T): (bool, u64) {
+    let num_buckets = table_with_length::length(&self.buckets);
     let bucket_index = 0;
     while (bucket_index < num_buckets) {
-        let cur = table_with_length::borrow(&v.buckets, bucket_index);
+        let cur = table_with_length::borrow(&self.buckets, bucket_index);
         let (found, i) = vector::index_of(cur, val);
         if (found) {
-            return (true, bucket_index * v.bucket_size + i)
+            return (true, bucket_index * self.bucket_size + i)
         };
         bucket_index = bucket_index + 1;
     };
@@ -665,11 +665,11 @@ Disclaimer: This function is costly. Use it at your own discretion.
 
 ## Function `contains`
 
-Return if an element equal to e exists in the vector v.
+Return if an element equal to e exists in the vector self.
 Disclaimer: This function is costly. Use it at your own discretion.
 
 
-
public fun contains<T>(v: &big_vector::BigVector<T>, val: &T): bool
+
public fun contains<T>(self: &big_vector::BigVector<T>, val: &T): bool
 
@@ -678,9 +678,9 @@ Disclaimer: This function is costly. Use it at your own discretion. Implementation -
public fun contains<T>(v: &BigVector<T>, val: &T): bool {
-    if (is_empty(v)) return false;
-    let (exist, _) = index_of(v, val);
+
public fun contains<T>(self: &BigVector<T>, val: &T): bool {
+    if (is_empty(self)) return false;
+    let (exist, _) = index_of(self, val);
     exist
 }
 
@@ -698,7 +698,7 @@ atomic view of the whole vector. Disclaimer: This function may be costly as the big vector may be huge in size. Use it at your own discretion. -
public fun to_vector<T: copy>(v: &big_vector::BigVector<T>): vector<T>
+
public fun to_vector<T: copy>(self: &big_vector::BigVector<T>): vector<T>
 
@@ -707,12 +707,12 @@ Disclaimer: This function may be costly as the big vector may be huge in size. U Implementation -
public fun to_vector<T: copy>(v: &BigVector<T>): vector<T> {
+
public fun to_vector<T: copy>(self: &BigVector<T>): vector<T> {
     let res = vector[];
-    let num_buckets = table_with_length::length(&v.buckets);
+    let num_buckets = table_with_length::length(&self.buckets);
     let i = 0;
     while (i < num_buckets) {
-        vector::append(&mut res, *table_with_length::borrow(&v.buckets, i));
+        vector::append(&mut res, *table_with_length::borrow(&self.buckets, i));
         i = i + 1;
     };
     res
@@ -730,7 +730,7 @@ Disclaimer: This function may be costly as the big vector may be huge in size. U
 Return the length of the vector.
 
 
-
public fun length<T>(v: &big_vector::BigVector<T>): u64
+
public fun length<T>(self: &big_vector::BigVector<T>): u64
 
@@ -739,8 +739,8 @@ Return the length of the vector. Implementation -
public fun length<T>(v: &BigVector<T>): u64 {
-    v.end_index
+
public fun length<T>(self: &BigVector<T>): u64 {
+    self.end_index
 }
 
@@ -755,7 +755,7 @@ Return the length of the vector. Return true if the vector v has no elements and false otherwise. -
public fun is_empty<T>(v: &big_vector::BigVector<T>): bool
+
public fun is_empty<T>(self: &big_vector::BigVector<T>): bool
 
@@ -764,8 +764,8 @@ Return true if the vector v has no elements and Implementation -
public fun is_empty<T>(v: &BigVector<T>): bool {
-    length(v) == 0
+
public fun is_empty<T>(self: &BigVector<T>): bool {
+    length(self) == 0
 }
 
@@ -876,13 +876,13 @@ Return true if the vector v has no elements and ### Function `destroy_empty` -
public fun destroy_empty<T>(v: big_vector::BigVector<T>)
+
public fun destroy_empty<T>(self: big_vector::BigVector<T>)
 
-
aborts_if !is_empty(v);
+
aborts_if !is_empty(self);
 
@@ -892,14 +892,14 @@ Return true if the vector v has no elements and ### Function `borrow` -
public fun borrow<T>(v: &big_vector::BigVector<T>, i: u64): &T
+
public fun borrow<T>(self: &big_vector::BigVector<T>, i: u64): &T
 
-
aborts_if i >= length(v);
-ensures result == spec_at(v, i);
+
aborts_if i >= length(self);
+ensures result == spec_at(self, i);
 
@@ -909,14 +909,14 @@ Return true if the vector v has no elements and ### Function `borrow_mut` -
public fun borrow_mut<T>(v: &mut big_vector::BigVector<T>, i: u64): &mut T
+
public fun borrow_mut<T>(self: &mut big_vector::BigVector<T>, i: u64): &mut T
 
-
aborts_if i >= length(v);
-ensures result == spec_at(v, i);
+
aborts_if i >= length(self);
+ensures result == spec_at(self, i);
 
@@ -926,7 +926,7 @@ Return true if the vector v has no elements and ### Function `append` -
public fun append<T: store>(lhs: &mut big_vector::BigVector<T>, other: big_vector::BigVector<T>)
+
public fun append<T: store>(self: &mut big_vector::BigVector<T>, other: big_vector::BigVector<T>)
 
@@ -942,19 +942,19 @@ Return true if the vector v has no elements and ### Function `push_back` -
public fun push_back<T: store>(v: &mut big_vector::BigVector<T>, val: T)
+
public fun push_back<T: store>(self: &mut big_vector::BigVector<T>, val: T)
 
-
let num_buckets = spec_table_len(v.buckets);
+
let num_buckets = spec_table_len(self.buckets);
 include PushbackAbortsIf<T>;
-ensures length(v) == length(old(v)) + 1;
-ensures v.end_index == old(v.end_index) + 1;
-ensures spec_at(v, v.end_index-1) == val;
-ensures forall i in 0..v.end_index-1: spec_at(v, i) == spec_at(old(v), i);
-ensures v.bucket_size == old(v).bucket_size;
+ensures length(self) == length(old(self)) + 1;
+ensures self.end_index == old(self.end_index) + 1;
+ensures spec_at(self, self.end_index-1) == val;
+ensures forall i in 0..self.end_index-1: spec_at(self, i) == spec_at(old(self), i);
+ensures self.bucket_size == old(self).bucket_size;
 
@@ -964,10 +964,10 @@ Return true if the vector v has no elements and
schema PushbackAbortsIf<T> {
-    v: BigVector<T>;
-    let num_buckets = spec_table_len(v.buckets);
-    aborts_if num_buckets * v.bucket_size > MAX_U64;
-    aborts_if v.end_index + 1 > MAX_U64;
+    self: BigVector<T>;
+    let num_buckets = spec_table_len(self.buckets);
+    aborts_if num_buckets * self.bucket_size > MAX_U64;
+    aborts_if self.end_index + 1 > MAX_U64;
 }
 
@@ -978,16 +978,16 @@ Return true if the vector v has no elements and ### Function `pop_back` -
public fun pop_back<T>(v: &mut big_vector::BigVector<T>): T
+
public fun pop_back<T>(self: &mut big_vector::BigVector<T>): T
 
-
aborts_if is_empty(v);
-ensures length(v) == length(old(v)) - 1;
-ensures result == old(spec_at(v, v.end_index-1));
-ensures forall i in 0..v.end_index: spec_at(v, i) == spec_at(old(v), i);
+
aborts_if is_empty(self);
+ensures length(self) == length(old(self)) - 1;
+ensures result == old(spec_at(self, self.end_index-1));
+ensures forall i in 0..self.end_index: spec_at(self, i) == spec_at(old(self), i);
 
@@ -997,7 +997,7 @@ Return true if the vector v has no elements and ### Function `remove` -
public fun remove<T>(v: &mut big_vector::BigVector<T>, i: u64): T
+
public fun remove<T>(self: &mut big_vector::BigVector<T>, i: u64): T
 
@@ -1013,16 +1013,16 @@ Return true if the vector v has no elements and ### Function `swap_remove` -
public fun swap_remove<T>(v: &mut big_vector::BigVector<T>, i: u64): T
+
public fun swap_remove<T>(self: &mut big_vector::BigVector<T>, i: u64): T
 
pragma verify_duration_estimate = 120;
-aborts_if i >= length(v);
-ensures length(v) == length(old(v)) - 1;
-ensures result == spec_at(old(v), i);
+aborts_if i >= length(self);
+ensures length(self) == length(old(self)) - 1;
+ensures result == spec_at(old(self), i);
 
@@ -1032,20 +1032,20 @@ Return true if the vector v has no elements and ### Function `swap` -
public fun swap<T>(v: &mut big_vector::BigVector<T>, i: u64, j: u64)
+
public fun swap<T>(self: &mut big_vector::BigVector<T>, i: u64, j: u64)
 
pragma verify_duration_estimate = 1000;
-aborts_if i >= length(v) || j >= length(v);
-ensures length(v) == length(old(v));
-ensures spec_at(v, i) == spec_at(old(v), j);
-ensures spec_at(v, j) == spec_at(old(v), i);
-ensures forall idx in 0..length(v)
+aborts_if i >= length(self) || j >= length(self);
+ensures length(self) == length(old(self));
+ensures spec_at(self, i) == spec_at(old(self), j);
+ensures spec_at(self, j) == spec_at(old(self), i);
+ensures forall idx in 0..length(self)
     where idx != i && idx != j:
-    spec_at(v, idx) == spec_at(old(v), idx);
+    spec_at(self, idx) == spec_at(old(self), idx);
 
@@ -1055,7 +1055,7 @@ Return true if the vector v has no elements and ### Function `reverse` -
public fun reverse<T>(v: &mut big_vector::BigVector<T>)
+
public fun reverse<T>(self: &mut big_vector::BigVector<T>)
 
@@ -1071,7 +1071,7 @@ Return true if the vector v has no elements and ### Function `index_of` -
public fun index_of<T>(v: &big_vector::BigVector<T>, val: &T): (bool, u64)
+
public fun index_of<T>(self: &big_vector::BigVector<T>, val: &T): (bool, u64)
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/bls12381.md b/aptos-move/framework/aptos-stdlib/doc/bls12381.md index 5f3f76d2527b1..da241d99c0969 100644 --- a/aptos-move/framework/aptos-stdlib/doc/bls12381.md +++ b/aptos-move/framework/aptos-stdlib/doc/bls12381.md @@ -323,7 +323,7 @@ The associated SK is 07416693b6b32c84abe45578728e2379f525729e5b94762435a31e65ecc Random signature generated by running cargo test -- bls12381_sample_signature --nocapture --include-ignored in crates/aptos-crypto. -The message signed is "Hello Supra!" and the associated SK is 07416693b6b32c84abe45578728e2379f525729e5b94762435a31e65ecc728da. +The message signed is "Hello Aptos!" and the associated SK is 07416693b6b32c84abe45578728e2379f525729e5b94762435a31e65ecc728da.
const RANDOM_SIGNATURE: vector<u8> = [160, 26, 101, 133, 79, 152, 125, 52, 52, 20, 155, 127, 8, 247, 7, 48, 227, 11, 36, 25, 132, 232, 113, 43, 194, 172, 168, 133, 214, 50, 170, 252, 237, 76, 63, 102, 18, 9, 222, 187, 107, 28, 134, 1, 50, 102, 35, 204, 22, 202, 47, 108, 158, 220, 83, 183, 184, 139, 116, 53, 251, 107, 5, 221, 236, 228, 24, 210, 195, 77, 198, 172, 162, 245, 161, 26, 121, 230, 119, 116, 88, 44, 20, 8, 74, 1, 220, 183, 130, 14, 76, 180, 186, 208, 234, 141];
diff --git a/aptos-move/framework/aptos-stdlib/doc/capability.md b/aptos-move/framework/aptos-stdlib/doc/capability.md
index 5daf02b7ad916..2d2aef644e829 100644
--- a/aptos-move/framework/aptos-stdlib/doc/capability.md
+++ b/aptos-move/framework/aptos-stdlib/doc/capability.md
@@ -400,7 +400,7 @@ Returns the root address associated with the given capability token. Only the ow
 of the feature can do this.
 
 
-
public fun root_addr<Feature>(cap: capability::Cap<Feature>, _feature_witness: &Feature): address
+
public fun root_addr<Feature>(self: capability::Cap<Feature>, _feature_witness: &Feature): address
 
@@ -409,8 +409,8 @@ of the feature can do this. Implementation -
public fun root_addr<Feature>(cap: Cap<Feature>, _feature_witness: &Feature): address {
-    cap.root
+
public fun root_addr<Feature>(self: Cap<Feature>, _feature_witness: &Feature): address {
+    self.root
 }
 
@@ -425,7 +425,7 @@ of the feature can do this. Returns the root address associated with the given linear capability token. -
public fun linear_root_addr<Feature>(cap: capability::LinearCap<Feature>, _feature_witness: &Feature): address
+
public fun linear_root_addr<Feature>(self: capability::LinearCap<Feature>, _feature_witness: &Feature): address
 
@@ -434,8 +434,8 @@ Returns the root address associated with the given linear capability token. Implementation -
public fun linear_root_addr<Feature>(cap: LinearCap<Feature>, _feature_witness: &Feature): address {
-    cap.root
+
public fun linear_root_addr<Feature>(self: LinearCap<Feature>, _feature_witness: &Feature): address {
+    self.root
 }
 
@@ -451,7 +451,7 @@ Registers a delegation relation. If the relation already exists, this function d nothing. -
public fun delegate<Feature>(cap: capability::Cap<Feature>, _feature_witness: &Feature, to: &signer)
+
public fun delegate<Feature>(self: capability::Cap<Feature>, _feature_witness: &Feature, to: &signer)
 
@@ -460,12 +460,12 @@ nothing. Implementation -
public fun delegate<Feature>(cap: Cap<Feature>, _feature_witness: &Feature, to: &signer)
+
public fun delegate<Feature>(self: Cap<Feature>, _feature_witness: &Feature, to: &signer)
 acquires CapState {
     let addr = signer::address_of(to);
     if (exists<CapDelegateState<Feature>>(addr)) return;
-    move_to(to, CapDelegateState<Feature> { root: cap.root });
-    add_element(&mut borrow_global_mut<CapState<Feature>>(cap.root).delegates, addr);
+    move_to(to, CapDelegateState<Feature> { root: self.root });
+    add_element(&mut borrow_global_mut<CapState<Feature>>(self.root).delegates, addr);
 }
 
@@ -480,7 +480,7 @@ nothing. Revokes a delegation relation. If no relation exists, this function does nothing. -
public fun revoke<Feature>(cap: capability::Cap<Feature>, _feature_witness: &Feature, from: address)
+
public fun revoke<Feature>(self: capability::Cap<Feature>, _feature_witness: &Feature, from: address)
 
@@ -489,12 +489,12 @@ Revokes a delegation relation. If no relation exists, this function does nothing Implementation -
public fun revoke<Feature>(cap: Cap<Feature>, _feature_witness: &Feature, from: address)
+
public fun revoke<Feature>(self: Cap<Feature>, _feature_witness: &Feature, from: address)
 acquires CapState, CapDelegateState
 {
     if (!exists<CapDelegateState<Feature>>(from)) return;
     let CapDelegateState { root: _root } = move_from<CapDelegateState<Feature>>(from);
-    remove_element(&mut borrow_global_mut<CapState<Feature>>(cap.root).delegates, &from);
+    remove_element(&mut borrow_global_mut<CapState<Feature>>(self.root).delegates, &from);
 }
 
@@ -676,7 +676,7 @@ Helper specification function to check whether a delegated capability exists at ### Function `delegate` -
public fun delegate<Feature>(cap: capability::Cap<Feature>, _feature_witness: &Feature, to: &signer)
+
public fun delegate<Feature>(self: capability::Cap<Feature>, _feature_witness: &Feature, to: &signer)
 
@@ -684,8 +684,8 @@ Helper specification function to check whether a delegated capability exists at
let addr = signer::address_of(to);
 ensures spec_has_delegate_cap<Feature>(addr);
-ensures !old(spec_has_delegate_cap<Feature>(addr)) ==> global<CapDelegateState<Feature>>(addr).root == cap.root;
-ensures !old(spec_has_delegate_cap<Feature>(addr)) ==> vector::spec_contains(spec_delegates<Feature>(cap.root), addr);
+ensures !old(spec_has_delegate_cap<Feature>(addr)) ==> global<CapDelegateState<Feature>>(addr).root == self.root;
+ensures !old(spec_has_delegate_cap<Feature>(addr)) ==> vector::spec_contains(spec_delegates<Feature>(self.root), addr);
 
@@ -695,7 +695,7 @@ Helper specification function to check whether a delegated capability exists at ### Function `revoke` -
public fun revoke<Feature>(cap: capability::Cap<Feature>, _feature_witness: &Feature, from: address)
+
public fun revoke<Feature>(self: capability::Cap<Feature>, _feature_witness: &Feature, from: address)
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/comparator.md b/aptos-move/framework/aptos-stdlib/doc/comparator.md index 1949f5812c5aa..527e13d59a077 100644 --- a/aptos-move/framework/aptos-stdlib/doc/comparator.md +++ b/aptos-move/framework/aptos-stdlib/doc/comparator.md @@ -92,7 +92,7 @@ Provides a framework for comparing two elements -
public fun is_equal(result: &comparator::Result): bool
+
public fun is_equal(self: &comparator::Result): bool
 
@@ -101,8 +101,8 @@ Provides a framework for comparing two elements Implementation -
public fun is_equal(result: &Result): bool {
-    result.inner == EQUAL
+
public fun is_equal(self: &Result): bool {
+    self.inner == EQUAL
 }
 
@@ -116,7 +116,7 @@ Provides a framework for comparing two elements -
public fun is_smaller_than(result: &comparator::Result): bool
+
public fun is_smaller_than(self: &comparator::Result): bool
 
@@ -125,8 +125,8 @@ Provides a framework for comparing two elements Implementation -
public fun is_smaller_than(result: &Result): bool {
-    result.inner == SMALLER
+
public fun is_smaller_than(self: &Result): bool {
+    self.inner == SMALLER
 }
 
@@ -140,7 +140,7 @@ Provides a framework for comparing two elements -
public fun is_greater_than(result: &comparator::Result): bool
+
public fun is_greater_than(self: &comparator::Result): bool
 
@@ -149,8 +149,8 @@ Provides a framework for comparing two elements Implementation -
public fun is_greater_than(result: &Result): bool {
-    result.inner == GREATER
+
public fun is_greater_than(self: &Result): bool {
+    self.inner == GREATER
 }
 
@@ -268,14 +268,14 @@ Provides a framework for comparing two elements ### Function `is_equal` -
public fun is_equal(result: &comparator::Result): bool
+
public fun is_equal(self: &comparator::Result): bool
 
aborts_if false;
-let res = result;
+let res = self;
 ensures result == (res.inner == EQUAL);
 
@@ -286,14 +286,14 @@ Provides a framework for comparing two elements ### Function `is_smaller_than` -
public fun is_smaller_than(result: &comparator::Result): bool
+
public fun is_smaller_than(self: &comparator::Result): bool
 
aborts_if false;
-let res = result;
+let res = self;
 ensures result == (res.inner == SMALLER);
 
@@ -304,14 +304,14 @@ Provides a framework for comparing two elements ### Function `is_greater_than` -
public fun is_greater_than(result: &comparator::Result): bool
+
public fun is_greater_than(self: &comparator::Result): bool
 
aborts_if false;
-let res = result;
+let res = self;
 ensures result == (res.inner == GREATER);
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/copyable_any.md b/aptos-move/framework/aptos-stdlib/doc/copyable_any.md index 7ac120437e652..ab34a75e7e8ed 100644 --- a/aptos-move/framework/aptos-stdlib/doc/copyable_any.md +++ b/aptos-move/framework/aptos-stdlib/doc/copyable_any.md @@ -110,7 +110,7 @@ also required from T. Unpack a value from the Any representation. This aborts if the value has not the expected type T. -
public fun unpack<T>(x: copyable_any::Any): T
+
public fun unpack<T>(self: copyable_any::Any): T
 
@@ -119,9 +119,9 @@ Unpack a value from the Any Implementation -
public fun unpack<T>(x: Any): T {
-    assert!(type_info::type_name<T>() == x.type_name, error::invalid_argument(ETYPE_MISMATCH));
-    from_bytes<T>(x.data)
+
public fun unpack<T>(self: Any): T {
+    assert!(type_info::type_name<T>() == self.type_name, error::invalid_argument(ETYPE_MISMATCH));
+    from_bytes<T>(self.data)
 }
 
@@ -136,7 +136,7 @@ Unpack a value from the Any Returns the type name of this Any -
public fun type_name(x: &copyable_any::Any): &string::String
+
public fun type_name(self: &copyable_any::Any): &string::String
 
@@ -145,8 +145,8 @@ Returns the type name of this Any Implementation -
public fun type_name(x: &Any): &String {
-    &x.type_name
+
public fun type_name(self: &Any): &String {
+    &self.type_name
 }
 
@@ -186,14 +186,14 @@ Returns the type name of this Any ### Function `unpack` -
public fun unpack<T>(x: copyable_any::Any): T
+
public fun unpack<T>(self: copyable_any::Any): T
 
include UnpackAbortsIf<T>;
-ensures result == from_bcs::deserialize<T>(x.data);
+ensures result == from_bcs::deserialize<T>(self.data);
 
@@ -203,9 +203,9 @@ Returns the type name of this Any
schema UnpackAbortsIf<T> {
-    x: Any;
-    aborts_if type_info::type_name<T>() != x.type_name;
-    aborts_if !from_bcs::deserializable<T>(x.data);
+    self: Any;
+    aborts_if type_info::type_name<T>() != self.type_name;
+    aborts_if !from_bcs::deserializable<T>(self.data);
 }
 
@@ -216,14 +216,14 @@ Returns the type name of this Any ### Function `type_name` -
public fun type_name(x: &copyable_any::Any): &string::String
+
public fun type_name(self: &copyable_any::Any): &string::String
 
aborts_if false;
-ensures result == x.type_name;
+ensures result == self.type_name;
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/ed25519.md b/aptos-move/framework/aptos-stdlib/doc/ed25519.md index 205f656ec3d1c..ed3bb72bd34fe 100644 --- a/aptos-move/framework/aptos-stdlib/doc/ed25519.md +++ b/aptos-move/framework/aptos-stdlib/doc/ed25519.md @@ -217,7 +217,7 @@ The size of a serialized signature, in bytes. -The identifier of the Ed25519 signature scheme, which is used when deriving Supra authentication keys by hashing +The identifier of the Ed25519 signature scheme, which is used when deriving Aptos authentication keys by hashing it together with an Ed25519 public key. @@ -557,7 +557,7 @@ Helper method to construct a SignedMessage struct. ## Function `unvalidated_public_key_to_authentication_key` -Derives the Supra-specific authentication key of the given Ed25519 public key. +Derives the Aptos-specific authentication key of the given Ed25519 public key.
public fun unvalidated_public_key_to_authentication_key(pk: &ed25519::UnvalidatedPublicKey): vector<u8>
@@ -582,7 +582,7 @@ Derives the Supra-specific authentication key of the given Ed25519 public key.
 
 ## Function `validated_public_key_to_authentication_key`
 
-Derives the Supra-specific authentication key of the given Ed25519 public key.
+Derives the Aptos-specific authentication key of the given Ed25519 public key.
 
 
 
public fun validated_public_key_to_authentication_key(pk: &ed25519::ValidatedPublicKey): vector<u8>
@@ -607,7 +607,7 @@ Derives the Supra-specific authentication key of the given Ed25519 public key.
 
 ## Function `public_key_bytes_to_authentication_key`
 
-Derives the Supra-specific authentication key of the given Ed25519 public key.
+Derives the Aptos-specific authentication key of the given Ed25519 public key.
 
 
 
fun public_key_bytes_to_authentication_key(pk_bytes: vector<u8>): vector<u8>
diff --git a/aptos-move/framework/aptos-stdlib/doc/fixed_point64.md b/aptos-move/framework/aptos-stdlib/doc/fixed_point64.md
index 22571462f6c30..d13fba14f020f 100644
--- a/aptos-move/framework/aptos-stdlib/doc/fixed_point64.md
+++ b/aptos-move/framework/aptos-stdlib/doc/fixed_point64.md
@@ -168,10 +168,10 @@ The computed ratio when converting to a sub(x: fixed_point64::FixedPoint64, y: fixed_point64::FixedPoint64): fixed_point64::FixedPoint64
+
public fun sub(self: fixed_point64::FixedPoint64, y: fixed_point64::FixedPoint64): fixed_point64::FixedPoint64
 
@@ -180,8 +180,8 @@ Returns x - y. x must be not less than y. Implementation -
public fun sub(x: FixedPoint64, y: FixedPoint64): FixedPoint64 {
-    let x_raw = get_raw_value(x);
+
public fun sub(self: FixedPoint64, y: FixedPoint64): FixedPoint64 {
+    let x_raw = get_raw_value(self);
     let y_raw = get_raw_value(y);
     assert!(x_raw >= y_raw, ENEGATIVE_RESULT);
     create_from_raw_value(x_raw - y_raw)
@@ -196,10 +196,10 @@ Returns x - y. x must be not less than y.
 
 ## Function `add`
 
-Returns x + y. The result cannot be greater than MAX_U128.
+Returns self + y. The result cannot be greater than MAX_U128.
 
 
-
public fun add(x: fixed_point64::FixedPoint64, y: fixed_point64::FixedPoint64): fixed_point64::FixedPoint64
+
public fun add(self: fixed_point64::FixedPoint64, y: fixed_point64::FixedPoint64): fixed_point64::FixedPoint64
 
@@ -208,8 +208,8 @@ Returns x + y. The result cannot be greater than MAX_U128. Implementation -
public fun add(x: FixedPoint64, y: FixedPoint64): FixedPoint64 {
-    let x_raw = get_raw_value(x);
+
public fun add(self: FixedPoint64, y: FixedPoint64): FixedPoint64 {
+    let x_raw = get_raw_value(self);
     let y_raw = get_raw_value(y);
     let result = (x_raw as u256) + (y_raw as u256);
     assert!(result <= MAX_U128, ERATIO_OUT_OF_RANGE);
@@ -372,7 +372,7 @@ adding or subtracting FixedPoint64 values, can be done using the raw
 values directly.
 
 
-
public fun get_raw_value(num: fixed_point64::FixedPoint64): u128
+
public fun get_raw_value(self: fixed_point64::FixedPoint64): u128
 
@@ -381,8 +381,8 @@ values directly. Implementation -
public fun get_raw_value(num: FixedPoint64): u128 {
-    num.value
+
public fun get_raw_value(self: FixedPoint64): u128 {
+    self.value
 }
 
@@ -397,7 +397,7 @@ values directly. Returns true if the ratio is zero. -
public fun is_zero(num: fixed_point64::FixedPoint64): bool
+
public fun is_zero(self: fixed_point64::FixedPoint64): bool
 
@@ -406,8 +406,8 @@ Returns true if the ratio is zero. Implementation -
public fun is_zero(num: FixedPoint64): bool {
-    num.value == 0
+
public fun is_zero(self: FixedPoint64): bool {
+    self.value == 0
 }
 
@@ -477,10 +477,10 @@ Returns the larger of the two FixedPoint64 numbers. ## Function `less_or_equal` -Returns true if num1 <= num2 +Returns true if self <= num2 -
public fun less_or_equal(num1: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
public fun less_or_equal(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
 
@@ -489,8 +489,8 @@ Returns true if num1 <= num2 Implementation -
public fun less_or_equal(num1: FixedPoint64, num2: FixedPoint64): bool {
-    num1.value <= num2.value
+
public fun less_or_equal(self: FixedPoint64, num2: FixedPoint64): bool {
+    self.value <= num2.value
 }
 
@@ -502,10 +502,10 @@ Returns true if num1 <= num2 ## Function `less` -Returns true if num1 < num2 +Returns true if self < num2 -
public fun less(num1: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
public fun less(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
 
@@ -514,8 +514,8 @@ Returns true if num1 < num2 Implementation -
public fun less(num1: FixedPoint64, num2: FixedPoint64): bool {
-    num1.value < num2.value
+
public fun less(self: FixedPoint64, num2: FixedPoint64): bool {
+    self.value < num2.value
 }
 
@@ -527,10 +527,10 @@ Returns true if num1 < num2 ## Function `greater_or_equal` -Returns true if num1 >= num2 +Returns true if self >= num2 -
public fun greater_or_equal(num1: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
public fun greater_or_equal(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
 
@@ -539,8 +539,8 @@ Returns true if num1 >= num2 Implementation -
public fun greater_or_equal(num1: FixedPoint64, num2: FixedPoint64): bool {
-    num1.value >= num2.value
+
public fun greater_or_equal(self: FixedPoint64, num2: FixedPoint64): bool {
+    self.value >= num2.value
 }
 
@@ -552,10 +552,10 @@ Returns true if num1 >= num2 ## Function `greater` -Returns true if num1 > num2 +Returns true if self > num2 -
public fun greater(num1: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
public fun greater(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
 
@@ -564,8 +564,8 @@ Returns true if num1 > num2 Implementation -
public fun greater(num1: FixedPoint64, num2: FixedPoint64): bool {
-    num1.value > num2.value
+
public fun greater(self: FixedPoint64, num2: FixedPoint64): bool {
+    self.value > num2.value
 }
 
@@ -577,10 +577,10 @@ Returns true if num1 > num2 ## Function `equal` -Returns true if num1 = num2 +Returns true if self = num2 -
public fun equal(num1: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
public fun equal(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
 
@@ -589,8 +589,8 @@ Returns true if num1 = num2 Implementation -
public fun equal(num1: FixedPoint64, num2: FixedPoint64): bool {
-    num1.value == num2.value
+
public fun equal(self: FixedPoint64, num2: FixedPoint64): bool {
+    self.value == num2.value
 }
 
@@ -602,10 +602,10 @@ Returns true if num1 = num2 ## Function `almost_equal` -Returns true if num1 almost equals to num2, which means abs(num1-num2) <= precision +Returns true if self almost equals to num2, which means abs(self-num2) <= precision -
public fun almost_equal(num1: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64, precision: fixed_point64::FixedPoint64): bool
+
public fun almost_equal(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64, precision: fixed_point64::FixedPoint64): bool
 
@@ -614,11 +614,11 @@ Returns true if num1 almost equals to num2, which means abs(num1-num2) <= precis Implementation -
public fun almost_equal(num1: FixedPoint64, num2: FixedPoint64, precision: FixedPoint64): bool {
-    if (num1.value > num2.value) {
-        (num1.value - num2.value <= precision.value)
+
public fun almost_equal(self: FixedPoint64, num2: FixedPoint64, precision: FixedPoint64): bool {
+    if (self.value > num2.value) {
+        (self.value - num2.value <= precision.value)
     } else {
-        (num2.value - num1.value <= precision.value)
+        (num2.value - self.value <= precision.value)
     }
 }
 
@@ -661,7 +661,7 @@ Create a fixedpoint value from a u128 value. Returns the largest integer less than or equal to a given number. -
public fun floor(num: fixed_point64::FixedPoint64): u128
+
public fun floor(self: fixed_point64::FixedPoint64): u128
 
@@ -670,8 +670,8 @@ Returns the largest integer less than or equal to a given number. Implementation -
public fun floor(num: FixedPoint64): u128 {
-    num.value >> 64
+
public fun floor(self: FixedPoint64): u128 {
+    self.value >> 64
 }
 
@@ -686,7 +686,7 @@ Returns the largest integer less than or equal to a given number. Rounds up the given FixedPoint64 to the next largest integer. -
public fun ceil(num: fixed_point64::FixedPoint64): u128
+
public fun ceil(self: fixed_point64::FixedPoint64): u128
 
@@ -695,9 +695,9 @@ Rounds up the given FixedPoint64 to the next largest integer. Implementation -
public fun ceil(num: FixedPoint64): u128 {
-    let floored_num = floor(num) << 64;
-    if (num.value == floored_num) {
+
public fun ceil(self: FixedPoint64): u128 {
+    let floored_num = floor(self) << 64;
+    if (self.value == floored_num) {
         return floored_num >> 64
     };
     let val = ((floored_num as u256) + (1 << 64));
@@ -716,7 +716,7 @@ Rounds up the given FixedPoint64 to the next largest integer.
 Returns the value of a FixedPoint64 to the nearest integer.
 
 
-
public fun round(num: fixed_point64::FixedPoint64): u128
+
public fun round(self: fixed_point64::FixedPoint64): u128
 
@@ -725,13 +725,13 @@ Returns the value of a FixedPoint64 to the nearest integer. Implementation -
public fun round(num: FixedPoint64): u128 {
-    let floored_num = floor(num) << 64;
+
public fun round(self: FixedPoint64): u128 {
+    let floored_num = floor(self) << 64;
     let boundary = floored_num + ((1 << 64) / 2);
-    if (num.value < boundary) {
+    if (self.value < boundary) {
         floored_num >> 64
     } else {
-        ceil(num)
+        ceil(self)
     }
 }
 
@@ -757,15 +757,15 @@ Returns the value of a FixedPoint64 to the nearest integer. ### Function `sub` -
public fun sub(x: fixed_point64::FixedPoint64, y: fixed_point64::FixedPoint64): fixed_point64::FixedPoint64
+
public fun sub(self: fixed_point64::FixedPoint64, y: fixed_point64::FixedPoint64): fixed_point64::FixedPoint64
 
pragma opaque;
-aborts_if x.value < y.value with ENEGATIVE_RESULT;
-ensures result.value == x.value - y.value;
+aborts_if self.value < y.value with ENEGATIVE_RESULT;
+ensures result.value == self.value - y.value;
 
@@ -775,15 +775,15 @@ Returns the value of a FixedPoint64 to the nearest integer. ### Function `add` -
public fun add(x: fixed_point64::FixedPoint64, y: fixed_point64::FixedPoint64): fixed_point64::FixedPoint64
+
public fun add(self: fixed_point64::FixedPoint64, y: fixed_point64::FixedPoint64): fixed_point64::FixedPoint64
 
pragma opaque;
-aborts_if (x.value as u256) + (y.value as u256) > MAX_U128 with ERATIO_OUT_OF_RANGE;
-ensures result.value == x.value + y.value;
+aborts_if (self.value as u256) + (y.value as u256) > MAX_U128 with ERATIO_OUT_OF_RANGE;
+ensures result.value == self.value + y.value;
 
@@ -1010,7 +1010,7 @@ Returns the value of a FixedPoint64 to the nearest integer. ### Function `less_or_equal` -
public fun less_or_equal(num1: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
public fun less_or_equal(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
 
@@ -1018,7 +1018,7 @@ Returns the value of a FixedPoint64 to the nearest integer.
pragma opaque;
 aborts_if false;
-ensures result == spec_less_or_equal(num1, num2);
+ensures result == spec_less_or_equal(self, num2);
 
@@ -1027,8 +1027,8 @@ Returns the value of a FixedPoint64 to the nearest integer. -
fun spec_less_or_equal(num1: FixedPoint64, num2: FixedPoint64): bool {
-   num1.value <= num2.value
+
fun spec_less_or_equal(self: FixedPoint64, num2: FixedPoint64): bool {
+   self.value <= num2.value
 }
 
@@ -1039,7 +1039,7 @@ Returns the value of a FixedPoint64 to the nearest integer. ### Function `less` -
public fun less(num1: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
public fun less(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
 
@@ -1047,7 +1047,7 @@ Returns the value of a FixedPoint64 to the nearest integer.
pragma opaque;
 aborts_if false;
-ensures result == spec_less(num1, num2);
+ensures result == spec_less(self, num2);
 
@@ -1056,8 +1056,8 @@ Returns the value of a FixedPoint64 to the nearest integer. -
fun spec_less(num1: FixedPoint64, num2: FixedPoint64): bool {
-   num1.value < num2.value
+
fun spec_less(self: FixedPoint64, num2: FixedPoint64): bool {
+   self.value < num2.value
 }
 
@@ -1068,7 +1068,7 @@ Returns the value of a FixedPoint64 to the nearest integer. ### Function `greater_or_equal` -
public fun greater_or_equal(num1: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
public fun greater_or_equal(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
 
@@ -1076,7 +1076,7 @@ Returns the value of a FixedPoint64 to the nearest integer.
pragma opaque;
 aborts_if false;
-ensures result == spec_greater_or_equal(num1, num2);
+ensures result == spec_greater_or_equal(self, num2);
 
@@ -1085,8 +1085,8 @@ Returns the value of a FixedPoint64 to the nearest integer. -
fun spec_greater_or_equal(num1: FixedPoint64, num2: FixedPoint64): bool {
-   num1.value >= num2.value
+
fun spec_greater_or_equal(self: FixedPoint64, num2: FixedPoint64): bool {
+   self.value >= num2.value
 }
 
@@ -1097,7 +1097,7 @@ Returns the value of a FixedPoint64 to the nearest integer. ### Function `greater` -
public fun greater(num1: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
public fun greater(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
 
@@ -1105,7 +1105,7 @@ Returns the value of a FixedPoint64 to the nearest integer.
pragma opaque;
 aborts_if false;
-ensures result == spec_greater(num1, num2);
+ensures result == spec_greater(self, num2);
 
@@ -1114,8 +1114,8 @@ Returns the value of a FixedPoint64 to the nearest integer. -
fun spec_greater(num1: FixedPoint64, num2: FixedPoint64): bool {
-   num1.value > num2.value
+
fun spec_greater(self: FixedPoint64, num2: FixedPoint64): bool {
+   self.value > num2.value
 }
 
@@ -1126,7 +1126,7 @@ Returns the value of a FixedPoint64 to the nearest integer. ### Function `equal` -
public fun equal(num1: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
public fun equal(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
 
@@ -1134,7 +1134,7 @@ Returns the value of a FixedPoint64 to the nearest integer.
pragma opaque;
 aborts_if false;
-ensures result == spec_equal(num1, num2);
+ensures result == spec_equal(self, num2);
 
@@ -1143,8 +1143,8 @@ Returns the value of a FixedPoint64 to the nearest integer. -
fun spec_equal(num1: FixedPoint64, num2: FixedPoint64): bool {
-   num1.value == num2.value
+
fun spec_equal(self: FixedPoint64, num2: FixedPoint64): bool {
+   self.value == num2.value
 }
 
@@ -1155,7 +1155,7 @@ Returns the value of a FixedPoint64 to the nearest integer. ### Function `almost_equal` -
public fun almost_equal(num1: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64, precision: fixed_point64::FixedPoint64): bool
+
public fun almost_equal(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64, precision: fixed_point64::FixedPoint64): bool
 
@@ -1163,7 +1163,7 @@ Returns the value of a FixedPoint64 to the nearest integer.
pragma opaque;
 aborts_if false;
-ensures result == spec_almost_equal(num1, num2, precision);
+ensures result == spec_almost_equal(self, num2, precision);
 
@@ -1172,11 +1172,11 @@ Returns the value of a FixedPoint64 to the nearest integer. -
fun spec_almost_equal(num1: FixedPoint64, num2: FixedPoint64, precision: FixedPoint64): bool {
-   if (num1.value > num2.value) {
-       (num1.value - num2.value <= precision.value)
+
fun spec_almost_equal(self: FixedPoint64, num2: FixedPoint64, precision: FixedPoint64): bool {
+   if (self.value > num2.value) {
+       (self.value - num2.value <= precision.value)
    } else {
-       (num2.value - num1.value <= precision.value)
+       (num2.value - self.value <= precision.value)
    }
 }
 
@@ -1230,7 +1230,7 @@ Returns the value of a FixedPoint64 to the nearest integer. ### Function `floor` -
public fun floor(num: fixed_point64::FixedPoint64): u128
+
public fun floor(self: fixed_point64::FixedPoint64): u128
 
@@ -1238,7 +1238,7 @@ Returns the value of a FixedPoint64 to the nearest integer.
pragma opaque;
 aborts_if false;
-ensures result == spec_floor(num);
+ensures result == spec_floor(self);
 
@@ -1247,12 +1247,12 @@ Returns the value of a FixedPoint64 to the nearest integer. -
fun spec_floor(val: FixedPoint64): u128 {
-   let fractional = val.value % (1 << 64);
+
fun spec_floor(self: FixedPoint64): u128 {
+   let fractional = self.value % (1 << 64);
    if (fractional == 0) {
-       val.value >> 64
+       self.value >> 64
    } else {
-       (val.value - fractional) >> 64
+       (self.value - fractional) >> 64
    }
 }
 
@@ -1264,7 +1264,7 @@ Returns the value of a FixedPoint64 to the nearest integer. ### Function `ceil` -
public fun ceil(num: fixed_point64::FixedPoint64): u128
+
public fun ceil(self: fixed_point64::FixedPoint64): u128
 
@@ -1273,7 +1273,7 @@ Returns the value of a FixedPoint64 to the nearest integer.
pragma verify_duration_estimate = 1000;
 pragma opaque;
 aborts_if false;
-ensures result == spec_ceil(num);
+ensures result == spec_ceil(self);
 
@@ -1282,13 +1282,13 @@ Returns the value of a FixedPoint64 to the nearest integer. -
fun spec_ceil(val: FixedPoint64): u128 {
-   let fractional = val.value % (1 << 64);
+
fun spec_ceil(self: FixedPoint64): u128 {
+   let fractional = self.value % (1 << 64);
    let one = 1 << 64;
    if (fractional == 0) {
-       val.value >> 64
+       self.value >> 64
    } else {
-       (val.value - fractional + one) >> 64
+       (self.value - fractional + one) >> 64
    }
 }
 
@@ -1300,7 +1300,7 @@ Returns the value of a FixedPoint64 to the nearest integer. ### Function `round` -
public fun round(num: fixed_point64::FixedPoint64): u128
+
public fun round(self: fixed_point64::FixedPoint64): u128
 
@@ -1308,7 +1308,7 @@ Returns the value of a FixedPoint64 to the nearest integer.
pragma opaque;
 aborts_if false;
-ensures result == spec_round(num);
+ensures result == spec_round(self);
 
@@ -1317,14 +1317,14 @@ Returns the value of a FixedPoint64 to the nearest integer. -
fun spec_round(val: FixedPoint64): u128 {
-   let fractional = val.value % (1 << 64);
+
fun spec_round(self: FixedPoint64): u128 {
+   let fractional = self.value % (1 << 64);
    let boundary = (1 << 64) / 2;
    let one = 1 << 64;
    if (fractional < boundary) {
-       (val.value - fractional) >> 64
+       (self.value - fractional) >> 64
    } else {
-       (val.value - fractional + one) >> 64
+       (self.value - fractional + one) >> 64
    }
 }
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/math128.md b/aptos-move/framework/aptos-stdlib/doc/math128.md index 0160568fc30b7..1f84365d1a599 100644 --- a/aptos-move/framework/aptos-stdlib/doc/math128.md +++ b/aptos-move/framework/aptos-stdlib/doc/math128.md @@ -11,6 +11,7 @@ Standard math utilities missing in the Move Language. - [Function `min`](#0x1_math128_min) - [Function `average`](#0x1_math128_average) - [Function `gcd`](#0x1_math128_gcd) +- [Function `lcm`](#0x1_math128_lcm) - [Function `mul_div`](#0x1_math128_mul_div) - [Function `clamp`](#0x1_math128_clamp) - [Function `pow`](#0x1_math128_pow) @@ -159,6 +160,35 @@ Return greatest common divisor of a & b, via the Eucli + + + + +## Function `lcm` + +Return least common multiple of a & b + + +
public fun lcm(a: u128, b: u128): u128
+
+ + + +
+Implementation + + +
public inline fun lcm(a: u128, b: u128): u128 {
+    if (a == 0 || b == 0) {
+        0
+    } else {
+        a / gcd(a, b) * b
+    }
+}
+
+ + +
diff --git a/aptos-move/framework/aptos-stdlib/doc/math64.md b/aptos-move/framework/aptos-stdlib/doc/math64.md index b40a67a25860f..bbccd3c2ab032 100644 --- a/aptos-move/framework/aptos-stdlib/doc/math64.md +++ b/aptos-move/framework/aptos-stdlib/doc/math64.md @@ -11,6 +11,7 @@ Standard math utilities missing in the Move Language. - [Function `min`](#0x1_math64_min) - [Function `average`](#0x1_math64_average) - [Function `gcd`](#0x1_math64_gcd) +- [Function `lcm`](#0x1_math64_lcm) - [Function `mul_div`](#0x1_math64_mul_div) - [Function `clamp`](#0x1_math64_clamp) - [Function `pow`](#0x1_math64_pow) @@ -157,6 +158,35 @@ Return greatest common divisor of a & b, via the Eucli + + + + +## Function `lcm` + +Returns least common multiple of a & b. + + +
public fun lcm(a: u64, b: u64): u64
+
+ + + +
+Implementation + + +
public inline fun lcm(a: u64, b: u64): u64 {
+    if (a == 0 || b == 0) {
+        0
+    } else {
+        a / gcd(a, b) * b
+    }
+}
+
+ + +
diff --git a/aptos-move/framework/aptos-stdlib/doc/multi_ed25519.md b/aptos-move/framework/aptos-stdlib/doc/multi_ed25519.md index 68dd8db47685e..72f0be430bed1 100644 --- a/aptos-move/framework/aptos-stdlib/doc/multi_ed25519.md +++ b/aptos-move/framework/aptos-stdlib/doc/multi_ed25519.md @@ -193,7 +193,7 @@ Wrong number of bytes were given as input when deserializing an Ed25519 signatur -The identifier of the MultiEd25519 signature scheme, which is used when deriving Supra authentication keys by hashing +The identifier of the MultiEd25519 signature scheme, which is used when deriving Aptos authentication keys by hashing it together with an MultiEd25519 public key. @@ -645,7 +645,7 @@ proof of private key ownership when rotating authentication keys. ## Function `unvalidated_public_key_to_authentication_key` -Derives the Supra-specific authentication key of the given Ed25519 public key. +Derives the Aptos-specific authentication key of the given Ed25519 public key.
public fun unvalidated_public_key_to_authentication_key(pk: &multi_ed25519::UnvalidatedPublicKey): vector<u8>
@@ -728,7 +728,7 @@ if bytes does not correctly encode such a PK.
 
 ## Function `validated_public_key_to_authentication_key`
 
-Derives the Supra-specific authentication key of the given Ed25519 public key.
+Derives the Aptos-specific authentication key of the given Ed25519 public key.
 
 
 
public fun validated_public_key_to_authentication_key(pk: &multi_ed25519::ValidatedPublicKey): vector<u8>
@@ -851,7 +851,7 @@ Returns the threshold t <= n of the PK.
 
 ## Function `public_key_bytes_to_authentication_key`
 
-Derives the Supra-specific authentication key of the given Ed25519 public key.
+Derives the Aptos-specific authentication key of the given Ed25519 public key.
 
 
 
fun public_key_bytes_to_authentication_key(pk_bytes: vector<u8>): vector<u8>
diff --git a/aptos-move/framework/aptos-stdlib/doc/pool_u64.md b/aptos-move/framework/aptos-stdlib/doc/pool_u64.md
index a9e3d17c22eb1..2eccd5b2ea3a6 100644
--- a/aptos-move/framework/aptos-stdlib/doc/pool_u64.md
+++ b/aptos-move/framework/aptos-stdlib/doc/pool_u64.md
@@ -299,7 +299,7 @@ Create a new pool with custom scaling_factor.
 Destroy an empty pool. This will fail if the pool has any balance of coins.
 
 
-
public fun destroy_empty(pool: pool_u64::Pool)
+
public fun destroy_empty(self: pool_u64::Pool)
 
@@ -308,8 +308,8 @@ Destroy an empty pool. This will fail if the pool has any balance of coins. Implementation -
public fun destroy_empty(pool: Pool) {
-    assert!(pool.total_coins == 0, error::invalid_state(EPOOL_IS_NOT_EMPTY));
+
public fun destroy_empty(self: Pool) {
+    assert!(self.total_coins == 0, error::invalid_state(EPOOL_IS_NOT_EMPTY));
     let Pool {
         shareholders_limit: _,
         total_coins: _,
@@ -317,7 +317,7 @@ Destroy an empty pool. This will fail if the pool has any balance of coins.
         shares: _,
         shareholders: _,
         scaling_factor: _,
-    } = pool;
+    } = self;
 }
 
@@ -329,10 +329,10 @@ Destroy an empty pool. This will fail if the pool has any balance of coins. ## Function `total_coins` -Return pool's total balance of coins. +Return self's total balance of coins. -
public fun total_coins(pool: &pool_u64::Pool): u64
+
public fun total_coins(self: &pool_u64::Pool): u64
 
@@ -341,8 +341,8 @@ Return pool's total balance of coins. Implementation -
public fun total_coins(pool: &Pool): u64 {
-    pool.total_coins
+
public fun total_coins(self: &Pool): u64 {
+    self.total_coins
 }
 
@@ -354,10 +354,10 @@ Return pool's total balance of coins. ## Function `total_shares` -Return the total number of shares across all shareholders in pool. +Return the total number of shares across all shareholders in self. -
public fun total_shares(pool: &pool_u64::Pool): u64
+
public fun total_shares(self: &pool_u64::Pool): u64
 
@@ -366,8 +366,8 @@ Return the total number of shares across all shareholders in pool. Implementation -
public fun total_shares(pool: &Pool): u64 {
-    pool.total_shares
+
public fun total_shares(self: &Pool): u64 {
+    self.total_shares
 }
 
@@ -379,10 +379,10 @@ Return the total number of shares across all shareholders in pool. ## Function `contains` -Return true if shareholder is in pool. +Return true if shareholder is in self. -
public fun contains(pool: &pool_u64::Pool, shareholder: address): bool
+
public fun contains(self: &pool_u64::Pool, shareholder: address): bool
 
@@ -391,8 +391,8 @@ Return true if shareholder is in pool. Implementation -
public fun contains(pool: &Pool, shareholder: address): bool {
-    simple_map::contains_key(&pool.shares, &shareholder)
+
public fun contains(self: &Pool, shareholder: address): bool {
+    simple_map::contains_key(&self.shares, &shareholder)
 }
 
@@ -404,10 +404,10 @@ Return true if shareholder is in pool. ## Function `shares` -Return the number of shares of stakeholder in pool. +Return the number of shares of stakeholder in self. -
public fun shares(pool: &pool_u64::Pool, shareholder: address): u64
+
public fun shares(self: &pool_u64::Pool, shareholder: address): u64
 
@@ -416,9 +416,9 @@ Return the number of shares of stakeholder in pool. Implementation -
public fun shares(pool: &Pool, shareholder: address): u64 {
-    if (contains(pool, shareholder)) {
-        *simple_map::borrow(&pool.shares, &shareholder)
+
public fun shares(self: &Pool, shareholder: address): u64 {
+    if (contains(self, shareholder)) {
+        *simple_map::borrow(&self.shares, &shareholder)
     } else {
         0
     }
@@ -433,10 +433,10 @@ Return the number of shares of stakeholder in pool.
 
 ## Function `balance`
 
-Return the balance in coins of shareholder in pool.
+Return the balance in coins of shareholder in self.
 
 
-
public fun balance(pool: &pool_u64::Pool, shareholder: address): u64
+
public fun balance(self: &pool_u64::Pool, shareholder: address): u64
 
@@ -445,9 +445,9 @@ Return the balance in coins of shareholder in pool. Implementation -
public fun balance(pool: &Pool, shareholder: address): u64 {
-    let num_shares = shares(pool, shareholder);
-    shares_to_amount(pool, num_shares)
+
public fun balance(self: &Pool, shareholder: address): u64 {
+    let num_shares = shares(self, shareholder);
+    shares_to_amount(self, num_shares)
 }
 
@@ -459,10 +459,10 @@ Return the balance in coins of shareholder in pool. ## Function `shareholders` -Return the list of shareholders in pool. +Return the list of shareholders in self. -
public fun shareholders(pool: &pool_u64::Pool): vector<address>
+
public fun shareholders(self: &pool_u64::Pool): vector<address>
 
@@ -471,8 +471,8 @@ Return the list of shareholders in pool. Implementation -
public fun shareholders(pool: &Pool): vector<address> {
-    pool.shareholders
+
public fun shareholders(self: &Pool): vector<address> {
+    self.shareholders
 }
 
@@ -484,10 +484,10 @@ Return the list of shareholders in pool. ## Function `shareholders_count` -Return the number of shareholders in pool. +Return the number of shareholders in self. -
public fun shareholders_count(pool: &pool_u64::Pool): u64
+
public fun shareholders_count(self: &pool_u64::Pool): u64
 
@@ -496,8 +496,8 @@ Return the number of shareholders in pool. Implementation -
public fun shareholders_count(pool: &Pool): u64 {
-    vector::length(&pool.shareholders)
+
public fun shareholders_count(self: &Pool): u64 {
+    vector::length(&self.shareholders)
 }
 
@@ -509,10 +509,10 @@ Return the number of shareholders in pool. ## Function `update_total_coins` -Update pool's total balance of coins. +Update self's total balance of coins. -
public fun update_total_coins(pool: &mut pool_u64::Pool, new_total_coins: u64)
+
public fun update_total_coins(self: &mut pool_u64::Pool, new_total_coins: u64)
 
@@ -521,8 +521,8 @@ Update pool's total balance of coins. Implementation -
public fun update_total_coins(pool: &mut Pool, new_total_coins: u64) {
-    pool.total_coins = new_total_coins;
+
public fun update_total_coins(self: &mut Pool, new_total_coins: u64) {
+    self.total_coins = new_total_coins;
 }
 
@@ -537,7 +537,7 @@ Update pool's total balance of coins. Allow an existing or new shareholder to add their coins to the pool in exchange for new shares. -
public fun buy_in(pool: &mut pool_u64::Pool, shareholder: address, coins_amount: u64): u64
+
public fun buy_in(self: &mut pool_u64::Pool, shareholder: address, coins_amount: u64): u64
 
@@ -546,16 +546,16 @@ Allow an existing or new shareholder to add their coins to the pool in exchange Implementation -
public fun buy_in(pool: &mut Pool, shareholder: address, coins_amount: u64): u64 {
+
public fun buy_in(self: &mut Pool, shareholder: address, coins_amount: u64): u64 {
     if (coins_amount == 0) return 0;
 
-    let new_shares = amount_to_shares(pool, coins_amount);
-    assert!(MAX_U64 - pool.total_coins >= coins_amount, error::invalid_argument(EPOOL_TOTAL_COINS_OVERFLOW));
-    assert!(MAX_U64 - pool.total_shares >= new_shares, error::invalid_argument(EPOOL_TOTAL_COINS_OVERFLOW));
+    let new_shares = amount_to_shares(self, coins_amount);
+    assert!(MAX_U64 - self.total_coins >= coins_amount, error::invalid_argument(EPOOL_TOTAL_COINS_OVERFLOW));
+    assert!(MAX_U64 - self.total_shares >= new_shares, error::invalid_argument(EPOOL_TOTAL_COINS_OVERFLOW));
 
-    pool.total_coins = pool.total_coins + coins_amount;
-    pool.total_shares = pool.total_shares + new_shares;
-    add_shares(pool, shareholder, new_shares);
+    self.total_coins = self.total_coins + coins_amount;
+    self.total_shares = self.total_shares + new_shares;
+    add_shares(self, shareholder, new_shares);
     new_shares
 }
 
@@ -568,11 +568,11 @@ Allow an existing or new shareholder to add their coins to the pool in exchange ## Function `add_shares` -Add the number of shares directly for shareholder in pool. +Add the number of shares directly for shareholder in self. This would dilute other shareholders if the pool's balance of coins didn't change. -
fun add_shares(pool: &mut pool_u64::Pool, shareholder: address, new_shares: u64): u64
+
fun add_shares(self: &mut pool_u64::Pool, shareholder: address, new_shares: u64): u64
 
@@ -581,9 +581,9 @@ This would dilute other shareholders if the pool's balance of coins didn't chang Implementation -
fun add_shares(pool: &mut Pool, shareholder: address, new_shares: u64): u64 {
-    if (contains(pool, shareholder)) {
-        let existing_shares = simple_map::borrow_mut(&mut pool.shares, &shareholder);
+
fun add_shares(self: &mut Pool, shareholder: address, new_shares: u64): u64 {
+    if (contains(self, shareholder)) {
+        let existing_shares = simple_map::borrow_mut(&mut self.shares, &shareholder);
         let current_shares = *existing_shares;
         assert!(MAX_U64 - current_shares >= new_shares, error::invalid_argument(ESHAREHOLDER_SHARES_OVERFLOW));
 
@@ -591,12 +591,12 @@ This would dilute other shareholders if the pool's balance of coins didn't chang
         *existing_shares
     } else if (new_shares > 0) {
         assert!(
-            vector::length(&pool.shareholders) < pool.shareholders_limit,
+            vector::length(&self.shareholders) < self.shareholders_limit,
             error::invalid_state(ETOO_MANY_SHAREHOLDERS),
         );
 
-        vector::push_back(&mut pool.shareholders, shareholder);
-        simple_map::add(&mut pool.shares, shareholder, new_shares);
+        vector::push_back(&mut self.shareholders, shareholder);
+        simple_map::add(&mut self.shares, shareholder, new_shares);
         new_shares
     } else {
         new_shares
@@ -612,10 +612,10 @@ This would dilute other shareholders if the pool's balance of coins didn't chang
 
 ## Function `redeem_shares`
 
-Allow shareholder to redeem their shares in pool for coins.
+Allow shareholder to redeem their shares in self for coins.
 
 
-
public fun redeem_shares(pool: &mut pool_u64::Pool, shareholder: address, shares_to_redeem: u64): u64
+
public fun redeem_shares(self: &mut pool_u64::Pool, shareholder: address, shares_to_redeem: u64): u64
 
@@ -624,16 +624,16 @@ Allow shareholder to redeem their shares in pool for c Implementation -
public fun redeem_shares(pool: &mut Pool, shareholder: address, shares_to_redeem: u64): u64 {
-    assert!(contains(pool, shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
-    assert!(shares(pool, shareholder) >= shares_to_redeem, error::invalid_argument(EINSUFFICIENT_SHARES));
+
public fun redeem_shares(self: &mut Pool, shareholder: address, shares_to_redeem: u64): u64 {
+    assert!(contains(self, shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
+    assert!(shares(self, shareholder) >= shares_to_redeem, error::invalid_argument(EINSUFFICIENT_SHARES));
 
     if (shares_to_redeem == 0) return 0;
 
-    let redeemed_coins = shares_to_amount(pool, shares_to_redeem);
-    pool.total_coins = pool.total_coins - redeemed_coins;
-    pool.total_shares = pool.total_shares - shares_to_redeem;
-    deduct_shares(pool, shareholder, shares_to_redeem);
+    let redeemed_coins = shares_to_amount(self, shares_to_redeem);
+    self.total_coins = self.total_coins - redeemed_coins;
+    self.total_shares = self.total_shares - shares_to_redeem;
+    deduct_shares(self, shareholder, shares_to_redeem);
 
     redeemed_coins
 }
@@ -650,7 +650,7 @@ Allow shareholder to redeem their shares in pool for c
 Transfer shares from shareholder_1 to shareholder_2.
 
 
-
public fun transfer_shares(pool: &mut pool_u64::Pool, shareholder_1: address, shareholder_2: address, shares_to_transfer: u64)
+
public fun transfer_shares(self: &mut pool_u64::Pool, shareholder_1: address, shareholder_2: address, shares_to_transfer: u64)
 
@@ -660,17 +660,17 @@ Transfer shares from shareholder_1 to shareholder_2.
public fun transfer_shares(
-    pool: &mut Pool,
+    self: &mut Pool,
     shareholder_1: address,
     shareholder_2: address,
     shares_to_transfer: u64,
 ) {
-    assert!(contains(pool, shareholder_1), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
-    assert!(shares(pool, shareholder_1) >= shares_to_transfer, error::invalid_argument(EINSUFFICIENT_SHARES));
+    assert!(contains(self, shareholder_1), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
+    assert!(shares(self, shareholder_1) >= shares_to_transfer, error::invalid_argument(EINSUFFICIENT_SHARES));
     if (shares_to_transfer == 0) return;
 
-    deduct_shares(pool, shareholder_1, shares_to_transfer);
-    add_shares(pool, shareholder_2, shares_to_transfer);
+    deduct_shares(self, shareholder_1, shares_to_transfer);
+    add_shares(self, shareholder_2, shares_to_transfer);
 }
 
@@ -682,10 +682,10 @@ Transfer shares from shareholder_1 to shareholder_2. ## Function `deduct_shares` -Directly deduct shareholder's number of shares in pool and return the number of remaining shares. +Directly deduct shareholder's number of shares in self and return the number of remaining shares. -
fun deduct_shares(pool: &mut pool_u64::Pool, shareholder: address, num_shares: u64): u64
+
fun deduct_shares(self: &mut pool_u64::Pool, shareholder: address, num_shares: u64): u64
 
@@ -694,19 +694,19 @@ Directly deduct shareholder's number of shares in pool Implementation -
fun deduct_shares(pool: &mut Pool, shareholder: address, num_shares: u64): u64 {
-    assert!(contains(pool, shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
-    assert!(shares(pool, shareholder) >= num_shares, error::invalid_argument(EINSUFFICIENT_SHARES));
+
fun deduct_shares(self: &mut Pool, shareholder: address, num_shares: u64): u64 {
+    assert!(contains(self, shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
+    assert!(shares(self, shareholder) >= num_shares, error::invalid_argument(EINSUFFICIENT_SHARES));
 
-    let existing_shares = simple_map::borrow_mut(&mut pool.shares, &shareholder);
+    let existing_shares = simple_map::borrow_mut(&mut self.shares, &shareholder);
     *existing_shares = *existing_shares - num_shares;
 
     // Remove the shareholder completely if they have no shares left.
     let remaining_shares = *existing_shares;
     if (remaining_shares == 0) {
-        let (_, shareholder_index) = vector::index_of(&pool.shareholders, &shareholder);
-        vector::remove(&mut pool.shareholders, shareholder_index);
-        simple_map::remove(&mut pool.shares, &shareholder);
+        let (_, shareholder_index) = vector::index_of(&self.shareholders, &shareholder);
+        vector::remove(&mut self.shareholders, shareholder_index);
+        simple_map::remove(&mut self.shares, &shareholder);
     };
 
     remaining_shares
@@ -721,11 +721,11 @@ Directly deduct shareholder's number of shares in pool
 
 ## Function `amount_to_shares`
 
-Return the number of new shares coins_amount can buy in pool.
+Return the number of new shares coins_amount can buy in self.
 amount needs to big enough to avoid rounding number.
 
 
-
public fun amount_to_shares(pool: &pool_u64::Pool, coins_amount: u64): u64
+
public fun amount_to_shares(self: &pool_u64::Pool, coins_amount: u64): u64
 
@@ -734,8 +734,8 @@ Return the number of new shares coins_amount can buy in pool< Implementation -
public fun amount_to_shares(pool: &Pool, coins_amount: u64): u64 {
-    amount_to_shares_with_total_coins(pool, coins_amount, pool.total_coins)
+
public fun amount_to_shares(self: &Pool, coins_amount: u64): u64 {
+    amount_to_shares_with_total_coins(self, coins_amount, self.total_coins)
 }
 
@@ -747,11 +747,11 @@ Return the number of new shares coins_amount can buy in pool< ## Function `amount_to_shares_with_total_coins` -Return the number of new shares coins_amount can buy in pool with a custom total coins number. +Return the number of new shares coins_amount can buy in self with a custom total coins number. amount needs to big enough to avoid rounding number. -
public fun amount_to_shares_with_total_coins(pool: &pool_u64::Pool, coins_amount: u64, total_coins: u64): u64
+
public fun amount_to_shares_with_total_coins(self: &pool_u64::Pool, coins_amount: u64, total_coins: u64): u64
 
@@ -760,17 +760,17 @@ Return the number of new shares coins_amount can buy in pool< Implementation -
public fun amount_to_shares_with_total_coins(pool: &Pool, coins_amount: u64, total_coins: u64): u64 {
+
public fun amount_to_shares_with_total_coins(self: &Pool, coins_amount: u64, total_coins: u64): u64 {
     // No shares yet so amount is worth the same number of shares.
-    if (pool.total_coins == 0 || pool.total_shares == 0) {
+    if (self.total_coins == 0 || self.total_shares == 0) {
         // Multiply by scaling factor to minimize rounding errors during internal calculations for buy ins/redeems.
         // This can overflow but scaling factor is expected to be chosen carefully so this would not overflow.
-        coins_amount * pool.scaling_factor
+        coins_amount * self.scaling_factor
     } else {
         // Shares price = total_coins / total existing shares.
         // New number of shares = new_amount / shares_price = new_amount * existing_shares / total_amount.
         // We rearrange the calc and do multiplication first to avoid rounding errors.
-        multiply_then_divide(pool, coins_amount, pool.total_shares, total_coins)
+        multiply_then_divide(self, coins_amount, self.total_shares, total_coins)
     }
 }
 
@@ -783,11 +783,11 @@ Return the number of new shares coins_amount can buy in pool< ## Function `shares_to_amount` -Return the number of coins shares are worth in pool. +Return the number of coins shares are worth in self. shares needs to big enough to avoid rounding number. -
public fun shares_to_amount(pool: &pool_u64::Pool, shares: u64): u64
+
public fun shares_to_amount(self: &pool_u64::Pool, shares: u64): u64
 
@@ -796,8 +796,8 @@ Return the number of coins shares are worth in pool. Implementation -
public fun shares_to_amount(pool: &Pool, shares: u64): u64 {
-    shares_to_amount_with_total_coins(pool, shares, pool.total_coins)
+
public fun shares_to_amount(self: &Pool, shares: u64): u64 {
+    shares_to_amount_with_total_coins(self, shares, self.total_coins)
 }
 
@@ -809,11 +809,11 @@ Return the number of coins shares are worth in pool. ## Function `shares_to_amount_with_total_coins` -Return the number of coins shares are worth in pool with a custom total coins number. +Return the number of coins shares are worth in self with a custom total coins number. shares needs to big enough to avoid rounding number. -
public fun shares_to_amount_with_total_coins(pool: &pool_u64::Pool, shares: u64, total_coins: u64): u64
+
public fun shares_to_amount_with_total_coins(self: &pool_u64::Pool, shares: u64, total_coins: u64): u64
 
@@ -822,15 +822,15 @@ Return the number of coins shares are worth in pool wi Implementation -
public fun shares_to_amount_with_total_coins(pool: &Pool, shares: u64, total_coins: u64): u64 {
+
public fun shares_to_amount_with_total_coins(self: &Pool, shares: u64, total_coins: u64): u64 {
     // No shares or coins yet so shares are worthless.
-    if (pool.total_coins == 0 || pool.total_shares == 0) {
+    if (self.total_coins == 0 || self.total_shares == 0) {
         0
     } else {
         // Shares price = total_coins / total existing shares.
         // Shares worth = shares * shares price = shares * total_coins / total existing shares.
         // We rearrange the calc and do multiplication first to avoid rounding errors.
-        multiply_then_divide(pool, shares, total_coins, pool.total_shares)
+        multiply_then_divide(self, shares, total_coins, self.total_shares)
     }
 }
 
@@ -845,7 +845,7 @@ Return the number of coins shares are worth in pool wi -
public fun multiply_then_divide(_pool: &pool_u64::Pool, x: u64, y: u64, z: u64): u64
+
public fun multiply_then_divide(self: &pool_u64::Pool, x: u64, y: u64, z: u64): u64
 
@@ -854,7 +854,7 @@ Return the number of coins shares are worth in pool wi Implementation -
public fun multiply_then_divide(_pool: &Pool, x: u64, y: u64, z: u64): u64 {
+
public fun multiply_then_divide(self: &Pool, x: u64, y: u64, z: u64): u64 {
     let result = (to_u128(x) * to_u128(y)) / to_u128(z);
     (result as u64)
 }
@@ -974,14 +974,14 @@ Return the number of coins shares are worth in pool wi
 ### Function `contains`
 
 
-
public fun contains(pool: &pool_u64::Pool, shareholder: address): bool
+
public fun contains(self: &pool_u64::Pool, shareholder: address): bool
 
aborts_if false;
-ensures result == spec_contains(pool, shareholder);
+ensures result == spec_contains(self, shareholder);
 
@@ -1007,14 +1007,14 @@ Return the number of coins shares are worth in pool wi ### Function `shares` -
public fun shares(pool: &pool_u64::Pool, shareholder: address): u64
+
public fun shares(self: &pool_u64::Pool, shareholder: address): u64
 
aborts_if false;
-ensures result == spec_shares(pool, shareholder);
+ensures result == spec_shares(self, shareholder);
 
@@ -1024,16 +1024,16 @@ Return the number of coins shares are worth in pool wi ### Function `balance` -
public fun balance(pool: &pool_u64::Pool, shareholder: address): u64
+
public fun balance(self: &pool_u64::Pool, shareholder: address): u64
 
-
let shares = spec_shares(pool, shareholder);
-let total_coins = pool.total_coins;
-aborts_if pool.total_coins > 0 && pool.total_shares > 0 && (shares * total_coins) / pool.total_shares > MAX_U64;
-ensures result == spec_shares_to_amount_with_total_coins(pool, shares, total_coins);
+
let shares = spec_shares(self, shareholder);
+let total_coins = self.total_coins;
+aborts_if self.total_coins > 0 && self.total_shares > 0 && (shares * total_coins) / self.total_shares > MAX_U64;
+ensures result == spec_shares_to_amount_with_total_coins(self, shares, total_coins);
 
@@ -1043,19 +1043,19 @@ Return the number of coins shares are worth in pool wi ### Function `buy_in` -
public fun buy_in(pool: &mut pool_u64::Pool, shareholder: address, coins_amount: u64): u64
+
public fun buy_in(self: &mut pool_u64::Pool, shareholder: address, coins_amount: u64): u64
 
-
let new_shares = spec_amount_to_shares_with_total_coins(pool, coins_amount, pool.total_coins);
-aborts_if pool.total_coins + coins_amount > MAX_U64;
-aborts_if pool.total_shares + new_shares > MAX_U64;
+
let new_shares = spec_amount_to_shares_with_total_coins(self, coins_amount, self.total_coins);
+aborts_if self.total_coins + coins_amount > MAX_U64;
+aborts_if self.total_shares + new_shares > MAX_U64;
 include coins_amount > 0 ==> AddSharesAbortsIf { new_shares: new_shares };
 include coins_amount > 0 ==> AddSharesEnsures { new_shares: new_shares };
-ensures pool.total_coins == old(pool.total_coins) + coins_amount;
-ensures pool.total_shares == old(pool.total_shares) + new_shares;
+ensures self.total_coins == old(self.total_coins) + coins_amount;
+ensures self.total_shares == old(self.total_shares) + new_shares;
 ensures result == new_shares;
 
@@ -1066,7 +1066,7 @@ Return the number of coins shares are worth in pool wi ### Function `add_shares` -
fun add_shares(pool: &mut pool_u64::Pool, shareholder: address, new_shares: u64): u64
+
fun add_shares(self: &mut pool_u64::Pool, shareholder: address, new_shares: u64): u64
 
@@ -1074,8 +1074,8 @@ Return the number of coins shares are worth in pool wi
include AddSharesAbortsIf;
 include AddSharesEnsures;
-let key_exists = simple_map::spec_contains_key(pool.shares, shareholder);
-ensures result == if (key_exists) { simple_map::spec_get(pool.shares, shareholder) }
+let key_exists = simple_map::spec_contains_key(self.shares, shareholder);
+ensures result == if (key_exists) { simple_map::spec_get(self.shares, shareholder) }
 else { new_shares };
 
@@ -1086,13 +1086,13 @@ Return the number of coins shares are worth in pool wi
schema AddSharesAbortsIf {
-    pool: Pool;
+    self: Pool;
     shareholder: address;
     new_shares: u64;
-    let key_exists = simple_map::spec_contains_key(pool.shares, shareholder);
-    let current_shares = simple_map::spec_get(pool.shares, shareholder);
+    let key_exists = simple_map::spec_contains_key(self.shares, shareholder);
+    let current_shares = simple_map::spec_get(self.shares, shareholder);
     aborts_if key_exists && current_shares + new_shares > MAX_U64;
-    aborts_if !key_exists && new_shares > 0 && len(pool.shareholders) >= pool.shareholders_limit;
+    aborts_if !key_exists && new_shares > 0 && len(self.shareholders) >= self.shareholders_limit;
 }
 
@@ -1103,17 +1103,17 @@ Return the number of coins shares are worth in pool wi
schema AddSharesEnsures {
-    pool: Pool;
+    self: Pool;
     shareholder: address;
     new_shares: u64;
-    let key_exists = simple_map::spec_contains_key(pool.shares, shareholder);
-    let current_shares = simple_map::spec_get(pool.shares, shareholder);
+    let key_exists = simple_map::spec_contains_key(self.shares, shareholder);
+    let current_shares = simple_map::spec_get(self.shares, shareholder);
     ensures key_exists ==>
-        pool.shares == simple_map::spec_set(old(pool.shares), shareholder, current_shares + new_shares);
+        self.shares == simple_map::spec_set(old(self.shares), shareholder, current_shares + new_shares);
     ensures (!key_exists && new_shares > 0) ==>
-        pool.shares == simple_map::spec_set(old(pool.shares), shareholder, new_shares);
+        self.shares == simple_map::spec_set(old(self.shares), shareholder, new_shares);
     ensures (!key_exists && new_shares > 0) ==>
-        vector::eq_push_back(pool.shareholders, old(pool.shareholders), shareholder);
+        vector::eq_push_back(self.shareholders, old(self.shareholders), shareholder);
 }
 
@@ -1140,20 +1140,22 @@ Return the number of coins shares are worth in pool wi ### Function `redeem_shares` -
public fun redeem_shares(pool: &mut pool_u64::Pool, shareholder: address, shares_to_redeem: u64): u64
+
public fun redeem_shares(self: &mut pool_u64::Pool, shareholder: address, shares_to_redeem: u64): u64
 
-
let redeemed_coins = spec_shares_to_amount_with_total_coins(pool, shares_to_redeem, pool.total_coins);
-aborts_if !spec_contains(pool, shareholder);
-aborts_if spec_shares(pool, shareholder) < shares_to_redeem;
-aborts_if pool.total_coins < redeemed_coins;
-aborts_if pool.total_shares < shares_to_redeem;
-ensures pool.total_coins == old(pool.total_coins) - redeemed_coins;
-ensures pool.total_shares == old(pool.total_shares) - shares_to_redeem;
-include shares_to_redeem > 0 ==> DeductSharesEnsures { num_shares: shares_to_redeem };
+
let redeemed_coins = spec_shares_to_amount_with_total_coins(self, shares_to_redeem, self.total_coins);
+aborts_if !spec_contains(self, shareholder);
+aborts_if spec_shares(self, shareholder) < shares_to_redeem;
+aborts_if self.total_coins < redeemed_coins;
+aborts_if self.total_shares < shares_to_redeem;
+ensures self.total_coins == old(self.total_coins) - redeemed_coins;
+ensures self.total_shares == old(self.total_shares) - shares_to_redeem;
+include shares_to_redeem > 0 ==> DeductSharesEnsures {
+    num_shares: shares_to_redeem
+};
 ensures result == redeemed_coins;
 
@@ -1164,15 +1166,15 @@ Return the number of coins shares are worth in pool wi ### Function `transfer_shares` -
public fun transfer_shares(pool: &mut pool_u64::Pool, shareholder_1: address, shareholder_2: address, shares_to_transfer: u64)
+
public fun transfer_shares(self: &mut pool_u64::Pool, shareholder_1: address, shareholder_2: address, shares_to_transfer: u64)
 
pragma aborts_if_is_partial;
-aborts_if !spec_contains(pool, shareholder_1);
-aborts_if spec_shares(pool, shareholder_1) < shares_to_transfer;
+aborts_if !spec_contains(self, shareholder_1);
+aborts_if spec_shares(self, shareholder_1) < shares_to_transfer;
 
@@ -1182,17 +1184,17 @@ Return the number of coins shares are worth in pool wi ### Function `deduct_shares` -
fun deduct_shares(pool: &mut pool_u64::Pool, shareholder: address, num_shares: u64): u64
+
fun deduct_shares(self: &mut pool_u64::Pool, shareholder: address, num_shares: u64): u64
 
-
aborts_if !spec_contains(pool, shareholder);
-aborts_if spec_shares(pool, shareholder) < num_shares;
+
aborts_if !spec_contains(self, shareholder);
+aborts_if spec_shares(self, shareholder) < num_shares;
 include DeductSharesEnsures;
-let remaining_shares = simple_map::spec_get(pool.shares, shareholder) - num_shares;
-ensures remaining_shares > 0 ==> result == simple_map::spec_get(pool.shares, shareholder);
+let remaining_shares = simple_map::spec_get(self.shares, shareholder) - num_shares;
+ensures remaining_shares > 0 ==> result == simple_map::spec_get(self.shares, shareholder);
 ensures remaining_shares == 0 ==> result == 0;
 
@@ -1203,13 +1205,13 @@ Return the number of coins shares are worth in pool wi
schema DeductSharesEnsures {
-    pool: Pool;
+    self: Pool;
     shareholder: address;
     num_shares: u64;
-    let remaining_shares = simple_map::spec_get(pool.shares, shareholder) - num_shares;
-    ensures remaining_shares > 0 ==> simple_map::spec_get(pool.shares, shareholder) == remaining_shares;
-    ensures remaining_shares == 0 ==> !simple_map::spec_contains_key(pool.shares, shareholder);
-    ensures remaining_shares == 0 ==> !vector::spec_contains(pool.shareholders, shareholder);
+    let remaining_shares = simple_map::spec_get(self.shares, shareholder) - num_shares;
+    ensures remaining_shares > 0 ==> simple_map::spec_get(self.shares, shareholder) == remaining_shares;
+    ensures remaining_shares == 0 ==> !simple_map::spec_contains_key(self.shares, shareholder);
+    ensures remaining_shares == 0 ==> !vector::spec_contains(self.shareholders, shareholder);
 }
 
@@ -1220,18 +1222,18 @@ Return the number of coins shares are worth in pool wi ### Function `amount_to_shares_with_total_coins` -
public fun amount_to_shares_with_total_coins(pool: &pool_u64::Pool, coins_amount: u64, total_coins: u64): u64
+
public fun amount_to_shares_with_total_coins(self: &pool_u64::Pool, coins_amount: u64, total_coins: u64): u64
 
-
aborts_if pool.total_coins > 0 && pool.total_shares > 0
-    && (coins_amount * pool.total_shares) / total_coins > MAX_U64;
-aborts_if (pool.total_coins == 0 || pool.total_shares == 0)
-    && coins_amount * pool.scaling_factor > MAX_U64;
-aborts_if pool.total_coins > 0 && pool.total_shares > 0 && total_coins == 0;
-ensures result == spec_amount_to_shares_with_total_coins(pool, coins_amount, total_coins);
+
aborts_if self.total_coins > 0 && self.total_shares > 0
+    && (coins_amount * self.total_shares) / total_coins > MAX_U64;
+aborts_if (self.total_coins == 0 || self.total_shares == 0)
+    && coins_amount * self.scaling_factor > MAX_U64;
+aborts_if self.total_coins > 0 && self.total_shares > 0 && total_coins == 0;
+ensures result == spec_amount_to_shares_with_total_coins(self, coins_amount, total_coins);
 
@@ -1241,15 +1243,15 @@ Return the number of coins shares are worth in pool wi ### Function `shares_to_amount_with_total_coins` -
public fun shares_to_amount_with_total_coins(pool: &pool_u64::Pool, shares: u64, total_coins: u64): u64
+
public fun shares_to_amount_with_total_coins(self: &pool_u64::Pool, shares: u64, total_coins: u64): u64
 
-
aborts_if pool.total_coins > 0 && pool.total_shares > 0
-    && (shares * total_coins) / pool.total_shares > MAX_U64;
-ensures result == spec_shares_to_amount_with_total_coins(pool, shares, total_coins);
+
aborts_if self.total_coins > 0 && self.total_shares > 0
+    && (shares * total_coins) / self.total_shares > MAX_U64;
+ensures result == spec_shares_to_amount_with_total_coins(self, shares, total_coins);
 
@@ -1275,7 +1277,7 @@ Return the number of coins shares are worth in pool wi ### Function `multiply_then_divide` -
public fun multiply_then_divide(_pool: &pool_u64::Pool, x: u64, y: u64, z: u64): u64
+
public fun multiply_then_divide(self: &pool_u64::Pool, x: u64, y: u64, z: u64): u64
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/pool_u64_unbound.md b/aptos-move/framework/aptos-stdlib/doc/pool_u64_unbound.md index 281fb751e7dfd..ae7f13852e8fc 100644 --- a/aptos-move/framework/aptos-stdlib/doc/pool_u64_unbound.md +++ b/aptos-move/framework/aptos-stdlib/doc/pool_u64_unbound.md @@ -296,7 +296,7 @@ Create a new pool with custom scaling_factor. Destroy an empty pool. This will fail if the pool has any balance of coins. -
public fun destroy_empty(pool: pool_u64_unbound::Pool)
+
public fun destroy_empty(self: pool_u64_unbound::Pool)
 
@@ -305,14 +305,14 @@ Destroy an empty pool. This will fail if the pool has any balance of coins. Implementation -
public fun destroy_empty(pool: Pool) {
-    assert!(pool.total_coins == 0, error::invalid_state(EPOOL_IS_NOT_EMPTY));
+
public fun destroy_empty(self: Pool) {
+    assert!(self.total_coins == 0, error::invalid_state(EPOOL_IS_NOT_EMPTY));
     let Pool {
         total_coins: _,
         total_shares: _,
         shares,
         scaling_factor: _,
-    } = pool;
+    } = self;
     table::destroy_empty<address, u128>(shares);
 }
 
@@ -325,10 +325,10 @@ Destroy an empty pool. This will fail if the pool has any balance of coins. ## Function `total_coins` -Return pool's total balance of coins. +Return self's total balance of coins. -
public fun total_coins(pool: &pool_u64_unbound::Pool): u64
+
public fun total_coins(self: &pool_u64_unbound::Pool): u64
 
@@ -337,8 +337,8 @@ Return pool's total balance of coins. Implementation -
public fun total_coins(pool: &Pool): u64 {
-    pool.total_coins
+
public fun total_coins(self: &Pool): u64 {
+    self.total_coins
 }
 
@@ -350,10 +350,10 @@ Return pool's total balance of coins. ## Function `total_shares` -Return the total number of shares across all shareholders in pool. +Return the total number of shares across all shareholders in self. -
public fun total_shares(pool: &pool_u64_unbound::Pool): u128
+
public fun total_shares(self: &pool_u64_unbound::Pool): u128
 
@@ -362,8 +362,8 @@ Return the total number of shares across all shareholders in pool. Implementation -
public fun total_shares(pool: &Pool): u128 {
-    pool.total_shares
+
public fun total_shares(self: &Pool): u128 {
+    self.total_shares
 }
 
@@ -375,10 +375,10 @@ Return the total number of shares across all shareholders in pool. ## Function `contains` -Return true if shareholder is in pool. +Return true if shareholder is in self. -
public fun contains(pool: &pool_u64_unbound::Pool, shareholder: address): bool
+
public fun contains(self: &pool_u64_unbound::Pool, shareholder: address): bool
 
@@ -387,8 +387,8 @@ Return true if shareholder is in pool. Implementation -
public fun contains(pool: &Pool, shareholder: address): bool {
-    table::contains(&pool.shares, shareholder)
+
public fun contains(self: &Pool, shareholder: address): bool {
+    table::contains(&self.shares, shareholder)
 }
 
@@ -400,10 +400,10 @@ Return true if shareholder is in pool. ## Function `shares` -Return the number of shares of stakeholder in pool. +Return the number of shares of stakeholder in self. -
public fun shares(pool: &pool_u64_unbound::Pool, shareholder: address): u128
+
public fun shares(self: &pool_u64_unbound::Pool, shareholder: address): u128
 
@@ -412,9 +412,9 @@ Return the number of shares of stakeholder in pool. Implementation -
public fun shares(pool: &Pool, shareholder: address): u128 {
-    if (contains(pool, shareholder)) {
-        *table::borrow(&pool.shares, shareholder)
+
public fun shares(self: &Pool, shareholder: address): u128 {
+    if (contains(self, shareholder)) {
+        *table::borrow(&self.shares, shareholder)
     } else {
         0
     }
@@ -429,10 +429,10 @@ Return the number of shares of stakeholder in pool.
 
 ## Function `balance`
 
-Return the balance in coins of shareholder in pool.
+Return the balance in coins of shareholder in self.
 
 
-
public fun balance(pool: &pool_u64_unbound::Pool, shareholder: address): u64
+
public fun balance(self: &pool_u64_unbound::Pool, shareholder: address): u64
 
@@ -441,9 +441,9 @@ Return the balance in coins of shareholder in pool. Implementation -
public fun balance(pool: &Pool, shareholder: address): u64 {
-    let num_shares = shares(pool, shareholder);
-    shares_to_amount(pool, num_shares)
+
public fun balance(self: &Pool, shareholder: address): u64 {
+    let num_shares = shares(self, shareholder);
+    shares_to_amount(self, num_shares)
 }
 
@@ -455,10 +455,10 @@ Return the balance in coins of shareholder in pool. ## Function `shareholders_count` -Return the number of shareholders in pool. +Return the number of shareholders in self. -
public fun shareholders_count(pool: &pool_u64_unbound::Pool): u64
+
public fun shareholders_count(self: &pool_u64_unbound::Pool): u64
 
@@ -467,8 +467,8 @@ Return the number of shareholders in pool. Implementation -
public fun shareholders_count(pool: &Pool): u64 {
-    table::length(&pool.shares)
+
public fun shareholders_count(self: &Pool): u64 {
+    table::length(&self.shares)
 }
 
@@ -480,10 +480,10 @@ Return the number of shareholders in pool. ## Function `update_total_coins` -Update pool's total balance of coins. +Update self's total balance of coins. -
public fun update_total_coins(pool: &mut pool_u64_unbound::Pool, new_total_coins: u64)
+
public fun update_total_coins(self: &mut pool_u64_unbound::Pool, new_total_coins: u64)
 
@@ -492,8 +492,8 @@ Update pool's total balance of coins. Implementation -
public fun update_total_coins(pool: &mut Pool, new_total_coins: u64) {
-    pool.total_coins = new_total_coins;
+
public fun update_total_coins(self: &mut Pool, new_total_coins: u64) {
+    self.total_coins = new_total_coins;
 }
 
@@ -508,7 +508,7 @@ Update pool's total balance of coins. Allow an existing or new shareholder to add their coins to the pool in exchange for new shares. -
public fun buy_in(pool: &mut pool_u64_unbound::Pool, shareholder: address, coins_amount: u64): u128
+
public fun buy_in(self: &mut pool_u64_unbound::Pool, shareholder: address, coins_amount: u64): u128
 
@@ -517,16 +517,16 @@ Allow an existing or new shareholder to add their coins to the pool in exchange Implementation -
public fun buy_in(pool: &mut Pool, shareholder: address, coins_amount: u64): u128 {
+
public fun buy_in(self: &mut Pool, shareholder: address, coins_amount: u64): u128 {
     if (coins_amount == 0) return 0;
 
-    let new_shares = amount_to_shares(pool, coins_amount);
-    assert!(MAX_U64 - pool.total_coins >= coins_amount, error::invalid_argument(EPOOL_TOTAL_COINS_OVERFLOW));
-    assert!(MAX_U128 - pool.total_shares >= new_shares, error::invalid_argument(EPOOL_TOTAL_SHARES_OVERFLOW));
+    let new_shares = amount_to_shares(self, coins_amount);
+    assert!(MAX_U64 - self.total_coins >= coins_amount, error::invalid_argument(EPOOL_TOTAL_COINS_OVERFLOW));
+    assert!(MAX_U128 - self.total_shares >= new_shares, error::invalid_argument(EPOOL_TOTAL_SHARES_OVERFLOW));
 
-    pool.total_coins = pool.total_coins + coins_amount;
-    pool.total_shares = pool.total_shares + new_shares;
-    add_shares(pool, shareholder, new_shares);
+    self.total_coins = self.total_coins + coins_amount;
+    self.total_shares = self.total_shares + new_shares;
+    add_shares(self, shareholder, new_shares);
     new_shares
 }
 
@@ -539,11 +539,11 @@ Allow an existing or new shareholder to add their coins to the pool in exchange ## Function `add_shares` -Add the number of shares directly for shareholder in pool. +Add the number of shares directly for shareholder in self. This would dilute other shareholders if the pool's balance of coins didn't change. -
fun add_shares(pool: &mut pool_u64_unbound::Pool, shareholder: address, new_shares: u128): u128
+
fun add_shares(self: &mut pool_u64_unbound::Pool, shareholder: address, new_shares: u128): u128
 
@@ -552,16 +552,16 @@ This would dilute other shareholders if the pool's balance of coins didn't chang Implementation -
fun add_shares(pool: &mut Pool, shareholder: address, new_shares: u128): u128 {
-    if (contains(pool, shareholder)) {
-        let existing_shares = table::borrow_mut(&mut pool.shares, shareholder);
+
fun add_shares(self: &mut Pool, shareholder: address, new_shares: u128): u128 {
+    if (contains(self, shareholder)) {
+        let existing_shares = table::borrow_mut(&mut self.shares, shareholder);
         let current_shares = *existing_shares;
         assert!(MAX_U128 - current_shares >= new_shares, error::invalid_argument(ESHAREHOLDER_SHARES_OVERFLOW));
 
         *existing_shares = current_shares + new_shares;
         *existing_shares
     } else if (new_shares > 0) {
-        table::add(&mut pool.shares, shareholder, new_shares);
+        table::add(&mut self.shares, shareholder, new_shares);
         new_shares
     } else {
         new_shares
@@ -577,10 +577,10 @@ This would dilute other shareholders if the pool's balance of coins didn't chang
 
 ## Function `redeem_shares`
 
-Allow shareholder to redeem their shares in pool for coins.
+Allow shareholder to redeem their shares in self for coins.
 
 
-
public fun redeem_shares(pool: &mut pool_u64_unbound::Pool, shareholder: address, shares_to_redeem: u128): u64
+
public fun redeem_shares(self: &mut pool_u64_unbound::Pool, shareholder: address, shares_to_redeem: u128): u64
 
@@ -589,16 +589,16 @@ Allow shareholder to redeem their shares in pool for c Implementation -
public fun redeem_shares(pool: &mut Pool, shareholder: address, shares_to_redeem: u128): u64 {
-    assert!(contains(pool, shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
-    assert!(shares(pool, shareholder) >= shares_to_redeem, error::invalid_argument(EINSUFFICIENT_SHARES));
+
public fun redeem_shares(self: &mut Pool, shareholder: address, shares_to_redeem: u128): u64 {
+    assert!(contains(self, shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
+    assert!(shares(self, shareholder) >= shares_to_redeem, error::invalid_argument(EINSUFFICIENT_SHARES));
 
     if (shares_to_redeem == 0) return 0;
 
-    let redeemed_coins = shares_to_amount(pool, shares_to_redeem);
-    pool.total_coins = pool.total_coins - redeemed_coins;
-    pool.total_shares = pool.total_shares - shares_to_redeem;
-    deduct_shares(pool, shareholder, shares_to_redeem);
+    let redeemed_coins = shares_to_amount(self, shares_to_redeem);
+    self.total_coins = self.total_coins - redeemed_coins;
+    self.total_shares = self.total_shares - shares_to_redeem;
+    deduct_shares(self, shareholder, shares_to_redeem);
 
     redeemed_coins
 }
@@ -615,7 +615,7 @@ Allow shareholder to redeem their shares in pool for c
 Transfer shares from shareholder_1 to shareholder_2.
 
 
-
public fun transfer_shares(pool: &mut pool_u64_unbound::Pool, shareholder_1: address, shareholder_2: address, shares_to_transfer: u128)
+
public fun transfer_shares(self: &mut pool_u64_unbound::Pool, shareholder_1: address, shareholder_2: address, shares_to_transfer: u128)
 
@@ -625,17 +625,17 @@ Transfer shares from shareholder_1 to shareholder_2.
public fun transfer_shares(
-    pool: &mut Pool,
+    self: &mut Pool,
     shareholder_1: address,
     shareholder_2: address,
     shares_to_transfer: u128,
 ) {
-    assert!(contains(pool, shareholder_1), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
-    assert!(shares(pool, shareholder_1) >= shares_to_transfer, error::invalid_argument(EINSUFFICIENT_SHARES));
+    assert!(contains(self, shareholder_1), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
+    assert!(shares(self, shareholder_1) >= shares_to_transfer, error::invalid_argument(EINSUFFICIENT_SHARES));
     if (shares_to_transfer == 0) return;
 
-    deduct_shares(pool, shareholder_1, shares_to_transfer);
-    add_shares(pool, shareholder_2, shares_to_transfer);
+    deduct_shares(self, shareholder_1, shares_to_transfer);
+    add_shares(self, shareholder_2, shares_to_transfer);
 }
 
@@ -647,10 +647,10 @@ Transfer shares from shareholder_1 to shareholder_2. ## Function `deduct_shares` -Directly deduct shareholder's number of shares in pool and return the number of remaining shares. +Directly deduct shareholder's number of shares in self and return the number of remaining shares. -
fun deduct_shares(pool: &mut pool_u64_unbound::Pool, shareholder: address, num_shares: u128): u128
+
fun deduct_shares(self: &mut pool_u64_unbound::Pool, shareholder: address, num_shares: u128): u128
 
@@ -659,17 +659,17 @@ Directly deduct shareholder's number of shares in pool Implementation -
fun deduct_shares(pool: &mut Pool, shareholder: address, num_shares: u128): u128 {
-    assert!(contains(pool, shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
-    assert!(shares(pool, shareholder) >= num_shares, error::invalid_argument(EINSUFFICIENT_SHARES));
+
fun deduct_shares(self: &mut Pool, shareholder: address, num_shares: u128): u128 {
+    assert!(contains(self, shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
+    assert!(shares(self, shareholder) >= num_shares, error::invalid_argument(EINSUFFICIENT_SHARES));
 
-    let existing_shares = table::borrow_mut(&mut pool.shares, shareholder);
+    let existing_shares = table::borrow_mut(&mut self.shares, shareholder);
     *existing_shares = *existing_shares - num_shares;
 
     // Remove the shareholder completely if they have no shares left.
     let remaining_shares = *existing_shares;
     if (remaining_shares == 0) {
-        table::remove(&mut pool.shares, shareholder);
+        table::remove(&mut self.shares, shareholder);
     };
 
     remaining_shares
@@ -684,11 +684,11 @@ Directly deduct shareholder's number of shares in pool
 
 ## Function `amount_to_shares`
 
-Return the number of new shares coins_amount can buy in pool.
+Return the number of new shares coins_amount can buy in the pool.
 amount needs to be big enough to avoid rounding errors.
 
 
-
public fun amount_to_shares(pool: &pool_u64_unbound::Pool, coins_amount: u64): u128
+
public fun amount_to_shares(self: &pool_u64_unbound::Pool, coins_amount: u64): u128
 
@@ -697,8 +697,8 @@ Return the number of new shares coins_amount can buy in pool< Implementation -
public fun amount_to_shares(pool: &Pool, coins_amount: u64): u128 {
-    amount_to_shares_with_total_coins(pool, coins_amount, pool.total_coins)
+
public fun amount_to_shares(self: &Pool, coins_amount: u64): u128 {
+    amount_to_shares_with_total_coins(self, coins_amount, self.total_coins)
 }
 
@@ -710,11 +710,11 @@ Return the number of new shares coins_amount can buy in pool< ## Function `amount_to_shares_with_total_coins` -Return the number of new shares coins_amount can buy in pool with a custom total coins number. +Return the number of new shares coins_amount can buy in self with a custom total coins number. amount needs to big enough to avoid rounding number. -
public fun amount_to_shares_with_total_coins(pool: &pool_u64_unbound::Pool, coins_amount: u64, total_coins: u64): u128
+
public fun amount_to_shares_with_total_coins(self: &pool_u64_unbound::Pool, coins_amount: u64, total_coins: u64): u128
 
@@ -723,17 +723,17 @@ Return the number of new shares coins_amount can buy in pool< Implementation -
public fun amount_to_shares_with_total_coins(pool: &Pool, coins_amount: u64, total_coins: u64): u128 {
+
public fun amount_to_shares_with_total_coins(self: &Pool, coins_amount: u64, total_coins: u64): u128 {
     // No shares yet so amount is worth the same number of shares.
-    if (pool.total_coins == 0 || pool.total_shares == 0) {
+    if (self.total_coins == 0 || self.total_shares == 0) {
         // Multiply by scaling factor to minimize rounding errors during internal calculations for buy ins/redeems.
         // This can overflow but scaling factor is expected to be chosen carefully so this would not overflow.
-        to_u128(coins_amount) * to_u128(pool.scaling_factor)
+        to_u128(coins_amount) * to_u128(self.scaling_factor)
     } else {
         // Shares price = total_coins / total existing shares.
         // New number of shares = new_amount / shares_price = new_amount * existing_shares / total_amount.
         // We rearrange the calc and do multiplication first to avoid rounding errors.
-        multiply_then_divide(pool, to_u128(coins_amount), pool.total_shares, to_u128(total_coins))
+        multiply_then_divide(self, to_u128(coins_amount), self.total_shares, to_u128(total_coins))
     }
 }
 
@@ -746,11 +746,11 @@ Return the number of new shares coins_amount can buy in pool< ## Function `shares_to_amount` -Return the number of coins shares are worth in pool. +Return the number of coins shares are worth in self. shares needs to big enough to avoid rounding number. -
public fun shares_to_amount(pool: &pool_u64_unbound::Pool, shares: u128): u64
+
public fun shares_to_amount(self: &pool_u64_unbound::Pool, shares: u128): u64
 
@@ -759,8 +759,8 @@ Return the number of coins shares are worth in pool. Implementation -
public fun shares_to_amount(pool: &Pool, shares: u128): u64 {
-    shares_to_amount_with_total_coins(pool, shares, pool.total_coins)
+
public fun shares_to_amount(self: &Pool, shares: u128): u64 {
+    shares_to_amount_with_total_coins(self, shares, self.total_coins)
 }
 
@@ -772,11 +772,11 @@ Return the number of coins shares are worth in pool. ## Function `shares_to_amount_with_total_coins` -Return the number of coins shares are worth in pool with a custom total coins number. +Return the number of coins shares are worth in self with a custom total coins number. shares needs to big enough to avoid rounding number. -
public fun shares_to_amount_with_total_coins(pool: &pool_u64_unbound::Pool, shares: u128, total_coins: u64): u64
+
public fun shares_to_amount_with_total_coins(self: &pool_u64_unbound::Pool, shares: u128, total_coins: u64): u64
 
@@ -785,15 +785,15 @@ Return the number of coins shares are worth in pool wi Implementation -
public fun shares_to_amount_with_total_coins(pool: &Pool, shares: u128, total_coins: u64): u64 {
+
public fun shares_to_amount_with_total_coins(self: &Pool, shares: u128, total_coins: u64): u64 {
     // No shares or coins yet so shares are worthless.
-    if (pool.total_coins == 0 || pool.total_shares == 0) {
+    if (self.total_coins == 0 || self.total_shares == 0) {
         0
     } else {
         // Shares price = total_coins / total existing shares.
         // Shares worth = shares * shares price = shares * total_coins / total existing shares.
         // We rearrange the calc and do multiplication first to avoid rounding errors.
-        (multiply_then_divide(pool, shares, to_u128(total_coins), pool.total_shares) as u64)
+        (multiply_then_divide(self, shares, to_u128(total_coins), self.total_shares) as u64)
     }
 }
 
@@ -809,7 +809,7 @@ Return the number of coins shares are worth in pool wi Return the number of coins shares are worth in pool with custom total coins and shares numbers. -
public fun shares_to_amount_with_total_stats(pool: &pool_u64_unbound::Pool, shares: u128, total_coins: u64, total_shares: u128): u64
+
public fun shares_to_amount_with_total_stats(self: &pool_u64_unbound::Pool, shares: u128, total_coins: u64, total_shares: u128): u64
 
@@ -819,15 +819,15 @@ Return the number of coins shares are worth in pool wi
public fun shares_to_amount_with_total_stats(
-    pool: &Pool,
+    self: &Pool,
     shares: u128,
     total_coins: u64,
     total_shares: u128,
 ): u64 {
-    if (pool.total_coins == 0 || total_shares == 0) {
+    if (self.total_coins == 0 || total_shares == 0) {
         0
     } else {
-        (multiply_then_divide(pool, shares, to_u128(total_coins), total_shares) as u64)
+        (multiply_then_divide(self, shares, to_u128(total_coins), total_shares) as u64)
     }
 }
 
@@ -842,7 +842,7 @@ Return the number of coins shares are worth in pool wi -
public fun multiply_then_divide(_pool: &pool_u64_unbound::Pool, x: u128, y: u128, z: u128): u128
+
public fun multiply_then_divide(self: &pool_u64_unbound::Pool, x: u128, y: u128, z: u128): u128
 
@@ -851,7 +851,7 @@ Return the number of coins shares are worth in pool wi Implementation -
public fun multiply_then_divide(_pool: &Pool, x: u128, y: u128, z: u128): u128 {
+
public fun multiply_then_divide(self: &Pool, x: u128, y: u128, z: u128): u128 {
     let result = (to_u256(x) * to_u256(y)) / to_u256(z);
     (result as u128)
 }
@@ -975,14 +975,14 @@ Return the number of coins shares are worth in pool wi
 ### Function `contains`
 
 
-
public fun contains(pool: &pool_u64_unbound::Pool, shareholder: address): bool
+
public fun contains(self: &pool_u64_unbound::Pool, shareholder: address): bool
 
aborts_if false;
-ensures result == spec_contains(pool, shareholder);
+ensures result == spec_contains(self, shareholder);
 
@@ -1008,14 +1008,14 @@ Return the number of coins shares are worth in pool wi ### Function `shares` -
public fun shares(pool: &pool_u64_unbound::Pool, shareholder: address): u128
+
public fun shares(self: &pool_u64_unbound::Pool, shareholder: address): u128
 
aborts_if false;
-ensures result == spec_shares(pool, shareholder);
+ensures result == spec_shares(self, shareholder);
 
@@ -1025,16 +1025,16 @@ Return the number of coins shares are worth in pool wi ### Function `balance` -
public fun balance(pool: &pool_u64_unbound::Pool, shareholder: address): u64
+
public fun balance(self: &pool_u64_unbound::Pool, shareholder: address): u64
 
-
let shares = spec_shares(pool, shareholder);
-let total_coins = pool.total_coins;
-aborts_if pool.total_coins > 0 && pool.total_shares > 0 && (shares * total_coins) / pool.total_shares > MAX_U64;
-ensures result == spec_shares_to_amount_with_total_coins(pool, shares, total_coins);
+
let shares = spec_shares(self, shareholder);
+let total_coins = self.total_coins;
+aborts_if self.total_coins > 0 && self.total_shares > 0 && (shares * total_coins) / self.total_shares > MAX_U64;
+ensures result == spec_shares_to_amount_with_total_coins(self, shares, total_coins);
 
@@ -1044,19 +1044,19 @@ Return the number of coins shares are worth in pool wi ### Function `buy_in` -
public fun buy_in(pool: &mut pool_u64_unbound::Pool, shareholder: address, coins_amount: u64): u128
+
public fun buy_in(self: &mut pool_u64_unbound::Pool, shareholder: address, coins_amount: u64): u128
 
-
let new_shares = spec_amount_to_shares_with_total_coins(pool, coins_amount, pool.total_coins);
-aborts_if pool.total_coins + coins_amount > MAX_U64;
-aborts_if pool.total_shares + new_shares > MAX_U128;
+
let new_shares = spec_amount_to_shares_with_total_coins(self, coins_amount, self.total_coins);
+aborts_if self.total_coins + coins_amount > MAX_U64;
+aborts_if self.total_shares + new_shares > MAX_U128;
 include coins_amount > 0 ==> AddSharesAbortsIf { new_shares: new_shares };
 include coins_amount > 0 ==> AddSharesEnsures { new_shares: new_shares };
-ensures pool.total_coins == old(pool.total_coins) + coins_amount;
-ensures pool.total_shares == old(pool.total_shares) + new_shares;
+ensures self.total_coins == old(self.total_coins) + coins_amount;
+ensures self.total_shares == old(self.total_shares) + new_shares;
 ensures result == new_shares;
 
@@ -1067,7 +1067,7 @@ Return the number of coins shares are worth in pool wi ### Function `add_shares` -
fun add_shares(pool: &mut pool_u64_unbound::Pool, shareholder: address, new_shares: u128): u128
+
fun add_shares(self: &mut pool_u64_unbound::Pool, shareholder: address, new_shares: u128): u128
 
@@ -1075,8 +1075,8 @@ Return the number of coins shares are worth in pool wi
include AddSharesAbortsIf;
 include AddSharesEnsures;
-let key_exists = table::spec_contains(pool.shares, shareholder);
-ensures result == if (key_exists) { table::spec_get(pool.shares, shareholder) }
+let key_exists = table::spec_contains(self.shares, shareholder);
+ensures result == if (key_exists) { table::spec_get(self.shares, shareholder) }
 else { new_shares };
 
@@ -1087,11 +1087,11 @@ Return the number of coins shares are worth in pool wi
schema AddSharesAbortsIf {
-    pool: Pool;
+    self: Pool;
     shareholder: address;
     new_shares: u64;
-    let key_exists = table::spec_contains(pool.shares, shareholder);
-    let current_shares = table::spec_get(pool.shares, shareholder);
+    let key_exists = table::spec_contains(self.shares, shareholder);
+    let current_shares = table::spec_get(self.shares, shareholder);
     aborts_if key_exists && current_shares + new_shares > MAX_U128;
 }
 
@@ -1103,15 +1103,15 @@ Return the number of coins shares are worth in pool wi
schema AddSharesEnsures {
-    pool: Pool;
+    self: Pool;
     shareholder: address;
     new_shares: u64;
-    let key_exists = table::spec_contains(pool.shares, shareholder);
-    let current_shares = table::spec_get(pool.shares, shareholder);
+    let key_exists = table::spec_contains(self.shares, shareholder);
+    let current_shares = table::spec_get(self.shares, shareholder);
     ensures key_exists ==>
-        pool.shares == table::spec_set(old(pool.shares), shareholder, current_shares + new_shares);
+        self.shares == table::spec_set(old(self.shares), shareholder, current_shares + new_shares);
     ensures (!key_exists && new_shares > 0) ==>
-        pool.shares == table::spec_set(old(pool.shares), shareholder, new_shares);
+        self.shares == table::spec_set(old(self.shares), shareholder, new_shares);
 }
 
@@ -1138,20 +1138,22 @@ Return the number of coins shares are worth in pool wi ### Function `redeem_shares` -
public fun redeem_shares(pool: &mut pool_u64_unbound::Pool, shareholder: address, shares_to_redeem: u128): u64
+
public fun redeem_shares(self: &mut pool_u64_unbound::Pool, shareholder: address, shares_to_redeem: u128): u64
 
-
let redeemed_coins = spec_shares_to_amount_with_total_coins(pool, shares_to_redeem, pool.total_coins);
-aborts_if !spec_contains(pool, shareholder);
-aborts_if spec_shares(pool, shareholder) < shares_to_redeem;
-aborts_if pool.total_coins < redeemed_coins;
-aborts_if pool.total_shares < shares_to_redeem;
-ensures pool.total_coins == old(pool.total_coins) - redeemed_coins;
-ensures pool.total_shares == old(pool.total_shares) - shares_to_redeem;
-include shares_to_redeem > 0 ==> DeductSharesEnsures { num_shares: shares_to_redeem };
+
let redeemed_coins = spec_shares_to_amount_with_total_coins(self, shares_to_redeem, self.total_coins);
+aborts_if !spec_contains(self, shareholder);
+aborts_if spec_shares(self, shareholder) < shares_to_redeem;
+aborts_if self.total_coins < redeemed_coins;
+aborts_if self.total_shares < shares_to_redeem;
+ensures self.total_coins == old(self.total_coins) - redeemed_coins;
+ensures self.total_shares == old(self.total_shares) - shares_to_redeem;
+include shares_to_redeem > 0 ==> DeductSharesEnsures {
+    num_shares: shares_to_redeem
+};
 ensures result == redeemed_coins;
 
@@ -1162,27 +1164,28 @@ Return the number of coins shares are worth in pool wi ### Function `transfer_shares` -
public fun transfer_shares(pool: &mut pool_u64_unbound::Pool, shareholder_1: address, shareholder_2: address, shares_to_transfer: u128)
+
public fun transfer_shares(self: &mut pool_u64_unbound::Pool, shareholder_1: address, shareholder_2: address, shares_to_transfer: u128)
 
-
aborts_if (shareholder_1 != shareholder_2) && shares_to_transfer > 0 && spec_contains(pool, shareholder_2) &&
-    (spec_shares(pool, shareholder_2) + shares_to_transfer > MAX_U128);
-aborts_if !spec_contains(pool, shareholder_1);
-aborts_if spec_shares(pool, shareholder_1) < shares_to_transfer;
-ensures shareholder_1 == shareholder_2 ==> spec_shares(old(pool), shareholder_1) == spec_shares(pool, shareholder_1);
-ensures ((shareholder_1 != shareholder_2) && (spec_shares(old(pool), shareholder_1) == shares_to_transfer)) ==>
-    !spec_contains(pool, shareholder_1);
+
aborts_if (shareholder_1 != shareholder_2) && shares_to_transfer > 0 && spec_contains(self, shareholder_2) &&
+    (spec_shares(self, shareholder_2) + shares_to_transfer > MAX_U128);
+aborts_if !spec_contains(self, shareholder_1);
+aborts_if spec_shares(self, shareholder_1) < shares_to_transfer;
+ensures shareholder_1 == shareholder_2 ==> spec_shares(old(self), shareholder_1) == spec_shares(
+    self, shareholder_1);
+ensures ((shareholder_1 != shareholder_2) && (spec_shares(old(self), shareholder_1) == shares_to_transfer)) ==>
+    !spec_contains(self, shareholder_1);
 ensures (shareholder_1 != shareholder_2 && shares_to_transfer > 0) ==>
-    (spec_contains(pool, shareholder_2));
-ensures (shareholder_1 != shareholder_2 && shares_to_transfer > 0 && !spec_contains(old(pool), shareholder_2)) ==>
-    (spec_contains(pool, shareholder_2) && spec_shares(pool, shareholder_2) == shares_to_transfer);
-ensures (shareholder_1 != shareholder_2 && shares_to_transfer > 0 && spec_contains(old(pool), shareholder_2)) ==>
-    (spec_contains(pool, shareholder_2) && spec_shares(pool, shareholder_2) == spec_shares(old(pool), shareholder_2) + shares_to_transfer);
-ensures ((shareholder_1 != shareholder_2) && (spec_shares(old(pool), shareholder_1) > shares_to_transfer)) ==>
-    (spec_contains(pool, shareholder_1) && (spec_shares(pool, shareholder_1) == spec_shares(old(pool), shareholder_1) - shares_to_transfer));
+    (spec_contains(self, shareholder_2));
+ensures (shareholder_1 != shareholder_2 && shares_to_transfer > 0 && !spec_contains(old(self), shareholder_2)) ==>
+    (spec_contains(self, shareholder_2) && spec_shares(self, shareholder_2) == shares_to_transfer);
+ensures (shareholder_1 != shareholder_2 && shares_to_transfer > 0 && spec_contains(old(self), shareholder_2)) ==>
+    (spec_contains(self, shareholder_2) && spec_shares(self, shareholder_2) == spec_shares(old(self), shareholder_2) + shares_to_transfer);
+ensures ((shareholder_1 != shareholder_2) && (spec_shares(old(self), shareholder_1) > shares_to_transfer)) ==>
+    (spec_contains(self, shareholder_1) && (spec_shares(self, shareholder_1) == spec_shares(old(self), shareholder_1) - shares_to_transfer));
 
@@ -1192,17 +1195,17 @@ Return the number of coins shares are worth in pool wi ### Function `deduct_shares` -
fun deduct_shares(pool: &mut pool_u64_unbound::Pool, shareholder: address, num_shares: u128): u128
+
fun deduct_shares(self: &mut pool_u64_unbound::Pool, shareholder: address, num_shares: u128): u128
 
-
aborts_if !spec_contains(pool, shareholder);
-aborts_if spec_shares(pool, shareholder) < num_shares;
+
aborts_if !spec_contains(self, shareholder);
+aborts_if spec_shares(self, shareholder) < num_shares;
 include DeductSharesEnsures;
-let remaining_shares = table::spec_get(pool.shares, shareholder) - num_shares;
-ensures remaining_shares > 0 ==> result == table::spec_get(pool.shares, shareholder);
+let remaining_shares = table::spec_get(self.shares, shareholder) - num_shares;
+ensures remaining_shares > 0 ==> result == table::spec_get(self.shares, shareholder);
 ensures remaining_shares == 0 ==> result == 0;
 
@@ -1213,12 +1216,12 @@ Return the number of coins shares are worth in pool wi
schema DeductSharesEnsures {
-    pool: Pool;
+    self: Pool;
     shareholder: address;
     num_shares: u64;
-    let remaining_shares = table::spec_get(pool.shares, shareholder) - num_shares;
-    ensures remaining_shares > 0 ==> table::spec_get(pool.shares, shareholder) == remaining_shares;
-    ensures remaining_shares == 0 ==> !table::spec_contains(pool.shares, shareholder);
+    let remaining_shares = table::spec_get(self.shares, shareholder) - num_shares;
+    ensures remaining_shares > 0 ==> table::spec_get(self.shares, shareholder) == remaining_shares;
+    ensures remaining_shares == 0 ==> !table::spec_contains(self.shares, shareholder);
 }
 
@@ -1229,18 +1232,18 @@ Return the number of coins shares are worth in pool wi ### Function `amount_to_shares_with_total_coins` -
public fun amount_to_shares_with_total_coins(pool: &pool_u64_unbound::Pool, coins_amount: u64, total_coins: u64): u128
+
public fun amount_to_shares_with_total_coins(self: &pool_u64_unbound::Pool, coins_amount: u64, total_coins: u64): u128
 
-
aborts_if pool.total_coins > 0 && pool.total_shares > 0
-    && (coins_amount * pool.total_shares) / total_coins > MAX_U128;
-aborts_if (pool.total_coins == 0 || pool.total_shares == 0)
-    && coins_amount * pool.scaling_factor > MAX_U128;
-aborts_if pool.total_coins > 0 && pool.total_shares > 0 && total_coins == 0;
-ensures result == spec_amount_to_shares_with_total_coins(pool, coins_amount, total_coins);
+
aborts_if self.total_coins > 0 && self.total_shares > 0
+    && (coins_amount * self.total_shares) / total_coins > MAX_U128;
+aborts_if (self.total_coins == 0 || self.total_shares == 0)
+    && coins_amount * self.scaling_factor > MAX_U128;
+aborts_if self.total_coins > 0 && self.total_shares > 0 && total_coins == 0;
+ensures result == spec_amount_to_shares_with_total_coins(self, coins_amount, total_coins);
 
@@ -1250,15 +1253,15 @@ Return the number of coins shares are worth in pool wi ### Function `shares_to_amount_with_total_coins` -
public fun shares_to_amount_with_total_coins(pool: &pool_u64_unbound::Pool, shares: u128, total_coins: u64): u64
+
public fun shares_to_amount_with_total_coins(self: &pool_u64_unbound::Pool, shares: u128, total_coins: u64): u64
 
-
aborts_if pool.total_coins > 0 && pool.total_shares > 0
-    && (shares * total_coins) / pool.total_shares > MAX_U64;
-ensures result == spec_shares_to_amount_with_total_coins(pool, shares, total_coins);
+
aborts_if self.total_coins > 0 && self.total_shares > 0
+    && (shares * total_coins) / self.total_shares > MAX_U64;
+ensures result == spec_shares_to_amount_with_total_coins(self, shares, total_coins);
 
@@ -1284,7 +1287,7 @@ Return the number of coins shares are worth in pool wi ### Function `multiply_then_divide` -
public fun multiply_then_divide(_pool: &pool_u64_unbound::Pool, x: u128, y: u128, z: u128): u128
+
public fun multiply_then_divide(self: &pool_u64_unbound::Pool, x: u128, y: u128, z: u128): u128
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/simple_map.md b/aptos-move/framework/aptos-stdlib/doc/simple_map.md index ea5040273580b..3fc1a1a0390cb 100644 --- a/aptos-move/framework/aptos-stdlib/doc/simple_map.md +++ b/aptos-move/framework/aptos-stdlib/doc/simple_map.md @@ -149,7 +149,7 @@ Map key is not found -
public fun length<Key: store, Value: store>(map: &simple_map::SimpleMap<Key, Value>): u64
+
public fun length<Key: store, Value: store>(self: &simple_map::SimpleMap<Key, Value>): u64
 
@@ -158,8 +158,8 @@ Map key is not found Implementation -
public fun length<Key: store, Value: store>(map: &SimpleMap<Key, Value>): u64 {
-    vector::length(&map.data)
+
public fun length<Key: store, Value: store>(self: &SimpleMap<Key, Value>): u64 {
+    vector::length(&self.data)
 }
 
@@ -257,7 +257,7 @@ This function is deprecated, use new instead. -
public fun borrow<Key: store, Value: store>(map: &simple_map::SimpleMap<Key, Value>, key: &Key): &Value
+
public fun borrow<Key: store, Value: store>(self: &simple_map::SimpleMap<Key, Value>, key: &Key): &Value
 
@@ -267,13 +267,13 @@ This function is deprecated, use new instead.
public fun borrow<Key: store, Value: store>(
-    map: &SimpleMap<Key, Value>,
+    self: &SimpleMap<Key, Value>,
     key: &Key,
 ): &Value {
-    let maybe_idx = find(map, key);
+    let maybe_idx = find(self, key);
     assert!(option::is_some(&maybe_idx), error::invalid_argument(EKEY_NOT_FOUND));
     let idx = option::extract(&mut maybe_idx);
-    &vector::borrow(&map.data, idx).value
+    &vector::borrow(&self.data, idx).value
 }
 
@@ -287,7 +287,7 @@ This function is deprecated, use new instead. -
public fun borrow_mut<Key: store, Value: store>(map: &mut simple_map::SimpleMap<Key, Value>, key: &Key): &mut Value
+
public fun borrow_mut<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, key: &Key): &mut Value
 
@@ -297,13 +297,13 @@ This function is deprecated, use new instead.
public fun borrow_mut<Key: store, Value: store>(
-    map: &mut SimpleMap<Key, Value>,
+    self: &mut SimpleMap<Key, Value>,
     key: &Key,
 ): &mut Value {
-    let maybe_idx = find(map, key);
+    let maybe_idx = find(self, key);
     assert!(option::is_some(&maybe_idx), error::invalid_argument(EKEY_NOT_FOUND));
     let idx = option::extract(&mut maybe_idx);
-    &mut vector::borrow_mut(&mut map.data, idx).value
+    &mut vector::borrow_mut(&mut self.data, idx).value
 }
 
@@ -317,7 +317,7 @@ This function is deprecated, use new instead. -
public fun contains_key<Key: store, Value: store>(map: &simple_map::SimpleMap<Key, Value>, key: &Key): bool
+
public fun contains_key<Key: store, Value: store>(self: &simple_map::SimpleMap<Key, Value>, key: &Key): bool
 
@@ -327,10 +327,10 @@ This function is deprecated, use new instead.
public fun contains_key<Key: store, Value: store>(
-    map: &SimpleMap<Key, Value>,
+    self: &SimpleMap<Key, Value>,
     key: &Key,
 ): bool {
-    let maybe_idx = find(map, key);
+    let maybe_idx = find(self, key);
     option::is_some(&maybe_idx)
 }
 
@@ -345,7 +345,7 @@ This function is deprecated, use new instead. -
public fun destroy_empty<Key: store, Value: store>(map: simple_map::SimpleMap<Key, Value>)
+
public fun destroy_empty<Key: store, Value: store>(self: simple_map::SimpleMap<Key, Value>)
 
@@ -354,8 +354,8 @@ This function is deprecated, use new instead. Implementation -
public fun destroy_empty<Key: store, Value: store>(map: SimpleMap<Key, Value>) {
-    let SimpleMap { data } = map;
+
public fun destroy_empty<Key: store, Value: store>(self: SimpleMap<Key, Value>) {
+    let SimpleMap { data } = self;
     vector::destroy_empty(data);
 }
 
@@ -371,7 +371,7 @@ This function is deprecated, use new instead. Add a key/value pair to the map. The key must not already exist. -
public fun add<Key: store, Value: store>(map: &mut simple_map::SimpleMap<Key, Value>, key: Key, value: Value)
+
public fun add<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, key: Key, value: Value)
 
@@ -381,14 +381,14 @@ Add a key/value pair to the map. The key must not already exist.
public fun add<Key: store, Value: store>(
-    map: &mut SimpleMap<Key, Value>,
+    self: &mut SimpleMap<Key, Value>,
     key: Key,
     value: Value,
 ) {
-    let maybe_idx = find(map, &key);
+    let maybe_idx = find(self, &key);
     assert!(option::is_none(&maybe_idx), error::invalid_argument(EKEY_ALREADY_EXISTS));
 
-    vector::push_back(&mut map.data, Element { key, value });
+    vector::push_back(&mut self.data, Element { key, value });
 }
 
@@ -403,7 +403,7 @@ Add a key/value pair to the map. The key must not already exist. Add multiple key/value pairs to the map. The keys must not already exist. -
public fun add_all<Key: store, Value: store>(map: &mut simple_map::SimpleMap<Key, Value>, keys: vector<Key>, values: vector<Value>)
+
public fun add_all<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, keys: vector<Key>, values: vector<Value>)
 
@@ -413,12 +413,12 @@ Add multiple key/value pairs to the map. The keys must not already exist.
public fun add_all<Key: store, Value: store>(
-    map: &mut SimpleMap<Key, Value>,
+    self: &mut SimpleMap<Key, Value>,
     keys: vector<Key>,
     values: vector<Value>,
 ) {
     vector::zip(keys, values, |key, value| {
-        add(map, key, value);
+        add(self, key, value);
     });
 }
 
@@ -434,7 +434,7 @@ Add multiple key/value pairs to the map. The keys must not already exist. Insert key/value pair or update an existing key to a new value -
public fun upsert<Key: store, Value: store>(map: &mut simple_map::SimpleMap<Key, Value>, key: Key, value: Value): (option::Option<Key>, option::Option<Value>)
+
public fun upsert<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, key: Key, value: Value): (option::Option<Key>, option::Option<Value>)
 
@@ -444,11 +444,11 @@ Insert key/value pair or update an existing key to a new value
public fun upsert<Key: store, Value: store>(
-    map: &mut SimpleMap<Key, Value>,
+    self: &mut SimpleMap<Key, Value>,
     key: Key,
     value: Value
 ): (std::option::Option<Key>, std::option::Option<Value>) {
-    let data = &mut map.data;
+    let data = &mut self.data;
     let len = vector::length(data);
     let i = 0;
     while (i < len) {
@@ -461,7 +461,7 @@ Insert key/value pair or update an existing key to a new value
         };
         i = i + 1;
     };
-    vector::push_back(&mut map.data, Element { key, value });
+    vector::push_back(&mut self.data, Element { key, value });
     (std::option::none(), std::option::none())
 }
 
@@ -477,7 +477,7 @@ Insert key/value pair or update an existing key to a new value Return all keys in the map. This requires keys to be copyable. -
public fun keys<Key: copy, Value>(map: &simple_map::SimpleMap<Key, Value>): vector<Key>
+
public fun keys<Key: copy, Value>(self: &simple_map::SimpleMap<Key, Value>): vector<Key>
 
@@ -486,8 +486,8 @@ Return all keys in the map. This requires keys to be copyable. Implementation -
public fun keys<Key: copy, Value>(map: &SimpleMap<Key, Value>): vector<Key> {
-    vector::map_ref(&map.data, |e| {
+
public fun keys<Key: copy, Value>(self: &SimpleMap<Key, Value>): vector<Key> {
+    vector::map_ref(&self.data, |e| {
         let e: &Element<Key, Value> = e;
         e.key
     })
@@ -505,7 +505,7 @@ Return all keys in the map. This requires keys to be copyable.
 Return all values in the map. This requires values to be copyable.
 
 
-
public fun values<Key, Value: copy>(map: &simple_map::SimpleMap<Key, Value>): vector<Value>
+
public fun values<Key, Value: copy>(self: &simple_map::SimpleMap<Key, Value>): vector<Value>
 
@@ -514,8 +514,8 @@ Return all values in the map. This requires values to be copyable. Implementation -
public fun values<Key, Value: copy>(map: &SimpleMap<Key, Value>): vector<Value> {
-    vector::map_ref(&map.data, |e| {
+
public fun values<Key, Value: copy>(self: &SimpleMap<Key, Value>): vector<Value> {
+    vector::map_ref(&self.data, |e| {
         let e: &Element<Key, Value> = e;
         e.value
     })
@@ -534,7 +534,7 @@ Transform the map into two vectors with the keys and values respectively
 Primarily used to destroy a map
 
 
-
public fun to_vec_pair<Key: store, Value: store>(map: simple_map::SimpleMap<Key, Value>): (vector<Key>, vector<Value>)
+
public fun to_vec_pair<Key: store, Value: store>(self: simple_map::SimpleMap<Key, Value>): (vector<Key>, vector<Value>)
 
@@ -544,10 +544,10 @@ Primarily used to destroy a map
public fun to_vec_pair<Key: store, Value: store>(
-    map: SimpleMap<Key, Value>): (vector<Key>, vector<Value>) {
+    self: SimpleMap<Key, Value>): (vector<Key>, vector<Value>) {
     let keys: vector<Key> = vector::empty();
     let values: vector<Value> = vector::empty();
-    let SimpleMap { data } = map;
+    let SimpleMap { data } = self;
     vector::for_each(data, |e| {
         let Element { key, value } = e;
         vector::push_back(&mut keys, key);
@@ -569,7 +569,7 @@ For maps that cannot be dropped this is a utility to destroy them
 using lambdas to destroy the individual keys and values.
 
 
-
public fun destroy<Key: store, Value: store>(map: simple_map::SimpleMap<Key, Value>, dk: |Key|, dv: |Value|)
+
public fun destroy<Key: store, Value: store>(self: simple_map::SimpleMap<Key, Value>, dk: |Key|, dv: |Value|)
 
@@ -579,11 +579,11 @@ using lambdas to destroy the individual keys and values.
public inline fun destroy<Key: store, Value: store>(
-    map: SimpleMap<Key, Value>,
+    self: SimpleMap<Key, Value>,
     dk: |Key|,
     dv: |Value|
 ) {
-    let (keys, values) = to_vec_pair(map);
+    let (keys, values) = to_vec_pair(self);
     vector::destroy(keys, |_k| dk(_k));
     vector::destroy(values, |_v| dv(_v));
 }
@@ -600,7 +600,7 @@ using lambdas to destroy the individual keys and values.
 Remove a key/value pair from the map. The key must exist.
 
 
-
public fun remove<Key: store, Value: store>(map: &mut simple_map::SimpleMap<Key, Value>, key: &Key): (Key, Value)
+
public fun remove<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, key: &Key): (Key, Value)
 
@@ -610,13 +610,13 @@ Remove a key/value pair from the map. The key must exist.
public fun remove<Key: store, Value: store>(
-    map: &mut SimpleMap<Key, Value>,
+    self: &mut SimpleMap<Key, Value>,
     key: &Key,
 ): (Key, Value) {
-    let maybe_idx = find(map, key);
+    let maybe_idx = find(self, key);
     assert!(option::is_some(&maybe_idx), error::invalid_argument(EKEY_NOT_FOUND));
     let placement = option::extract(&mut maybe_idx);
-    let Element { key, value } = vector::swap_remove(&mut map.data, placement);
+    let Element { key, value } = vector::swap_remove(&mut self.data, placement);
     (key, value)
 }
 
@@ -631,7 +631,7 @@ Remove a key/value pair from the map. The key must exist. -
fun find<Key: store, Value: store>(map: &simple_map::SimpleMap<Key, Value>, key: &Key): option::Option<u64>
+
fun find<Key: store, Value: store>(self: &simple_map::SimpleMap<Key, Value>, key: &Key): option::Option<u64>
 
@@ -641,13 +641,13 @@ Remove a key/value pair from the map. The key must exist.
fun find<Key: store, Value: store>(
-    map: &SimpleMap<Key, Value>,
+    self: &SimpleMap<Key, Value>,
     key: &Key,
 ): option::Option<u64> {
-    let leng = vector::length(&map.data);
+    let leng = vector::length(&self.data);
     let i = 0;
     while (i < leng) {
-        let element = vector::borrow(&map.data, i);
+        let element = vector::borrow(&self.data, i);
         if (&element.key == key) {
             return option::some(i)
         };
@@ -710,7 +710,7 @@ Remove a key/value pair from the map. The key must exist.
 ### Function `length`
 
 
-
public fun length<Key: store, Value: store>(map: &simple_map::SimpleMap<Key, Value>): u64
+
public fun length<Key: store, Value: store>(self: &simple_map::SimpleMap<Key, Value>): u64
 
@@ -785,7 +785,7 @@ Remove a key/value pair from the map. The key must exist. ### Function `borrow` -
public fun borrow<Key: store, Value: store>(map: &simple_map::SimpleMap<Key, Value>, key: &Key): &Value
+
public fun borrow<Key: store, Value: store>(self: &simple_map::SimpleMap<Key, Value>, key: &Key): &Value
 
@@ -801,7 +801,7 @@ Remove a key/value pair from the map. The key must exist. ### Function `borrow_mut` -
public fun borrow_mut<Key: store, Value: store>(map: &mut simple_map::SimpleMap<Key, Value>, key: &Key): &mut Value
+
public fun borrow_mut<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, key: &Key): &mut Value
 
@@ -817,7 +817,7 @@ Remove a key/value pair from the map. The key must exist. ### Function `contains_key` -
public fun contains_key<Key: store, Value: store>(map: &simple_map::SimpleMap<Key, Value>, key: &Key): bool
+
public fun contains_key<Key: store, Value: store>(self: &simple_map::SimpleMap<Key, Value>, key: &Key): bool
 
@@ -833,7 +833,7 @@ Remove a key/value pair from the map. The key must exist. ### Function `destroy_empty` -
public fun destroy_empty<Key: store, Value: store>(map: simple_map::SimpleMap<Key, Value>)
+
public fun destroy_empty<Key: store, Value: store>(self: simple_map::SimpleMap<Key, Value>)
 
@@ -849,7 +849,7 @@ Remove a key/value pair from the map. The key must exist. ### Function `add` -
public fun add<Key: store, Value: store>(map: &mut simple_map::SimpleMap<Key, Value>, key: Key, value: Value)
+
public fun add<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, key: Key, value: Value)
 
@@ -865,7 +865,7 @@ Remove a key/value pair from the map. The key must exist. ### Function `add_all` -
public fun add_all<Key: store, Value: store>(map: &mut simple_map::SimpleMap<Key, Value>, keys: vector<Key>, values: vector<Value>)
+
public fun add_all<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, keys: vector<Key>, values: vector<Value>)
 
@@ -881,7 +881,7 @@ Remove a key/value pair from the map. The key must exist. ### Function `upsert` -
public fun upsert<Key: store, Value: store>(map: &mut simple_map::SimpleMap<Key, Value>, key: Key, value: Value): (option::Option<Key>, option::Option<Value>)
+
public fun upsert<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, key: Key, value: Value): (option::Option<Key>, option::Option<Value>)
 
@@ -890,12 +890,14 @@ Remove a key/value pair from the map. The key must exist.
pragma intrinsic;
 pragma opaque;
 aborts_if [abstract] false;
-ensures [abstract] !spec_contains_key(old(map), key) ==> option::is_none(result_1);
-ensures [abstract] !spec_contains_key(old(map), key) ==> option::is_none(result_2);
-ensures [abstract] spec_contains_key(map, key);
-ensures [abstract] spec_get(map, key) == value;
-ensures [abstract] spec_contains_key(old(map), key) ==> ((option::is_some(result_1)) && (option::spec_borrow(result_1) == key));
-ensures [abstract] spec_contains_key(old(map), key) ==> ((option::is_some(result_2)) && (option::spec_borrow(result_2) == spec_get(old(map), key)));
+ensures [abstract] !spec_contains_key(old(self), key) ==> option::is_none(result_1);
+ensures [abstract] !spec_contains_key(old(self), key) ==> option::is_none(result_2);
+ensures [abstract] spec_contains_key(self, key);
+ensures [abstract] spec_get(self, key) == value;
+ensures [abstract] spec_contains_key(old(self), key) ==> ((option::is_some(result_1)) && (option::spec_borrow(result_1) == key));
+ensures [abstract] spec_contains_key(old(self), key) ==> ((option::is_some(result_2)) && (option::spec_borrow(result_2) == spec_get(old(
+    self
+), key)));
 
@@ -950,7 +952,7 @@ Remove a key/value pair from the map. The key must exist. ### Function `keys` -
public fun keys<Key: copy, Value>(map: &simple_map::SimpleMap<Key, Value>): vector<Key>
+
public fun keys<Key: copy, Value>(self: &simple_map::SimpleMap<Key, Value>): vector<Key>
 
@@ -966,7 +968,7 @@ Remove a key/value pair from the map. The key must exist. ### Function `values` -
public fun values<Key, Value: copy>(map: &simple_map::SimpleMap<Key, Value>): vector<Value>
+
public fun values<Key, Value: copy>(self: &simple_map::SimpleMap<Key, Value>): vector<Value>
 
@@ -982,7 +984,7 @@ Remove a key/value pair from the map. The key must exist. ### Function `to_vec_pair` -
public fun to_vec_pair<Key: store, Value: store>(map: simple_map::SimpleMap<Key, Value>): (vector<Key>, vector<Value>)
+
public fun to_vec_pair<Key: store, Value: store>(self: simple_map::SimpleMap<Key, Value>): (vector<Key>, vector<Value>)
 
@@ -992,9 +994,9 @@ Remove a key/value pair from the map. The key must exist. pragma opaque; ensures [abstract] forall k: Key: vector::spec_contains(result_1, k) <==> - spec_contains_key(map, k); + spec_contains_key(self, k); ensures [abstract] forall i in 0..len(result_1): - spec_get(map, vector::borrow(result_1, i)) == vector::borrow(result_2, i); + spec_get(self, vector::borrow(result_1, i)) == vector::borrow(result_2, i);
@@ -1004,7 +1006,7 @@ Remove a key/value pair from the map. The key must exist. ### Function `remove` -
public fun remove<Key: store, Value: store>(map: &mut simple_map::SimpleMap<Key, Value>, key: &Key): (Key, Value)
+
public fun remove<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, key: &Key): (Key, Value)
 
@@ -1020,7 +1022,7 @@ Remove a key/value pair from the map. The key must exist. ### Function `find` -
fun find<Key: store, Value: store>(map: &simple_map::SimpleMap<Key, Value>, key: &Key): option::Option<u64>
+
fun find<Key: store, Value: store>(self: &simple_map::SimpleMap<Key, Value>, key: &Key): option::Option<u64>
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/smart_table.md b/aptos-move/framework/aptos-stdlib/doc/smart_table.md index d69931fabb804..ac5388661f2ca 100644 --- a/aptos-move/framework/aptos-stdlib/doc/smart_table.md +++ b/aptos-move/framework/aptos-stdlib/doc/smart_table.md @@ -360,7 +360,7 @@ Destroy empty table. Aborts if it's not empty. -
public fun destroy_empty<K, V>(table: smart_table::SmartTable<K, V>)
+
public fun destroy_empty<K, V>(self: smart_table::SmartTable<K, V>)
 
@@ -369,14 +369,14 @@ Aborts if it's not empty. Implementation -
public fun destroy_empty<K, V>(table: SmartTable<K, V>) {
-    assert!(table.size == 0, error::invalid_argument(ENOT_EMPTY));
+
public fun destroy_empty<K, V>(self: SmartTable<K, V>) {
+    assert!(self.size == 0, error::invalid_argument(ENOT_EMPTY));
     let i = 0;
-    while (i < table.num_buckets) {
-        vector::destroy_empty(table_with_length::remove(&mut table.buckets, i));
+    while (i < self.num_buckets) {
+        vector::destroy_empty(table_with_length::remove(&mut self.buckets, i));
         i = i + 1;
     };
-    let SmartTable { buckets, num_buckets: _, level: _, size: _, split_load_threshold: _, target_bucket_size: _ } = table;
+    let SmartTable { buckets, num_buckets: _, level: _, size: _, split_load_threshold: _, target_bucket_size: _ } = self;
     table_with_length::destroy_empty(buckets);
 }
 
@@ -392,7 +392,7 @@ Aborts if it's not empty. Destroy a table completely when V has drop. -
public fun destroy<K: drop, V: drop>(table: smart_table::SmartTable<K, V>)
+
public fun destroy<K: drop, V: drop>(self: smart_table::SmartTable<K, V>)
 
@@ -401,9 +401,9 @@ Destroy a table completely when V has drop. Implementation -
public fun destroy<K: drop, V: drop>(table: SmartTable<K, V>) {
-    clear(&mut table);
-    destroy_empty(table);
+
public fun destroy<K: drop, V: drop>(self: SmartTable<K, V>) {
+    clear(&mut self);
+    destroy_empty(self);
 }
 
@@ -418,7 +418,7 @@ Destroy a table completely when V has drop. Clear a table completely when T has drop. -
public fun clear<K: drop, V: drop>(table: &mut smart_table::SmartTable<K, V>)
+
public fun clear<K: drop, V: drop>(self: &mut smart_table::SmartTable<K, V>)
 
@@ -427,16 +427,16 @@ Clear a table completely when T has drop. Implementation -
public fun clear<K: drop, V: drop>(table: &mut SmartTable<K, V>) {
-    *table_with_length::borrow_mut(&mut table.buckets, 0) = vector::empty();
+
public fun clear<K: drop, V: drop>(self: &mut SmartTable<K, V>) {
+    *table_with_length::borrow_mut(&mut self.buckets, 0) = vector::empty();
     let i = 1;
-    while (i < table.num_buckets) {
-        table_with_length::remove(&mut table.buckets, i);
+    while (i < self.num_buckets) {
+        table_with_length::remove(&mut self.buckets, i);
         i = i + 1;
     };
-    table.num_buckets = 1;
-    table.level = 0;
-    table.size = 0;
+    self.num_buckets = 1;
+    self.level = 0;
+    self.size = 0;
 }
 
@@ -455,7 +455,7 @@ Abort if key already exists. Note: This method may occasionally cost much more gas when triggering bucket split. -
public fun add<K, V>(table: &mut smart_table::SmartTable<K, V>, key: K, value: V)
+
public fun add<K, V>(self: &mut smart_table::SmartTable<K, V>, key: K, value: V)
 
@@ -464,10 +464,10 @@ Note: This method may occasionally cost much more gas when triggering bucket spl Implementation -
public fun add<K, V>(table: &mut SmartTable<K, V>, key: K, value: V) {
+
public fun add<K, V>(self: &mut SmartTable<K, V>, key: K, value: V) {
     let hash = sip_hash_from_value(&key);
-    let index = bucket_index(table.level, table.num_buckets, hash);
-    let bucket = table_with_length::borrow_mut(&mut table.buckets, index);
+    let index = bucket_index(self.level, self.num_buckets, hash);
+    let bucket = table_with_length::borrow_mut(&mut self.buckets, index);
     // We set a per-bucket limit here with a upper bound (10000) that nobody should normally reach.
     assert!(vector::length(bucket) <= 10000, error::permission_denied(EEXCEED_MAX_BUCKET_SIZE));
     assert!(vector::all(bucket, | entry | {
@@ -475,15 +475,15 @@ Note: This method may occasionally cost much more gas when triggering bucket spl
         &e.key != &key
     }), error::invalid_argument(EALREADY_EXIST));
     let e = Entry { hash, key, value };
-    if (table.target_bucket_size == 0) {
+    if (self.target_bucket_size == 0) {
         let estimated_entry_size = max(size_of_val(&e), 1);
-        table.target_bucket_size = max(1024 /* free_write_quota */ / estimated_entry_size, 1);
+        self.target_bucket_size = max(1024 /* free_write_quota */ / estimated_entry_size, 1);
     };
     vector::push_back(bucket, e);
-    table.size = table.size + 1;
+    self.size = self.size + 1;
 
-    if (load_factor(table) >= (table.split_load_threshold as u64)) {
-        split_one_bucket(table);
+    if (load_factor(self) >= (self.split_load_threshold as u64)) {
+        split_one_bucket(self);
     }
 }
 
@@ -499,7 +499,7 @@ Note: This method may occasionally cost much more gas when triggering bucket spl Add multiple key/value pairs to the smart table. The keys must not already exist. -
public fun add_all<K, V>(table: &mut smart_table::SmartTable<K, V>, keys: vector<K>, values: vector<V>)
+
public fun add_all<K, V>(self: &mut smart_table::SmartTable<K, V>, keys: vector<K>, values: vector<V>)
 
@@ -508,8 +508,8 @@ Add multiple key/value pairs to the smart table. The keys must not already exist Implementation -
public fun add_all<K, V>(table: &mut SmartTable<K, V>, keys: vector<K>, values: vector<V>) {
-    vector::zip(keys, values, |key, value| { add(table, key, value); });
+
public fun add_all<K, V>(self: &mut SmartTable<K, V>, keys: vector<K>, values: vector<V>) {
+    vector::zip(keys, values, |key, value| { add(self, key, value); });
 }
 
@@ -557,7 +557,7 @@ view of the whole table. Disclaimer: This function may be costly as the smart table may be huge in size. Use it at your own discretion. -
public fun to_simple_map<K: copy, drop, store, V: copy, store>(table: &smart_table::SmartTable<K, V>): simple_map::SimpleMap<K, V>
+
public fun to_simple_map<K: copy, drop, store, V: copy, store>(self: &smart_table::SmartTable<K, V>): simple_map::SimpleMap<K, V>
 
@@ -567,12 +567,12 @@ Disclaimer: This function may be costly as the smart table may be huge in size.
public fun to_simple_map<K: store + copy + drop, V: store + copy>(
-    table: &SmartTable<K, V>,
+    self: &SmartTable<K, V>,
 ): SimpleMap<K, V> {
     let i = 0;
     let res = simple_map::new<K, V>();
-    while (i < table.num_buckets) {
-        let (keys, values) = unzip_entries(table_with_length::borrow(&table.buckets, i));
+    while (i < self.num_buckets) {
+        let (keys, values) = unzip_entries(table_with_length::borrow(&self.buckets, i));
         simple_map::add_all(&mut res, keys, values);
         i = i + 1;
     };
@@ -594,7 +594,7 @@ For a large enough smart table this function will fail due to execution gas limi
 keys_paginated should be used instead.
 
 
-
public fun keys<K: copy, drop, store, V: copy, store>(table_ref: &smart_table::SmartTable<K, V>): vector<K>
+
public fun keys<K: copy, drop, store, V: copy, store>(self: &smart_table::SmartTable<K, V>): vector<K>
 
@@ -604,9 +604,9 @@ For a large enough smart table this function will fail due to execution gas limi
public fun keys<K: store + copy + drop, V: store + copy>(
-    table_ref: &SmartTable<K, V>
+    self: &SmartTable<K, V>
 ): vector<K> {
-    let (keys, _, _) = keys_paginated(table_ref, 0, 0, length(table_ref));
+    let (keys, _, _) = keys_paginated(self, 0, 0, length(self));
     keys
 }
 
@@ -634,7 +634,7 @@ returned bucket index and vector index value options are both none, which means pagination is complete. For an example, see test_keys(). -
public fun keys_paginated<K: copy, drop, store, V: copy, store>(table_ref: &smart_table::SmartTable<K, V>, starting_bucket_index: u64, starting_vector_index: u64, num_keys_to_get: u64): (vector<K>, option::Option<u64>, option::Option<u64>)
+
public fun keys_paginated<K: copy, drop, store, V: copy, store>(self: &smart_table::SmartTable<K, V>, starting_bucket_index: u64, starting_vector_index: u64, num_keys_to_get: u64): (vector<K>, option::Option<u64>, option::Option<u64>)
 
@@ -644,7 +644,7 @@ pagination is complete. For an example, see test_keys().
public fun keys_paginated<K: store + copy + drop, V: store + copy>(
-    table_ref: &SmartTable<K, V>,
+    self: &SmartTable<K, V>,
     starting_bucket_index: u64,
     starting_vector_index: u64,
     num_keys_to_get: u64,
@@ -653,8 +653,8 @@ pagination is complete. For an example, see test_keys().
     Option<u64>,
     Option<u64>,
 ) {
-    let num_buckets = table_ref.num_buckets;
-    let buckets_ref = &table_ref.buckets;
+    let num_buckets = self.num_buckets;
+    let buckets_ref = &self.buckets;
     assert!(starting_bucket_index < num_buckets, EINVALID_BUCKET_INDEX);
     let bucket_ref = table_with_length::borrow(buckets_ref, starting_bucket_index);
     let bucket_length = vector::length(bucket_ref);
@@ -707,7 +707,7 @@ pagination is complete. For an example, see test_keys().
 Decide which is the next bucket to split and split it into two with the elements inside the bucket.
 
 
-
fun split_one_bucket<K, V>(table: &mut smart_table::SmartTable<K, V>)
+
fun split_one_bucket<K, V>(self: &mut smart_table::SmartTable<K, V>)
 
@@ -716,23 +716,23 @@ Decide which is the next bucket to split and split it into two with the elements Implementation -
fun split_one_bucket<K, V>(table: &mut SmartTable<K, V>) {
-    let new_bucket_index = table.num_buckets;
+
fun split_one_bucket<K, V>(self: &mut SmartTable<K, V>) {
+    let new_bucket_index = self.num_buckets;
     // the next bucket to split is num_bucket without the most significant bit.
-    let to_split = new_bucket_index ^ (1 << table.level);
-    table.num_buckets = new_bucket_index + 1;
+    let to_split = new_bucket_index ^ (1 << self.level);
+    self.num_buckets = new_bucket_index + 1;
     // if the whole level is splitted once, bump the level.
-    if (to_split + 1 == 1 << table.level) {
-        table.level = table.level + 1;
+    if (to_split + 1 == 1 << self.level) {
+        self.level = self.level + 1;
     };
-    let old_bucket = table_with_length::borrow_mut(&mut table.buckets, to_split);
+    let old_bucket = table_with_length::borrow_mut(&mut self.buckets, to_split);
     // partition the bucket, [0..p) stays in old bucket, [p..len) goes to new bucket
     let p = vector::partition(old_bucket, |e| {
         let entry: &Entry<K, V> = e; // Explicit type to satisfy compiler
-        bucket_index(table.level, table.num_buckets, entry.hash) != new_bucket_index
+        bucket_index(self.level, self.num_buckets, entry.hash) != new_bucket_index
     });
     let new_bucket = vector::trim_reverse(old_bucket, p);
-    table_with_length::add(&mut table.buckets, new_bucket_index, new_bucket);
+    table_with_length::add(&mut self.buckets, new_bucket_index, new_bucket);
 }
 
@@ -782,7 +782,7 @@ Acquire an immutable reference to the value which key maps to. Aborts if there is no entry for key. -
public fun borrow<K: drop, V>(table: &smart_table::SmartTable<K, V>, key: K): &V
+
public fun borrow<K: drop, V>(self: &smart_table::SmartTable<K, V>, key: K): &V
 
@@ -791,9 +791,9 @@ Aborts if there is no entry for key. Implementation -
public fun borrow<K: drop, V>(table: &SmartTable<K, V>, key: K): &V {
-    let index = bucket_index(table.level, table.num_buckets, sip_hash_from_value(&key));
-    let bucket = table_with_length::borrow(&table.buckets, index);
+
public fun borrow<K: drop, V>(self: &SmartTable<K, V>, key: K): &V {
+    let index = bucket_index(self.level, self.num_buckets, sip_hash_from_value(&key));
+    let bucket = table_with_length::borrow(&self.buckets, index);
     let i = 0;
     let len = vector::length(bucket);
     while (i < len) {
@@ -819,7 +819,7 @@ Acquire an immutable reference to the value which key maps to.
 Returns specified default value if there is no entry for key.
 
 
-
public fun borrow_with_default<K: copy, drop, V>(table: &smart_table::SmartTable<K, V>, key: K, default: &V): &V
+
public fun borrow_with_default<K: copy, drop, V>(self: &smart_table::SmartTable<K, V>, key: K, default: &V): &V
 
@@ -828,11 +828,11 @@ Returns specified default value if there is no entry for key. Implementation -
public fun borrow_with_default<K: copy + drop, V>(table: &SmartTable<K, V>, key: K, default: &V): &V {
-    if (!contains(table, copy key)) {
+
public fun borrow_with_default<K: copy + drop, V>(self: &SmartTable<K, V>, key: K, default: &V): &V {
+    if (!contains(self, copy key)) {
         default
     } else {
-        borrow(table, copy key)
+        borrow(self, copy key)
     }
 }
 
@@ -849,7 +849,7 @@ Acquire a mutable reference to the value which key maps to. Aborts if there is no entry for key. -
public fun borrow_mut<K: drop, V>(table: &mut smart_table::SmartTable<K, V>, key: K): &mut V
+
public fun borrow_mut<K: drop, V>(self: &mut smart_table::SmartTable<K, V>, key: K): &mut V
 
@@ -858,9 +858,9 @@ Aborts if there is no entry for key. Implementation -
public fun borrow_mut<K: drop, V>(table: &mut SmartTable<K, V>, key: K): &mut V {
-    let index = bucket_index(table.level, table.num_buckets, sip_hash_from_value(&key));
-    let bucket = table_with_length::borrow_mut(&mut table.buckets, index);
+
public fun borrow_mut<K: drop, V>(self: &mut SmartTable<K, V>, key: K): &mut V {
+    let index = bucket_index(self.level, self.num_buckets, sip_hash_from_value(&key));
+    let bucket = table_with_length::borrow_mut(&mut self.buckets, index);
     let i = 0;
     let len = vector::length(bucket);
     while (i < len) {
@@ -886,7 +886,7 @@ Acquire a mutable reference to the value which key maps to.
 Insert the pair (key, default) first if there is no entry for key.
 
 
-
public fun borrow_mut_with_default<K: copy, drop, V: drop>(table: &mut smart_table::SmartTable<K, V>, key: K, default: V): &mut V
+
public fun borrow_mut_with_default<K: copy, drop, V: drop>(self: &mut smart_table::SmartTable<K, V>, key: K, default: V): &mut V
 
@@ -896,14 +896,14 @@ Insert the pair (key, default) first if there is no en
public fun borrow_mut_with_default<K: copy + drop, V: drop>(
-    table: &mut SmartTable<K, V>,
+    self: &mut SmartTable<K, V>,
     key: K,
     default: V
 ): &mut V {
-    if (!contains(table, copy key)) {
-        add(table, copy key, default)
+    if (!contains(self, copy key)) {
+        add(self, copy key, default)
     };
-    borrow_mut(table, key)
+    borrow_mut(self, key)
 }
 
@@ -918,7 +918,7 @@ Insert the pair (key, default) first if there is no en Returns true iff table contains an entry for key. -
public fun contains<K: drop, V>(table: &smart_table::SmartTable<K, V>, key: K): bool
+
public fun contains<K: drop, V>(self: &smart_table::SmartTable<K, V>, key: K): bool
 
@@ -927,10 +927,10 @@ Returns true iff table contains an Implementation -
public fun contains<K: drop, V>(table: &SmartTable<K, V>, key: K): bool {
+
public fun contains<K: drop, V>(self: &SmartTable<K, V>, key: K): bool {
     let hash = sip_hash_from_value(&key);
-    let index = bucket_index(table.level, table.num_buckets, hash);
-    let bucket = table_with_length::borrow(&table.buckets, index);
+    let index = bucket_index(self.level, self.num_buckets, hash);
+    let bucket = table_with_length::borrow(&self.buckets, index);
     vector::any(bucket, | entry | {
         let e: &Entry<K, V> = entry;
         e.hash == hash && &e.key == &key
@@ -950,7 +950,7 @@ Remove from table and return the v
 Aborts if there is no entry for key.
 
 
-
public fun remove<K: copy, drop, V>(table: &mut smart_table::SmartTable<K, V>, key: K): V
+
public fun remove<K: copy, drop, V>(self: &mut smart_table::SmartTable<K, V>, key: K): V
 
@@ -959,16 +959,16 @@ Aborts if there is no entry for key. Implementation -
public fun remove<K: copy + drop, V>(table: &mut SmartTable<K, V>, key: K): V {
-    let index = bucket_index(table.level, table.num_buckets, sip_hash_from_value(&key));
-    let bucket = table_with_length::borrow_mut(&mut table.buckets, index);
+
public fun remove<K: copy + drop, V>(self: &mut SmartTable<K, V>, key: K): V {
+    let index = bucket_index(self.level, self.num_buckets, sip_hash_from_value(&key));
+    let bucket = table_with_length::borrow_mut(&mut self.buckets, index);
     let i = 0;
     let len = vector::length(bucket);
     while (i < len) {
         let entry = vector::borrow(bucket, i);
         if (&entry.key == &key) {
             let Entry { hash: _, key: _, value } = vector::swap_remove(bucket, i);
-            table.size = table.size - 1;
+            self.size = self.size - 1;
             return value
         };
         i = i + 1;
@@ -989,7 +989,7 @@ Insert the pair (key, value) if there is no entry for
 update the value of the entry for key to value otherwise
 
 
-
public fun upsert<K: copy, drop, V: drop>(table: &mut smart_table::SmartTable<K, V>, key: K, value: V)
+
public fun upsert<K: copy, drop, V: drop>(self: &mut smart_table::SmartTable<K, V>, key: K, value: V)
 
@@ -998,11 +998,11 @@ update the value of the entry for key to value otherwi Implementation -
public fun upsert<K: copy + drop, V: drop>(table: &mut SmartTable<K, V>, key: K, value: V) {
-    if (!contains(table, copy key)) {
-        add(table, copy key, value)
+
public fun upsert<K: copy + drop, V: drop>(self: &mut SmartTable<K, V>, key: K, value: V) {
+    if (!contains(self, copy key)) {
+        add(self, copy key, value)
     } else {
-        let ref = borrow_mut(table, key);
+        let ref = borrow_mut(self, key);
         *ref = value;
     };
 }
@@ -1019,7 +1019,7 @@ update the value of the entry for key to value otherwi
 Returns the length of the table, i.e. the number of entries.
 
 
-
public fun length<K, V>(table: &smart_table::SmartTable<K, V>): u64
+
public fun length<K, V>(self: &smart_table::SmartTable<K, V>): u64
 
@@ -1028,8 +1028,8 @@ Returns the length of the table, i.e. the number of entries. Implementation -
public fun length<K, V>(table: &SmartTable<K, V>): u64 {
-    table.size
+
public fun length<K, V>(self: &SmartTable<K, V>): u64 {
+    self.size
 }
 
@@ -1044,7 +1044,7 @@ Returns the length of the table, i.e. the number of entries. Return the load factor of the hashtable. -
public fun load_factor<K, V>(table: &smart_table::SmartTable<K, V>): u64
+
public fun load_factor<K, V>(self: &smart_table::SmartTable<K, V>): u64
 
@@ -1053,8 +1053,8 @@ Return the load factor of the hashtable. Implementation -
public fun load_factor<K, V>(table: &SmartTable<K, V>): u64 {
-    table.size * 100 / table.num_buckets / table.target_bucket_size
+
public fun load_factor<K, V>(self: &SmartTable<K, V>): u64 {
+    self.size * 100 / self.num_buckets / self.target_bucket_size
 }
 
@@ -1069,7 +1069,7 @@ Return the load factor of the hashtable. Update split_load_threshold. -
public fun update_split_load_threshold<K, V>(table: &mut smart_table::SmartTable<K, V>, split_load_threshold: u8)
+
public fun update_split_load_threshold<K, V>(self: &mut smart_table::SmartTable<K, V>, split_load_threshold: u8)
 
@@ -1078,12 +1078,12 @@ Update split_load_threshold. Implementation -
public fun update_split_load_threshold<K, V>(table: &mut SmartTable<K, V>, split_load_threshold: u8) {
+
public fun update_split_load_threshold<K, V>(self: &mut SmartTable<K, V>, split_load_threshold: u8) {
     assert!(
         split_load_threshold <= 100 && split_load_threshold > 0,
         error::invalid_argument(EINVALID_LOAD_THRESHOLD_PERCENT)
     );
-    table.split_load_threshold = split_load_threshold;
+    self.split_load_threshold = split_load_threshold;
 }
 
@@ -1098,7 +1098,7 @@ Update split_load_threshold. Update target_bucket_size. -
public fun update_target_bucket_size<K, V>(table: &mut smart_table::SmartTable<K, V>, target_bucket_size: u64)
+
public fun update_target_bucket_size<K, V>(self: &mut smart_table::SmartTable<K, V>, target_bucket_size: u64)
 
@@ -1107,9 +1107,9 @@ Update target_bucket_size. Implementation -
public fun update_target_bucket_size<K, V>(table: &mut SmartTable<K, V>, target_bucket_size: u64) {
+
public fun update_target_bucket_size<K, V>(self: &mut SmartTable<K, V>, target_bucket_size: u64) {
     assert!(target_bucket_size > 0, error::invalid_argument(EINVALID_TARGET_BUCKET_SIZE));
-    table.target_bucket_size = target_bucket_size;
+    self.target_bucket_size = target_bucket_size;
 }
 
@@ -1124,7 +1124,7 @@ Update target_bucket_size. Apply the function to a reference of each key-value pair in the table. -
public fun for_each_ref<K, V>(table: &smart_table::SmartTable<K, V>, f: |(&K, &V)|)
+
public fun for_each_ref<K, V>(self: &smart_table::SmartTable<K, V>, f: |(&K, &V)|)
 
@@ -1133,11 +1133,11 @@ Apply the function to a reference of each key-value pair in the table. Implementation -
public inline fun for_each_ref<K, V>(table: &SmartTable<K, V>, f: |&K, &V|) {
+
public inline fun for_each_ref<K, V>(self: &SmartTable<K, V>, f: |&K, &V|) {
     let i = 0;
-    while (i < aptos_std::smart_table::num_buckets(table)) {
+    while (i < aptos_std::smart_table::num_buckets(self)) {
         vector::for_each_ref(
-            aptos_std::table_with_length::borrow(aptos_std::smart_table::borrow_buckets(table), i),
+            aptos_std::table_with_length::borrow(aptos_std::smart_table::borrow_buckets(self), i),
             |elem| {
                 let (key, value) = aptos_std::smart_table::borrow_kv(elem);
                 f(key, value)
@@ -1159,7 +1159,7 @@ Apply the function to a reference of each key-value pair in the table.
 Apply the function to a mutable reference of each key-value pair in the table.
 
 
-
public fun for_each_mut<K, V>(table: &mut smart_table::SmartTable<K, V>, f: |(&K, &mut V)|)
+
public fun for_each_mut<K, V>(self: &mut smart_table::SmartTable<K, V>, f: |(&K, &mut V)|)
 
@@ -1168,11 +1168,11 @@ Apply the function to a mutable reference of each key-value pair in the table. Implementation -
public inline fun for_each_mut<K, V>(table: &mut SmartTable<K, V>, f: |&K, &mut V|) {
+
public inline fun for_each_mut<K, V>(self: &mut SmartTable<K, V>, f: |&K, &mut V|) {
     let i = 0;
-    while (i < aptos_std::smart_table::num_buckets(table)) {
+    while (i < aptos_std::smart_table::num_buckets(self)) {
         vector::for_each_mut(
-            table_with_length::borrow_mut(aptos_std::smart_table::borrow_buckets_mut(table), i),
+            table_with_length::borrow_mut(aptos_std::smart_table::borrow_buckets_mut(self), i),
             |elem| {
                 let (key, value) = aptos_std::smart_table::borrow_kv_mut(elem);
                 f(key, value)
@@ -1194,7 +1194,7 @@ Apply the function to a mutable reference of each key-value pair in the table.
 Map the function over the references of key-value pairs in the table without modifying it.
 
 
-
public fun map_ref<K: copy, drop, store, V1, V2: store>(table: &smart_table::SmartTable<K, V1>, f: |&V1|V2): smart_table::SmartTable<K, V2>
+
public fun map_ref<K: copy, drop, store, V1, V2: store>(self: &smart_table::SmartTable<K, V1>, f: |&V1|V2): smart_table::SmartTable<K, V2>
 
@@ -1204,11 +1204,11 @@ Map the function over the references of key-value pairs in the table without mod
public inline fun map_ref<K: copy + drop + store, V1, V2: store>(
-    table: &SmartTable<K, V1>,
+    self: &SmartTable<K, V1>,
     f: |&V1|V2
 ): SmartTable<K, V2> {
     let new_table = new<K, V2>();
-    for_each_ref(table, |key, value| add(&mut new_table, *key, f(value)));
+    for_each_ref(self, |key, value| add(&mut new_table, *key, f(value)));
     new_table
 }
 
@@ -1224,7 +1224,7 @@ Map the function over the references of key-value pairs in the table without mod Return true if any key-value pair in the table satisfies the predicate. -
public fun any<K, V>(table: &smart_table::SmartTable<K, V>, p: |(&K, &V)|bool): bool
+
public fun any<K, V>(self: &smart_table::SmartTable<K, V>, p: |(&K, &V)|bool): bool
 
@@ -1234,13 +1234,13 @@ Return true if any key-value pair in the table satisfies the predicate.
public inline fun any<K, V>(
-    table: &SmartTable<K, V>,
+    self: &SmartTable<K, V>,
     p: |&K, &V|bool
 ): bool {
     let found = false;
     let i = 0;
-    while (i < aptos_std::smart_table::num_buckets(table)) {
-        found = vector::any(table_with_length::borrow(aptos_std::smart_table::borrow_buckets(table), i), |elem| {
+    while (i < aptos_std::smart_table::num_buckets(self)) {
+        found = vector::any(table_with_length::borrow(aptos_std::smart_table::borrow_buckets(self), i), |elem| {
             let (key, value) = aptos_std::smart_table::borrow_kv(elem);
             p(key, value)
         });
@@ -1261,7 +1261,7 @@ Return true if any key-value pair in the table satisfies the predicate.
 
 
 
-
public fun borrow_kv<K, V>(e: &smart_table::Entry<K, V>): (&K, &V)
+
public fun borrow_kv<K, V>(self: &smart_table::Entry<K, V>): (&K, &V)
 
@@ -1270,8 +1270,8 @@ Return true if any key-value pair in the table satisfies the predicate. Implementation -
public fun borrow_kv<K, V>(e: &Entry<K, V>): (&K, &V) {
-    (&e.key, &e.value)
+
public fun borrow_kv<K, V>(self: &Entry<K, V>): (&K, &V) {
+    (&self.key, &self.value)
 }
 
@@ -1285,7 +1285,7 @@ Return true if any key-value pair in the table satisfies the predicate. -
public fun borrow_kv_mut<K, V>(e: &mut smart_table::Entry<K, V>): (&mut K, &mut V)
+
public fun borrow_kv_mut<K, V>(self: &mut smart_table::Entry<K, V>): (&mut K, &mut V)
 
@@ -1294,8 +1294,8 @@ Return true if any key-value pair in the table satisfies the predicate. Implementation -
public fun borrow_kv_mut<K, V>(e: &mut Entry<K, V>): (&mut K, &mut V) {
-    (&mut e.key, &mut e.value)
+
public fun borrow_kv_mut<K, V>(self: &mut Entry<K, V>): (&mut K, &mut V) {
+    (&mut self.key, &mut self.value)
 }
 
@@ -1309,7 +1309,7 @@ Return true if any key-value pair in the table satisfies the predicate. -
public fun num_buckets<K, V>(table: &smart_table::SmartTable<K, V>): u64
+
public fun num_buckets<K, V>(self: &smart_table::SmartTable<K, V>): u64
 
@@ -1318,8 +1318,8 @@ Return true if any key-value pair in the table satisfies the predicate. Implementation -
public fun num_buckets<K, V>(table: &SmartTable<K, V>): u64 {
-    table.num_buckets
+
public fun num_buckets<K, V>(self: &SmartTable<K, V>): u64 {
+    self.num_buckets
 }
 
@@ -1333,7 +1333,7 @@ Return true if any key-value pair in the table satisfies the predicate. -
public fun borrow_buckets<K, V>(table: &smart_table::SmartTable<K, V>): &table_with_length::TableWithLength<u64, vector<smart_table::Entry<K, V>>>
+
public fun borrow_buckets<K, V>(self: &smart_table::SmartTable<K, V>): &table_with_length::TableWithLength<u64, vector<smart_table::Entry<K, V>>>
 
@@ -1342,8 +1342,8 @@ Return true if any key-value pair in the table satisfies the predicate. Implementation -
public fun borrow_buckets<K, V>(table: &SmartTable<K, V>): &TableWithLength<u64, vector<Entry<K, V>>> {
-    &table.buckets
+
public fun borrow_buckets<K, V>(self: &SmartTable<K, V>): &TableWithLength<u64, vector<Entry<K, V>>> {
+    &self.buckets
 }
 
@@ -1357,7 +1357,7 @@ Return true if any key-value pair in the table satisfies the predicate. -
public fun borrow_buckets_mut<K, V>(table: &mut smart_table::SmartTable<K, V>): &mut table_with_length::TableWithLength<u64, vector<smart_table::Entry<K, V>>>
+
public fun borrow_buckets_mut<K, V>(self: &mut smart_table::SmartTable<K, V>): &mut table_with_length::TableWithLength<u64, vector<smart_table::Entry<K, V>>>
 
@@ -1366,8 +1366,8 @@ Return true if any key-value pair in the table satisfies the predicate. Implementation -
public fun borrow_buckets_mut<K, V>(table: &mut SmartTable<K, V>): &mut TableWithLength<u64, vector<Entry<K, V>>> {
-    &mut table.buckets
+
public fun borrow_buckets_mut<K, V>(self: &mut SmartTable<K, V>): &mut TableWithLength<u64, vector<Entry<K, V>>> {
+    &mut self.buckets
 }
 
@@ -1472,7 +1472,7 @@ map_spec_has_key = spec_contains; ### Function `destroy` -
public fun destroy<K: drop, V: drop>(table: smart_table::SmartTable<K, V>)
+
public fun destroy<K: drop, V: drop>(self: smart_table::SmartTable<K, V>)
 
@@ -1488,7 +1488,7 @@ map_spec_has_key = spec_contains; ### Function `clear` -
public fun clear<K: drop, V: drop>(table: &mut smart_table::SmartTable<K, V>)
+
public fun clear<K: drop, V: drop>(self: &mut smart_table::SmartTable<K, V>)
 
@@ -1504,7 +1504,7 @@ map_spec_has_key = spec_contains; ### Function `add_all` -
public fun add_all<K, V>(table: &mut smart_table::SmartTable<K, V>, keys: vector<K>, values: vector<V>)
+
public fun add_all<K, V>(self: &mut smart_table::SmartTable<K, V>, keys: vector<K>, values: vector<V>)
 
@@ -1520,7 +1520,7 @@ map_spec_has_key = spec_contains; ### Function `to_simple_map` -
public fun to_simple_map<K: copy, drop, store, V: copy, store>(table: &smart_table::SmartTable<K, V>): simple_map::SimpleMap<K, V>
+
public fun to_simple_map<K: copy, drop, store, V: copy, store>(self: &smart_table::SmartTable<K, V>): simple_map::SimpleMap<K, V>
 
@@ -1536,7 +1536,7 @@ map_spec_has_key = spec_contains; ### Function `keys` -
public fun keys<K: copy, drop, store, V: copy, store>(table_ref: &smart_table::SmartTable<K, V>): vector<K>
+
public fun keys<K: copy, drop, store, V: copy, store>(self: &smart_table::SmartTable<K, V>): vector<K>
 
@@ -1552,7 +1552,7 @@ map_spec_has_key = spec_contains; ### Function `keys_paginated` -
public fun keys_paginated<K: copy, drop, store, V: copy, store>(table_ref: &smart_table::SmartTable<K, V>, starting_bucket_index: u64, starting_vector_index: u64, num_keys_to_get: u64): (vector<K>, option::Option<u64>, option::Option<u64>)
+
public fun keys_paginated<K: copy, drop, store, V: copy, store>(self: &smart_table::SmartTable<K, V>, starting_bucket_index: u64, starting_vector_index: u64, num_keys_to_get: u64): (vector<K>, option::Option<u64>, option::Option<u64>)
 
@@ -1568,7 +1568,7 @@ map_spec_has_key = spec_contains; ### Function `split_one_bucket` -
fun split_one_bucket<K, V>(table: &mut smart_table::SmartTable<K, V>)
+
fun split_one_bucket<K, V>(self: &mut smart_table::SmartTable<K, V>)
 
@@ -1600,7 +1600,7 @@ map_spec_has_key = spec_contains; ### Function `borrow_with_default` -
public fun borrow_with_default<K: copy, drop, V>(table: &smart_table::SmartTable<K, V>, key: K, default: &V): &V
+
public fun borrow_with_default<K: copy, drop, V>(self: &smart_table::SmartTable<K, V>, key: K, default: &V): &V
 
@@ -1616,7 +1616,7 @@ map_spec_has_key = spec_contains; ### Function `load_factor` -
public fun load_factor<K, V>(table: &smart_table::SmartTable<K, V>): u64
+
public fun load_factor<K, V>(self: &smart_table::SmartTable<K, V>): u64
 
@@ -1632,7 +1632,7 @@ map_spec_has_key = spec_contains; ### Function `update_split_load_threshold` -
public fun update_split_load_threshold<K, V>(table: &mut smart_table::SmartTable<K, V>, split_load_threshold: u8)
+
public fun update_split_load_threshold<K, V>(self: &mut smart_table::SmartTable<K, V>, split_load_threshold: u8)
 
@@ -1648,7 +1648,7 @@ map_spec_has_key = spec_contains; ### Function `update_target_bucket_size` -
public fun update_target_bucket_size<K, V>(table: &mut smart_table::SmartTable<K, V>, target_bucket_size: u64)
+
public fun update_target_bucket_size<K, V>(self: &mut smart_table::SmartTable<K, V>, target_bucket_size: u64)
 
@@ -1664,7 +1664,7 @@ map_spec_has_key = spec_contains; ### Function `borrow_kv` -
public fun borrow_kv<K, V>(e: &smart_table::Entry<K, V>): (&K, &V)
+
public fun borrow_kv<K, V>(self: &smart_table::Entry<K, V>): (&K, &V)
 
@@ -1680,7 +1680,7 @@ map_spec_has_key = spec_contains; ### Function `borrow_kv_mut` -
public fun borrow_kv_mut<K, V>(e: &mut smart_table::Entry<K, V>): (&mut K, &mut V)
+
public fun borrow_kv_mut<K, V>(self: &mut smart_table::Entry<K, V>): (&mut K, &mut V)
 
@@ -1696,7 +1696,7 @@ map_spec_has_key = spec_contains; ### Function `num_buckets` -
public fun num_buckets<K, V>(table: &smart_table::SmartTable<K, V>): u64
+
public fun num_buckets<K, V>(self: &smart_table::SmartTable<K, V>): u64
 
@@ -1712,7 +1712,7 @@ map_spec_has_key = spec_contains; ### Function `borrow_buckets` -
public fun borrow_buckets<K, V>(table: &smart_table::SmartTable<K, V>): &table_with_length::TableWithLength<u64, vector<smart_table::Entry<K, V>>>
+
public fun borrow_buckets<K, V>(self: &smart_table::SmartTable<K, V>): &table_with_length::TableWithLength<u64, vector<smart_table::Entry<K, V>>>
 
@@ -1728,7 +1728,7 @@ map_spec_has_key = spec_contains; ### Function `borrow_buckets_mut` -
public fun borrow_buckets_mut<K, V>(table: &mut smart_table::SmartTable<K, V>): &mut table_with_length::TableWithLength<u64, vector<smart_table::Entry<K, V>>>
+
public fun borrow_buckets_mut<K, V>(self: &mut smart_table::SmartTable<K, V>): &mut table_with_length::TableWithLength<u64, vector<smart_table::Entry<K, V>>>
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/smart_vector.md b/aptos-move/framework/aptos-stdlib/doc/smart_vector.md index 45c5e9ddf75ed..cb8748a05d55f 100644 --- a/aptos-move/framework/aptos-stdlib/doc/smart_vector.md +++ b/aptos-move/framework/aptos-stdlib/doc/smart_vector.md @@ -297,11 +297,11 @@ Create a vector of length 1 containing the passed in T. ## Function `destroy_empty` -Destroy the vector v. -Aborts if v is not empty. +Destroy the vector self. +Aborts if self is not empty. -
public fun destroy_empty<T>(v: smart_vector::SmartVector<T>)
+
public fun destroy_empty<T>(self: smart_vector::SmartVector<T>)
 
@@ -310,9 +310,9 @@ Aborts if v is not empty. Implementation -
public fun destroy_empty<T>(v: SmartVector<T>) {
-    assert!(is_empty(&v), error::invalid_argument(EVECTOR_NOT_EMPTY));
-    let SmartVector { inline_vec, big_vec, inline_capacity: _, bucket_size: _ } = v;
+
public fun destroy_empty<T>(self: SmartVector<T>) {
+    assert!(is_empty(&self), error::invalid_argument(EVECTOR_NOT_EMPTY));
+    let SmartVector { inline_vec, big_vec, inline_capacity: _, bucket_size: _ } = self;
     vector::destroy_empty(inline_vec);
     option::destroy_none(big_vec);
 }
@@ -329,7 +329,7 @@ Aborts if v is not empty.
 Destroy a vector completely when T has drop.
 
 
-
public fun destroy<T: drop>(v: smart_vector::SmartVector<T>)
+
public fun destroy<T: drop>(self: smart_vector::SmartVector<T>)
 
@@ -338,9 +338,9 @@ Destroy a vector completely when T has drop. Implementation -
public fun destroy<T: drop>(v: SmartVector<T>) {
-    clear(&mut v);
-    destroy_empty(v);
+
public fun destroy<T: drop>(self: SmartVector<T>) {
+    clear(&mut self);
+    destroy_empty(self);
 }
 
@@ -355,7 +355,7 @@ Destroy a vector completely when T has drop. Clear a vector completely when T has drop. -
public fun clear<T: drop>(v: &mut smart_vector::SmartVector<T>)
+
public fun clear<T: drop>(self: &mut smart_vector::SmartVector<T>)
 
@@ -364,10 +364,10 @@ Clear a vector completely when T has drop. Implementation -
public fun clear<T: drop>(v: &mut SmartVector<T>) {
-    v.inline_vec = vector[];
-    if (option::is_some(&v.big_vec)) {
-        big_vector::destroy(option::extract(&mut v.big_vec));
+
public fun clear<T: drop>(self: &mut SmartVector<T>) {
+    self.inline_vec = vector[];
+    if (option::is_some(&self.big_vec)) {
+        big_vector::destroy(option::extract(&mut self.big_vec));
     }
 }
 
@@ -380,11 +380,11 @@ Clear a vector completely when T has drop. ## Function `borrow` -Acquire an immutable reference to the ith T of the vector v. +Acquire an immutable reference to the ith T of the vector self. Aborts if i is out of bounds. -
public fun borrow<T>(v: &smart_vector::SmartVector<T>, i: u64): &T
+
public fun borrow<T>(self: &smart_vector::SmartVector<T>, i: u64): &T
 
@@ -393,13 +393,13 @@ Aborts if i is out of bounds. Implementation -
public fun borrow<T>(v: &SmartVector<T>, i: u64): &T {
-    assert!(i < length(v), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
-    let inline_len = vector::length(&v.inline_vec);
+
public fun borrow<T>(self: &SmartVector<T>, i: u64): &T {
+    assert!(i < length(self), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
+    let inline_len = vector::length(&self.inline_vec);
     if (i < inline_len) {
-        vector::borrow(&v.inline_vec, i)
+        vector::borrow(&self.inline_vec, i)
     } else {
-        big_vector::borrow(option::borrow(&v.big_vec), i - inline_len)
+        big_vector::borrow(option::borrow(&self.big_vec), i - inline_len)
     }
 }
 
@@ -412,11 +412,11 @@ Aborts if i is out of bounds. ## Function `borrow_mut` -Return a mutable reference to the ith T in the vector v. +Return a mutable reference to the ith T in the vector self. Aborts if i is out of bounds. -
public fun borrow_mut<T>(v: &mut smart_vector::SmartVector<T>, i: u64): &mut T
+
public fun borrow_mut<T>(self: &mut smart_vector::SmartVector<T>, i: u64): &mut T
 
@@ -425,13 +425,13 @@ Aborts if i is out of bounds. Implementation -
public fun borrow_mut<T>(v: &mut SmartVector<T>, i: u64): &mut T {
-    assert!(i < length(v), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
-    let inline_len = vector::length(&v.inline_vec);
+
public fun borrow_mut<T>(self: &mut SmartVector<T>, i: u64): &mut T {
+    assert!(i < length(self), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
+    let inline_len = vector::length(&self.inline_vec);
     if (i < inline_len) {
-        vector::borrow_mut(&mut v.inline_vec, i)
+        vector::borrow_mut(&mut self.inline_vec, i)
     } else {
-        big_vector::borrow_mut(option::borrow_mut(&mut v.big_vec), i - inline_len)
+        big_vector::borrow_mut(option::borrow_mut(&mut self.big_vec), i - inline_len)
     }
 }
 
@@ -444,12 +444,12 @@ Aborts if i is out of bounds. ## Function `append` -Empty and destroy the other vector, and push each of the Ts in the other vector onto the lhs vector in the +Empty and destroy the other vector, and push each of the Ts in the other vector onto the self vector in the same order as they occurred in other. Disclaimer: This function may be costly. Use it at your own discretion. -
public fun append<T: store>(lhs: &mut smart_vector::SmartVector<T>, other: smart_vector::SmartVector<T>)
+
public fun append<T: store>(self: &mut smart_vector::SmartVector<T>, other: smart_vector::SmartVector<T>)
 
@@ -458,16 +458,16 @@ Disclaimer: This function may be costly. Use it at your own discretion. Implementation -
public fun append<T: store>(lhs: &mut SmartVector<T>, other: SmartVector<T>) {
+
public fun append<T: store>(self: &mut SmartVector<T>, other: SmartVector<T>) {
     let other_len = length(&other);
     let half_other_len = other_len / 2;
     let i = 0;
     while (i < half_other_len) {
-        push_back(lhs, swap_remove(&mut other, i));
+        push_back(self, swap_remove(&mut other, i));
         i = i + 1;
     };
     while (i < other_len) {
-        push_back(lhs, pop_back(&mut other));
+        push_back(self, pop_back(&mut other));
         i = i + 1;
     };
     destroy_empty(other);
@@ -485,7 +485,7 @@ Disclaimer: This function may be costly. Use it at your own discretion.
 Add multiple values to the vector at once.
 
 
-
public fun add_all<T: store>(v: &mut smart_vector::SmartVector<T>, vals: vector<T>)
+
public fun add_all<T: store>(self: &mut smart_vector::SmartVector<T>, vals: vector<T>)
 
@@ -494,8 +494,8 @@ Add multiple values to the vector at once. Implementation -
public fun add_all<T: store>(v: &mut SmartVector<T>, vals: vector<T>) {
-    vector::for_each(vals, |val| { push_back(v, val); })
+
public fun add_all<T: store>(self: &mut SmartVector<T>, vals: vector<T>) {
+    vector::for_each(vals, |val| { push_back(self, val); })
 }
 
@@ -512,7 +512,7 @@ atomic view of the whole vector. Disclaimer: This function may be costly as the smart vector may be huge in size. Use it at your own discretion. -
public fun to_vector<T: copy, store>(v: &smart_vector::SmartVector<T>): vector<T>
+
public fun to_vector<T: copy, store>(self: &smart_vector::SmartVector<T>): vector<T>
 
@@ -521,10 +521,10 @@ Disclaimer: This function may be costly as the smart vector may be huge in size. Implementation -
public fun to_vector<T: store + copy>(v: &SmartVector<T>): vector<T> {
-    let res = v.inline_vec;
-    if (option::is_some(&v.big_vec)) {
-        let big_vec = option::borrow(&v.big_vec);
+
public fun to_vector<T: store + copy>(self: &SmartVector<T>): vector<T> {
+    let res = self.inline_vec;
+    if (option::is_some(&self.big_vec)) {
+        let big_vec = option::borrow(&self.big_vec);
         vector::append(&mut res, big_vector::to_vector(big_vec));
     };
     res
@@ -539,11 +539,11 @@ Disclaimer: This function may be costly as the smart vector may be huge in size.
 
 ## Function `push_back`
 
-Add T val to the end of the vector v. It grows the buckets when the current buckets are full.
+Add T val to the end of the vector self. It grows the buckets when the current buckets are full.
 This operation will cost more gas when it adds new bucket.
 
 
-
public fun push_back<T: store>(v: &mut smart_vector::SmartVector<T>, val: T)
+
public fun push_back<T: store>(self: &mut smart_vector::SmartVector<T>, val: T)
 
@@ -552,28 +552,28 @@ This operation will cost more gas when it adds new bucket. Implementation -
public fun push_back<T: store>(v: &mut SmartVector<T>, val: T) {
-    let len = length(v);
-    let inline_len = vector::length(&v.inline_vec);
+
public fun push_back<T: store>(self: &mut SmartVector<T>, val: T) {
+    let len = length(self);
+    let inline_len = vector::length(&self.inline_vec);
     if (len == inline_len) {
-        let bucket_size = if (option::is_some(&v.inline_capacity)) {
-            if (len < *option::borrow(&v.inline_capacity)) {
-                vector::push_back(&mut v.inline_vec, val);
+        let bucket_size = if (option::is_some(&self.inline_capacity)) {
+            if (len < *option::borrow(&self.inline_capacity)) {
+                vector::push_back(&mut self.inline_vec, val);
                 return
             };
-            *option::borrow(&v.bucket_size)
+            *option::borrow(&self.bucket_size)
         } else {
             let val_size = size_of_val(&val);
             if (val_size * (inline_len + 1) < 150 /* magic number */) {
-                vector::push_back(&mut v.inline_vec, val);
+                vector::push_back(&mut self.inline_vec, val);
                 return
             };
-            let estimated_avg_size = max((size_of_val(&v.inline_vec) + val_size) / (inline_len + 1), 1);
+            let estimated_avg_size = max((size_of_val(&self.inline_vec) + val_size) / (inline_len + 1), 1);
             max(1024 /* free_write_quota */ / estimated_avg_size, 1)
         };
-        option::fill(&mut v.big_vec, big_vector::empty(bucket_size));
+        option::fill(&mut self.big_vec, big_vector::empty(bucket_size));
     };
-    big_vector::push_back(option::borrow_mut(&mut v.big_vec), val);
+    big_vector::push_back(option::borrow_mut(&mut self.big_vec), val);
 }
 
@@ -585,11 +585,11 @@ This operation will cost more gas when it adds new bucket. ## Function `pop_back` -Pop an T from the end of vector v. It does shrink the buckets if they're empty. -Aborts if v is empty. +Pop an T from the end of vector self. It does shrink the buckets if they're empty. +Aborts if self is empty. -
public fun pop_back<T>(v: &mut smart_vector::SmartVector<T>): T
+
public fun pop_back<T>(self: &mut smart_vector::SmartVector<T>): T
 
@@ -598,9 +598,9 @@ Aborts if v is empty. Implementation -
public fun pop_back<T>(v: &mut SmartVector<T>): T {
-    assert!(!is_empty(v), error::invalid_state(EVECTOR_EMPTY));
-    let big_vec_wrapper = &mut v.big_vec;
+
public fun pop_back<T>(self: &mut SmartVector<T>): T {
+    assert!(!is_empty(self), error::invalid_state(EVECTOR_EMPTY));
+    let big_vec_wrapper = &mut self.big_vec;
     if (option::is_some(big_vec_wrapper)) {
         let big_vec = option::extract(big_vec_wrapper);
         let val = big_vector::pop_back(&mut big_vec);
@@ -611,7 +611,7 @@ Aborts if v is empty.
         };
         val
     } else {
-        vector::pop_back(&mut v.inline_vec)
+        vector::pop_back(&mut self.inline_vec)
     }
 }
 
@@ -624,12 +624,12 @@ Aborts if v is empty. ## Function `remove` -Remove the T at index i in the vector v and return the owned value that was previously stored at i in v. +Remove the T at index i in the vector self and return the owned value that was previously stored at i in self. All Ts occurring at indices greater than i will be shifted down by 1. Will abort if i is out of bounds. Disclaimer: This function may be costly. Use it at your own discretion. -
public fun remove<T>(v: &mut smart_vector::SmartVector<T>, i: u64): T
+
public fun remove<T>(self: &mut smart_vector::SmartVector<T>, i: u64): T
 
@@ -638,14 +638,14 @@ Disclaimer: This function may be costly. Use it at your own discretion. Implementation -
public fun remove<T>(v: &mut SmartVector<T>, i: u64): T {
-    let len = length(v);
+
public fun remove<T>(self: &mut SmartVector<T>, i: u64): T {
+    let len = length(self);
     assert!(i < len, error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
-    let inline_len = vector::length(&v.inline_vec);
+    let inline_len = vector::length(&self.inline_vec);
     if (i < inline_len) {
-        vector::remove(&mut v.inline_vec, i)
+        vector::remove(&mut self.inline_vec, i)
     } else {
-        let big_vec_wrapper = &mut v.big_vec;
+        let big_vec_wrapper = &mut self.big_vec;
         let big_vec = option::extract(big_vec_wrapper);
         let val = big_vector::remove(&mut big_vec, i - inline_len);
         if (big_vector::is_empty(&big_vec)) {
@@ -666,12 +666,12 @@ Disclaimer: This function may be costly. Use it at your own discretion.
 
 ## Function `swap_remove`
 
-Swap the ith T of the vector v with the last T and then pop the vector.
+Swap the ith T of the vector self with the last T and then pop the vector.
 This is O(1), but does not preserve ordering of Ts in the vector.
 Aborts if i is out of bounds.
 
 
-
public fun swap_remove<T>(v: &mut smart_vector::SmartVector<T>, i: u64): T
+
public fun swap_remove<T>(self: &mut smart_vector::SmartVector<T>, i: u64): T
 
@@ -680,12 +680,12 @@ Aborts if i is out of bounds. Implementation -
public fun swap_remove<T>(v: &mut SmartVector<T>, i: u64): T {
-    let len = length(v);
+
public fun swap_remove<T>(self: &mut SmartVector<T>, i: u64): T {
+    let len = length(self);
     assert!(i < len, error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
-    let inline_len = vector::length(&v.inline_vec);
-    let big_vec_wrapper = &mut v.big_vec;
-    let inline_vec = &mut v.inline_vec;
+    let inline_len = vector::length(&self.inline_vec);
+    let big_vec_wrapper = &mut self.big_vec;
+    let inline_vec = &mut self.inline_vec;
     if (i >= inline_len) {
         let big_vec = option::extract(big_vec_wrapper);
         let val = big_vector::swap_remove(&mut big_vec, i - inline_len);
@@ -720,10 +720,10 @@ Aborts if i is out of bounds.
 ## Function `swap`
 
 Swap the Ts at the i'th and j'th indices in the vector v. Will abort if either of i or j are out of bounds
-for v.
+for self.
 
 
-
public fun swap<T: store>(v: &mut smart_vector::SmartVector<T>, i: u64, j: u64)
+
public fun swap<T: store>(self: &mut smart_vector::SmartVector<T>, i: u64, j: u64)
 
@@ -732,20 +732,20 @@ for v. Implementation -
public fun swap<T: store>(v: &mut SmartVector<T>, i: u64, j: u64) {
+
public fun swap<T: store>(self: &mut SmartVector<T>, i: u64, j: u64) {
     if (i > j) {
-        return swap(v, j, i)
+        return swap(self, j, i)
     };
-    let len = length(v);
+    let len = length(self);
     assert!(j < len, error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
-    let inline_len = vector::length(&v.inline_vec);
+    let inline_len = vector::length(&self.inline_vec);
     if (i >= inline_len) {
-        big_vector::swap(option::borrow_mut(&mut v.big_vec), i - inline_len, j - inline_len);
+        big_vector::swap(option::borrow_mut(&mut self.big_vec), i - inline_len, j - inline_len);
     } else if (j < inline_len) {
-        vector::swap(&mut v.inline_vec, i, j);
+        vector::swap(&mut self.inline_vec, i, j);
     } else {
-        let big_vec = option::borrow_mut(&mut v.big_vec);
-        let inline_vec = &mut v.inline_vec;
+        let big_vec = option::borrow_mut(&mut self.big_vec);
+        let inline_vec = &mut self.inline_vec;
         let element_i = vector::swap_remove(inline_vec, i);
         let element_j = big_vector::swap_remove(big_vec, j - inline_len);
         vector::push_back(inline_vec, element_j);
@@ -764,11 +764,11 @@ for v.
 
 ## Function `reverse`
 
-Reverse the order of the Ts in the vector v in-place.
+Reverse the order of the Ts in the vector self in-place.
 Disclaimer: This function may be costly. Use it at your own discretion.
 
 
-
public fun reverse<T: store>(v: &mut smart_vector::SmartVector<T>)
+
public fun reverse<T: store>(self: &mut smart_vector::SmartVector<T>)
 
@@ -777,33 +777,33 @@ Disclaimer: This function may be costly. Use it at your own discretion. Implementation -
public fun reverse<T: store>(v: &mut SmartVector<T>) {
-    let inline_len = vector::length(&v.inline_vec);
+
public fun reverse<T: store>(self: &mut SmartVector<T>) {
+    let inline_len = vector::length(&self.inline_vec);
     let i = 0;
     let new_inline_vec = vector[];
     // Push the last `inline_len` Ts into a temp vector.
     while (i < inline_len) {
-        vector::push_back(&mut new_inline_vec, pop_back(v));
+        vector::push_back(&mut new_inline_vec, pop_back(self));
         i = i + 1;
     };
     vector::reverse(&mut new_inline_vec);
     // Reverse the big_vector left if exists.
-    if (option::is_some(&v.big_vec)) {
-        big_vector::reverse(option::borrow_mut(&mut v.big_vec));
+    if (option::is_some(&self.big_vec)) {
+        big_vector::reverse(option::borrow_mut(&mut self.big_vec));
     };
     // Mem::swap the two vectors.
     let temp_vec = vector[];
-    while (!vector::is_empty(&mut v.inline_vec)) {
-        vector::push_back(&mut temp_vec, vector::pop_back(&mut v.inline_vec));
+    while (!vector::is_empty(&mut self.inline_vec)) {
+        vector::push_back(&mut temp_vec, vector::pop_back(&mut self.inline_vec));
     };
     vector::reverse(&mut temp_vec);
     while (!vector::is_empty(&mut new_inline_vec)) {
-        vector::push_back(&mut v.inline_vec, vector::pop_back(&mut new_inline_vec));
+        vector::push_back(&mut self.inline_vec, vector::pop_back(&mut new_inline_vec));
     };
     vector::destroy_empty(new_inline_vec);
     // Push the rest Ts originally left in inline_vector back to the end of the smart vector.
     while (!vector::is_empty(&mut temp_vec)) {
-        push_back(v, vector::pop_back(&mut temp_vec));
+        push_back(self, vector::pop_back(&mut temp_vec));
     };
     vector::destroy_empty(temp_vec);
 }
@@ -817,12 +817,12 @@ Disclaimer: This function may be costly. Use it at your own discretion.
 
 ## Function `index_of`
 
-Return (true, i) if val is in the vector v at index i.
+Return (true, i) if val is in the vector self at index i.
 Otherwise, returns (false, 0).
 Disclaimer: This function may be costly. Use it at your own discretion.
 
 
-
public fun index_of<T>(v: &smart_vector::SmartVector<T>, val: &T): (bool, u64)
+
public fun index_of<T>(self: &smart_vector::SmartVector<T>, val: &T): (bool, u64)
 
@@ -831,13 +831,13 @@ Disclaimer: This function may be costly. Use it at your own discretion. Implementation -
public fun index_of<T>(v: &SmartVector<T>, val: &T): (bool, u64) {
-    let (found, i) = vector::index_of(&v.inline_vec, val);
+
public fun index_of<T>(self: &SmartVector<T>, val: &T): (bool, u64) {
+    let (found, i) = vector::index_of(&self.inline_vec, val);
     if (found) {
         (true, i)
-    } else if (option::is_some(&v.big_vec)) {
-        let (found, i) = big_vector::index_of(option::borrow(&v.big_vec), val);
-        (found, i + vector::length(&v.inline_vec))
+    } else if (option::is_some(&self.big_vec)) {
+        let (found, i) = big_vector::index_of(option::borrow(&self.big_vec), val);
+        (found, i + vector::length(&self.inline_vec))
     } else {
         (false, 0)
     }
@@ -852,11 +852,11 @@ Disclaimer: This function may be costly. Use it at your own discretion.
 
 ## Function `contains`
 
-Return true if val is in the vector v.
+Return true if val is in the vector self.
 Disclaimer: This function may be costly. Use it at your own discretion.
 
 
-
public fun contains<T>(v: &smart_vector::SmartVector<T>, val: &T): bool
+
public fun contains<T>(self: &smart_vector::SmartVector<T>, val: &T): bool
 
@@ -865,9 +865,9 @@ Disclaimer: This function may be costly. Use it at your own discretion. Implementation -
public fun contains<T>(v: &SmartVector<T>, val: &T): bool {
-    if (is_empty(v)) return false;
-    let (exist, _) = index_of(v, val);
+
public fun contains<T>(self: &SmartVector<T>, val: &T): bool {
+    if (is_empty(self)) return false;
+    let (exist, _) = index_of(self, val);
     exist
 }
 
@@ -883,7 +883,7 @@ Disclaimer: This function may be costly. Use it at your own discretion. Return the length of the vector. -
public fun length<T>(v: &smart_vector::SmartVector<T>): u64
+
public fun length<T>(self: &smart_vector::SmartVector<T>): u64
 
@@ -892,11 +892,11 @@ Return the length of the vector. Implementation -
public fun length<T>(v: &SmartVector<T>): u64 {
-    vector::length(&v.inline_vec) + if (option::is_none(&v.big_vec)) {
+
public fun length<T>(self: &SmartVector<T>): u64 {
+    vector::length(&self.inline_vec) + if (option::is_none(&self.big_vec)) {
         0
     } else {
-        big_vector::length(option::borrow(&v.big_vec))
+        big_vector::length(option::borrow(&self.big_vec))
     }
 }
 
@@ -909,10 +909,10 @@ Return the length of the vector. ## Function `is_empty` -Return true if the vector v has no Ts and false otherwise. +Return true if the vector self has no Ts and false otherwise. -
public fun is_empty<T>(v: &smart_vector::SmartVector<T>): bool
+
public fun is_empty<T>(self: &smart_vector::SmartVector<T>): bool
 
@@ -921,8 +921,8 @@ Return true if the vector v has no Ts and Implementation -
public fun is_empty<T>(v: &SmartVector<T>): bool {
-    length(v) == 0
+
public fun is_empty<T>(self: &SmartVector<T>): bool {
+    length(self) == 0
 }
 
@@ -937,7 +937,7 @@ Return true if the vector v has no Ts and public fun for_each<T: store>(v: smart_vector::SmartVector<T>, f: |T|) +
public fun for_each<T: store>(self: smart_vector::SmartVector<T>, f: |T|)
 
@@ -946,9 +946,9 @@ Apply the function to each T in the vector, consuming it. Implementation -
public inline fun for_each<T: store>(v: SmartVector<T>, f: |T|) {
-    aptos_std::smart_vector::reverse(&mut v); // We need to reverse the vector to consume it efficiently
-    aptos_std::smart_vector::for_each_reverse(v, |e| f(e));
+
public inline fun for_each<T: store>(self: SmartVector<T>, f: |T|) {
+    aptos_std::smart_vector::reverse(&mut self); // We need to reverse the vector to consume it efficiently
+    aptos_std::smart_vector::for_each_reverse(self, |e| f(e));
 }
 
@@ -963,7 +963,7 @@ Apply the function to each T in the vector, consuming it. Apply the function to each T in the vector, consuming it. -
public fun for_each_reverse<T>(v: smart_vector::SmartVector<T>, f: |T|)
+
public fun for_each_reverse<T>(self: smart_vector::SmartVector<T>, f: |T|)
 
@@ -972,13 +972,13 @@ Apply the function to each T in the vector, consuming it. Implementation -
public inline fun for_each_reverse<T>(v: SmartVector<T>, f: |T|) {
-    let len = aptos_std::smart_vector::length(&v);
+
public inline fun for_each_reverse<T>(self: SmartVector<T>, f: |T|) {
+    let len = aptos_std::smart_vector::length(&self);
     while (len > 0) {
-        f(aptos_std::smart_vector::pop_back(&mut v));
+        f(aptos_std::smart_vector::pop_back(&mut self));
         len = len - 1;
     };
-    aptos_std::smart_vector::destroy_empty(v)
+    aptos_std::smart_vector::destroy_empty(self)
 }
 
@@ -993,7 +993,7 @@ Apply the function to each T in the vector, consuming it. Apply the function to a reference of each T in the vector. -
public fun for_each_ref<T>(v: &smart_vector::SmartVector<T>, f: |&T|)
+
public fun for_each_ref<T>(self: &smart_vector::SmartVector<T>, f: |&T|)
 
@@ -1002,11 +1002,11 @@ Apply the function to a reference of each T in the vector. Implementation -
public inline fun for_each_ref<T>(v: &SmartVector<T>, f: |&T|) {
+
public inline fun for_each_ref<T>(self: &SmartVector<T>, f: |&T|) {
     let i = 0;
-    let len = aptos_std::smart_vector::length(v);
+    let len = aptos_std::smart_vector::length(self);
     while (i < len) {
-        f(aptos_std::smart_vector::borrow(v, i));
+        f(aptos_std::smart_vector::borrow(self, i));
         i = i + 1
     }
 }
@@ -1023,7 +1023,7 @@ Apply the function to a reference of each T in the vector.
 Apply the function to a mutable reference to each T in the vector.
 
 
-
public fun for_each_mut<T>(v: &mut smart_vector::SmartVector<T>, f: |&mut T|)
+
public fun for_each_mut<T>(self: &mut smart_vector::SmartVector<T>, f: |&mut T|)
 
@@ -1032,11 +1032,11 @@ Apply the function to a mutable reference to each T in the vector. Implementation -
public inline fun for_each_mut<T>(v: &mut SmartVector<T>, f: |&mut T|) {
+
public inline fun for_each_mut<T>(self: &mut SmartVector<T>, f: |&mut T|) {
     let i = 0;
-    let len = aptos_std::smart_vector::length(v);
+    let len = aptos_std::smart_vector::length(self);
     while (i < len) {
-        f(aptos_std::smart_vector::borrow_mut(v, i));
+        f(aptos_std::smart_vector::borrow_mut(self, i));
         i = i + 1
     }
 }
@@ -1053,7 +1053,7 @@ Apply the function to a mutable reference to each T in the vector.
 Apply the function to a reference of each T in the vector with its index.
 
 
-
public fun enumerate_ref<T>(v: &smart_vector::SmartVector<T>, f: |(u64, &T)|)
+
public fun enumerate_ref<T>(self: &smart_vector::SmartVector<T>, f: |(u64, &T)|)
 
@@ -1062,11 +1062,11 @@ Apply the function to a reference of each T in the vector with its index. Implementation -
public inline fun enumerate_ref<T>(v: &SmartVector<T>, f: |u64, &T|) {
+
public inline fun enumerate_ref<T>(self: &SmartVector<T>, f: |u64, &T|) {
     let i = 0;
-    let len = aptos_std::smart_vector::length(v);
+    let len = aptos_std::smart_vector::length(self);
     while (i < len) {
-        f(i, aptos_std::smart_vector::borrow(v, i));
+        f(i, aptos_std::smart_vector::borrow(self, i));
         i = i + 1;
     };
 }
@@ -1083,7 +1083,7 @@ Apply the function to a reference of each T in the vector with its index.
 Apply the function to a mutable reference of each T in the vector with its index.
 
 
-
public fun enumerate_mut<T>(v: &mut smart_vector::SmartVector<T>, f: |(u64, &mut T)|)
+
public fun enumerate_mut<T>(self: &mut smart_vector::SmartVector<T>, f: |(u64, &mut T)|)
 
@@ -1092,11 +1092,11 @@ Apply the function to a mutable reference of each T in the vector with its index Implementation -
public inline fun enumerate_mut<T>(v: &mut SmartVector<T>, f: |u64, &mut T|) {
+
public inline fun enumerate_mut<T>(self: &mut SmartVector<T>, f: |u64, &mut T|) {
     let i = 0;
-    let len = length(v);
+    let len = length(self);
     while (i < len) {
-        f(i, borrow_mut(v, i));
+        f(i, borrow_mut(self, i));
         i = i + 1;
     };
 }
@@ -1114,7 +1114,7 @@ Fold the function over the Ts. For example, fold<Accumulator, T: store>(v: smart_vector::SmartVector<T>, init: Accumulator, f: |(Accumulator, T)|Accumulator): Accumulator
+
public fun fold<Accumulator, T: store>(self: smart_vector::SmartVector<T>, init: Accumulator, f: |(Accumulator, T)|Accumulator): Accumulator
 
@@ -1124,12 +1124,12 @@ Fold the function over the Ts. For example, fold<Accumulator, T: store>( - v: SmartVector<T>, + self: SmartVector<T>, init: Accumulator, f: |Accumulator, T|Accumulator ): Accumulator { let accu = init; - aptos_std::smart_vector::for_each(v, |elem| accu = f(accu, elem)); + aptos_std::smart_vector::for_each(self, |elem| accu = f(accu, elem)); accu }
@@ -1146,7 +1146,7 @@ Fold right like fold above but working right to left. For example, f(1, f(2, f(3, 0))) -
public fun foldr<Accumulator, T>(v: smart_vector::SmartVector<T>, init: Accumulator, f: |(T, Accumulator)|Accumulator): Accumulator
+
public fun foldr<Accumulator, T>(self: smart_vector::SmartVector<T>, init: Accumulator, f: |(T, Accumulator)|Accumulator): Accumulator
 
@@ -1156,12 +1156,12 @@ Fold right like fold above but working right to left. For example, public inline fun foldr<Accumulator, T>( - v: SmartVector<T>, + self: SmartVector<T>, init: Accumulator, f: |T, Accumulator|Accumulator ): Accumulator { let accu = init; - aptos_std::smart_vector::for_each_reverse(v, |elem| accu = f(elem, accu)); + aptos_std::smart_vector::for_each_reverse(self, |elem| accu = f(elem, accu)); accu }
@@ -1178,7 +1178,7 @@ Map the function over the references of the Ts of the vector, producing a new ve original vector. -
public fun map_ref<T1, T2: store>(v: &smart_vector::SmartVector<T1>, f: |&T1|T2): smart_vector::SmartVector<T2>
+
public fun map_ref<T1, T2: store>(self: &smart_vector::SmartVector<T1>, f: |&T1|T2): smart_vector::SmartVector<T2>
 
@@ -1188,11 +1188,11 @@ original vector.
public inline fun map_ref<T1, T2: store>(
-    v: &SmartVector<T1>,
+    self: &SmartVector<T1>,
     f: |&T1|T2
 ): SmartVector<T2> {
     let result = aptos_std::smart_vector::new<T2>();
-    aptos_std::smart_vector::for_each_ref(v, |elem| aptos_std::smart_vector::push_back(&mut result, f(elem)));
+    aptos_std::smart_vector::for_each_ref(self, |elem| aptos_std::smart_vector::push_back(&mut result, f(elem)));
     result
 }
 
@@ -1208,7 +1208,7 @@ original vector. Map the function over the Ts of the vector, producing a new vector. -
public fun map<T1: store, T2: store>(v: smart_vector::SmartVector<T1>, f: |T1|T2): smart_vector::SmartVector<T2>
+
public fun map<T1: store, T2: store>(self: smart_vector::SmartVector<T1>, f: |T1|T2): smart_vector::SmartVector<T2>
 
@@ -1218,11 +1218,11 @@ Map the function over the Ts of the vector, producing a new vector.
public inline fun map<T1: store, T2: store>(
-    v: SmartVector<T1>,
+    self: SmartVector<T1>,
     f: |T1|T2
 ): SmartVector<T2> {
     let result = aptos_std::smart_vector::new<T2>();
-    aptos_std::smart_vector::for_each(v, |elem| push_back(&mut result, f(elem)));
+    aptos_std::smart_vector::for_each(self, |elem| push_back(&mut result, f(elem)));
     result
 }
 
@@ -1238,7 +1238,7 @@ Map the function over the Ts of the vector, producing a new vector. Filter the vector using the boolean function, removing all Ts for which p(e) is not true. -
public fun filter<T: drop, store>(v: smart_vector::SmartVector<T>, p: |&T|bool): smart_vector::SmartVector<T>
+
public fun filter<T: drop, store>(self: smart_vector::SmartVector<T>, p: |&T|bool): smart_vector::SmartVector<T>
 
@@ -1248,11 +1248,11 @@ Filter the vector using the boolean function, removing all Ts for which p(
public inline fun filter<T: store + drop>(
-    v: SmartVector<T>,
+    self: SmartVector<T>,
     p: |&T|bool
 ): SmartVector<T> {
     let result = aptos_std::smart_vector::new<T>();
-    aptos_std::smart_vector::for_each(v, |elem| {
+    aptos_std::smart_vector::for_each(self, |elem| {
         if (p(&elem)) aptos_std::smart_vector::push_back(&mut result, elem);
     });
     result
@@ -1269,7 +1269,7 @@ Filter the vector using the boolean function, removing all Ts for which p(
 
 
 
-
public fun zip<T1: store, T2: store>(v1: smart_vector::SmartVector<T1>, v2: smart_vector::SmartVector<T2>, f: |(T1, T2)|)
+
public fun zip<T1: store, T2: store>(self: smart_vector::SmartVector<T1>, v2: smart_vector::SmartVector<T2>, f: |(T1, T2)|)
 
@@ -1278,11 +1278,11 @@ Filter the vector using the boolean function, removing all Ts for which p( Implementation -
public inline fun zip<T1: store, T2: store>(v1: SmartVector<T1>, v2: SmartVector<T2>, f: |T1, T2|) {
+
public inline fun zip<T1: store, T2: store>(self: SmartVector<T1>, v2: SmartVector<T2>, f: |T1, T2|) {
     // We need to reverse the vectors to consume it efficiently
-    aptos_std::smart_vector::reverse(&mut v1);
+    aptos_std::smart_vector::reverse(&mut self);
     aptos_std::smart_vector::reverse(&mut v2);
-    aptos_std::smart_vector::zip_reverse(v1, v2, |e1, e2| f(e1, e2));
+    aptos_std::smart_vector::zip_reverse(self, v2, |e1, e2| f(e1, e2));
 }
 
@@ -1298,7 +1298,7 @@ Apply the function to each pair of elements in the two given vectors in the reve This errors out if the vectors are not of the same length. -
public fun zip_reverse<T1, T2>(v1: smart_vector::SmartVector<T1>, v2: smart_vector::SmartVector<T2>, f: |(T1, T2)|)
+
public fun zip_reverse<T1, T2>(self: smart_vector::SmartVector<T1>, v2: smart_vector::SmartVector<T2>, f: |(T1, T2)|)
 
@@ -1308,19 +1308,19 @@ This errors out if the vectors are not of the same length.
public inline fun zip_reverse<T1, T2>(
-    v1: SmartVector<T1>,
+    self: SmartVector<T1>,
     v2: SmartVector<T2>,
     f: |T1, T2|,
 ) {
-    let len = aptos_std::smart_vector::length(&v1);
+    let len = aptos_std::smart_vector::length(&self);
     // We can't use the constant ESMART_VECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
     // due to how inline functions work.
     assert!(len == aptos_std::smart_vector::length(&v2), 0x20005);
     while (len > 0) {
-        f(aptos_std::smart_vector::pop_back(&mut v1), aptos_std::smart_vector::pop_back(&mut v2));
+        f(aptos_std::smart_vector::pop_back(&mut self), aptos_std::smart_vector::pop_back(&mut v2));
         len = len - 1;
     };
-    aptos_std::smart_vector::destroy_empty(v1);
+    aptos_std::smart_vector::destroy_empty(self);
     aptos_std::smart_vector::destroy_empty(v2);
 }
 
@@ -1337,7 +1337,7 @@ Apply the function to the references of each pair of elements in the two given v This errors out if the vectors are not of the same length. -
public fun zip_ref<T1, T2>(v1: &smart_vector::SmartVector<T1>, v2: &smart_vector::SmartVector<T2>, f: |(&T1, &T2)|)
+
public fun zip_ref<T1, T2>(self: &smart_vector::SmartVector<T1>, v2: &smart_vector::SmartVector<T2>, f: |(&T1, &T2)|)
 
@@ -1347,17 +1347,17 @@ This errors out if the vectors are not of the same length.
public inline fun zip_ref<T1, T2>(
-    v1: &SmartVector<T1>,
+    self: &SmartVector<T1>,
     v2: &SmartVector<T2>,
     f: |&T1, &T2|,
 ) {
-    let len = aptos_std::smart_vector::length(v1);
+    let len = aptos_std::smart_vector::length(self);
     // We can't use the constant ESMART_VECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
     // due to how inline functions work.
     assert!(len == aptos_std::smart_vector::length(v2), 0x20005);
     let i = 0;
     while (i < len) {
-        f(aptos_std::smart_vector::borrow(v1, i), aptos_std::smart_vector::borrow(v2, i));
+        f(aptos_std::smart_vector::borrow(self, i), aptos_std::smart_vector::borrow(v2, i));
         i = i + 1
     }
 }
@@ -1375,7 +1375,7 @@ Apply the function to mutable references to each pair of elements in the two giv
 This errors out if the vectors are not of the same length.
 
 
-
public fun zip_mut<T1, T2>(v1: &mut smart_vector::SmartVector<T1>, v2: &mut smart_vector::SmartVector<T2>, f: |(&mut T1, &mut T2)|)
+
public fun zip_mut<T1, T2>(self: &mut smart_vector::SmartVector<T1>, v2: &mut smart_vector::SmartVector<T2>, f: |(&mut T1, &mut T2)|)
 
@@ -1385,17 +1385,17 @@ This errors out if the vectors are not of the same length.
public inline fun zip_mut<T1, T2>(
-    v1: &mut SmartVector<T1>,
+    self: &mut SmartVector<T1>,
     v2: &mut SmartVector<T2>,
     f: |&mut T1, &mut T2|,
 ) {
     let i = 0;
-    let len = aptos_std::smart_vector::length(v1);
+    let len = aptos_std::smart_vector::length(self);
     // We can't use the constant ESMART_VECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
     // due to how inline functions work.
     assert!(len == aptos_std::smart_vector::length(v2), 0x20005);
     while (i < len) {
-        f(aptos_std::smart_vector::borrow_mut(v1, i), aptos_std::smart_vector::borrow_mut(v2, i));
+        f(aptos_std::smart_vector::borrow_mut(self, i), aptos_std::smart_vector::borrow_mut(v2, i));
         i = i + 1
     }
 }
@@ -1412,7 +1412,7 @@ This errors out if the vectors are not of the same length.
 Map the function over the element pairs of the two vectors, producing a new vector.
 
 
-
public fun zip_map<T1: store, T2: store, NewT: store>(v1: smart_vector::SmartVector<T1>, v2: smart_vector::SmartVector<T2>, f: |(T1, T2)|NewT): smart_vector::SmartVector<NewT>
+
public fun zip_map<T1: store, T2: store, NewT: store>(self: smart_vector::SmartVector<T1>, v2: smart_vector::SmartVector<T2>, f: |(T1, T2)|NewT): smart_vector::SmartVector<NewT>
 
@@ -1422,16 +1422,16 @@ Map the function over the element pairs of the two vectors, producing a new vect
public inline fun zip_map<T1: store, T2: store, NewT: store>(
-    v1: SmartVector<T1>,
+    self: SmartVector<T1>,
     v2: SmartVector<T2>,
     f: |T1, T2|NewT
 ): SmartVector<NewT> {
     // We can't use the constant ESMART_VECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
     // due to how inline functions work.
-    assert!(aptos_std::smart_vector::length(&v1) == aptos_std::smart_vector::length(&v2), 0x20005);
+    assert!(aptos_std::smart_vector::length(&self) == aptos_std::smart_vector::length(&v2), 0x20005);
 
     let result = aptos_std::smart_vector::new<NewT>();
-    aptos_std::smart_vector::zip(v1, v2, |e1, e2| push_back(&mut result, f(e1, e2)));
+    aptos_std::smart_vector::zip(self, v2, |e1, e2| push_back(&mut result, f(e1, e2)));
     result
 }
 
@@ -1448,7 +1448,7 @@ Map the function over the references of the element pairs of two vectors, produc values without modifying the original vectors. -
public fun zip_map_ref<T1, T2, NewT: store>(v1: &smart_vector::SmartVector<T1>, v2: &smart_vector::SmartVector<T2>, f: |(&T1, &T2)|NewT): smart_vector::SmartVector<NewT>
+
public fun zip_map_ref<T1, T2, NewT: store>(self: &smart_vector::SmartVector<T1>, v2: &smart_vector::SmartVector<T2>, f: |(&T1, &T2)|NewT): smart_vector::SmartVector<NewT>
 
@@ -1458,16 +1458,16 @@ values without modifying the original vectors.
public inline fun zip_map_ref<T1, T2, NewT: store>(
-    v1: &SmartVector<T1>,
+    self: &SmartVector<T1>,
     v2: &SmartVector<T2>,
     f: |&T1, &T2|NewT
 ): SmartVector<NewT> {
     // We can't use the constant ESMART_VECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
     // due to how inline functions work.
-    assert!(aptos_std::smart_vector::length(v1) == aptos_std::smart_vector::length(v2), 0x20005);
+    assert!(aptos_std::smart_vector::length(self) == aptos_std::smart_vector::length(v2), 0x20005);
 
     let result = aptos_std::smart_vector::new<NewT>();
-    aptos_std::smart_vector::zip_ref(v1, v2, |e1, e2| push_back(&mut result, f(e1, e2)));
+    aptos_std::smart_vector::zip_ref(self, v2, |e1, e2| push_back(&mut result, f(e1, e2)));
     result
 }
 
@@ -1568,15 +1568,15 @@ values without modifying the original vectors. ### Function `destroy_empty` -
public fun destroy_empty<T>(v: smart_vector::SmartVector<T>)
+
public fun destroy_empty<T>(self: smart_vector::SmartVector<T>)
 
-
aborts_if !(is_empty(v));
-aborts_if len(v.inline_vec) != 0
-    || option::is_some(v.big_vec);
+
aborts_if !(is_empty(self));
+aborts_if len(self.inline_vec) != 0
+    || option::is_some(self.big_vec);
 
@@ -1586,15 +1586,15 @@ values without modifying the original vectors. ### Function `borrow` -
public fun borrow<T>(v: &smart_vector::SmartVector<T>, i: u64): &T
+
public fun borrow<T>(self: &smart_vector::SmartVector<T>, i: u64): &T
 
-
aborts_if i >= length(v);
-aborts_if option::is_some(v.big_vec) && (
-    (len(v.inline_vec) + big_vector::length<T>(option::borrow(v.big_vec))) > MAX_U64
+
aborts_if i >= length(self);
+aborts_if option::is_some(self.big_vec) && (
+    (len(self.inline_vec) + big_vector::length<T>(option::borrow(self.big_vec))) > MAX_U64
 );
 
@@ -1605,7 +1605,7 @@ values without modifying the original vectors. ### Function `append` -
public fun append<T: store>(lhs: &mut smart_vector::SmartVector<T>, other: smart_vector::SmartVector<T>)
+
public fun append<T: store>(self: &mut smart_vector::SmartVector<T>, other: smart_vector::SmartVector<T>)
 
@@ -1621,7 +1621,7 @@ values without modifying the original vectors. ### Function `push_back` -
public fun push_back<T: store>(v: &mut smart_vector::SmartVector<T>, val: T)
+
public fun push_back<T: store>(self: &mut smart_vector::SmartVector<T>, val: T)
 
@@ -1637,21 +1637,21 @@ values without modifying the original vectors. ### Function `pop_back` -
public fun pop_back<T>(v: &mut smart_vector::SmartVector<T>): T
+
public fun pop_back<T>(self: &mut smart_vector::SmartVector<T>): T
 
pragma verify_duration_estimate = 120;
-aborts_if  option::is_some(v.big_vec)
+aborts_if  option::is_some(self.big_vec)
     &&
-    (table_with_length::spec_len(option::borrow(v.big_vec).buckets) == 0);
-aborts_if is_empty(v);
-aborts_if option::is_some(v.big_vec) && (
-    (len(v.inline_vec) + big_vector::length<T>(option::borrow(v.big_vec))) > MAX_U64
+    (table_with_length::spec_len(option::borrow(self.big_vec).buckets) == 0);
+aborts_if is_empty(self);
+aborts_if option::is_some(self.big_vec) && (
+    (len(self.inline_vec) + big_vector::length<T>(option::borrow(self.big_vec))) > MAX_U64
 );
-ensures length(v) == length(old(v)) - 1;
+ensures length(self) == length(old(self)) - 1;
 
@@ -1661,7 +1661,7 @@ values without modifying the original vectors. ### Function `remove` -
public fun remove<T>(v: &mut smart_vector::SmartVector<T>, i: u64): T
+
public fun remove<T>(self: &mut smart_vector::SmartVector<T>, i: u64): T
 
@@ -1677,18 +1677,18 @@ values without modifying the original vectors. ### Function `swap_remove` -
public fun swap_remove<T>(v: &mut smart_vector::SmartVector<T>, i: u64): T
+
public fun swap_remove<T>(self: &mut smart_vector::SmartVector<T>, i: u64): T
 
pragma verify = false;
-aborts_if i >= length(v);
-aborts_if option::is_some(v.big_vec) && (
-    (len(v.inline_vec) + big_vector::length<T>(option::borrow(v.big_vec))) > MAX_U64
+aborts_if i >= length(self);
+aborts_if option::is_some(self.big_vec) && (
+    (len(self.inline_vec) + big_vector::length<T>(option::borrow(self.big_vec))) > MAX_U64
 );
-ensures length(v) == length(old(v)) - 1;
+ensures length(self) == length(old(self)) - 1;
 
@@ -1698,7 +1698,7 @@ values without modifying the original vectors. ### Function `swap` -
public fun swap<T: store>(v: &mut smart_vector::SmartVector<T>, i: u64, j: u64)
+
public fun swap<T: store>(self: &mut smart_vector::SmartVector<T>, i: u64, j: u64)
 
@@ -1714,13 +1714,14 @@ values without modifying the original vectors. ### Function `length` -
public fun length<T>(v: &smart_vector::SmartVector<T>): u64
+
public fun length<T>(self: &smart_vector::SmartVector<T>): u64
 
-
aborts_if option::is_some(v.big_vec) && len(v.inline_vec) + big_vector::length(option::spec_borrow(v.big_vec)) > MAX_U64;
+
aborts_if option::is_some(self.big_vec) && len(self.inline_vec) + big_vector::length(option::spec_borrow(
+    self.big_vec)) > MAX_U64;
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/table.md b/aptos-move/framework/aptos-stdlib/doc/table.md index d05ea82adb590..44f2e5627b7d3 100644 --- a/aptos-move/framework/aptos-stdlib/doc/table.md +++ b/aptos-move/framework/aptos-stdlib/doc/table.md @@ -140,7 +140,7 @@ key already exists. The entry itself is not stored in the table, and cannot be discovered from it. -
public fun add<K: copy, drop, V>(table: &mut table::Table<K, V>, key: K, val: V)
+
public fun add<K: copy, drop, V>(self: &mut table::Table<K, V>, key: K, val: V)
 
@@ -149,8 +149,8 @@ table, and cannot be discovered from it. Implementation -
public fun add<K: copy + drop, V>(table: &mut Table<K, V>, key: K, val: V) {
-    add_box<K, V, Box<V>>(table, key, Box { val })
+
public fun add<K: copy + drop, V>(self: &mut Table<K, V>, key: K, val: V) {
+    add_box<K, V, Box<V>>(self, key, Box { val })
 }
 
@@ -166,7 +166,7 @@ Acquire an immutable reference to the value which key maps to. Aborts if there is no entry for key. -
public fun borrow<K: copy, drop, V>(table: &table::Table<K, V>, key: K): &V
+
public fun borrow<K: copy, drop, V>(self: &table::Table<K, V>, key: K): &V
 
@@ -175,8 +175,8 @@ Aborts if there is no entry for key. Implementation -
public fun borrow<K: copy + drop, V>(table: &Table<K, V>, key: K): &V {
-    &borrow_box<K, V, Box<V>>(table, key).val
+
public fun borrow<K: copy + drop, V>(self: &Table<K, V>, key: K): &V {
+    &borrow_box<K, V, Box<V>>(self, key).val
 }
 
@@ -192,7 +192,7 @@ Acquire an immutable reference to the value which key maps to. Returns specified default value if there is no entry for key. -
public fun borrow_with_default<K: copy, drop, V>(table: &table::Table<K, V>, key: K, default: &V): &V
+
public fun borrow_with_default<K: copy, drop, V>(self: &table::Table<K, V>, key: K, default: &V): &V
 
@@ -201,11 +201,11 @@ Returns specified default value if there is no entry for key. Implementation -
public fun borrow_with_default<K: copy + drop, V>(table: &Table<K, V>, key: K, default: &V): &V {
-    if (!contains(table, copy key)) {
+
public fun borrow_with_default<K: copy + drop, V>(self: &Table<K, V>, key: K, default: &V): &V {
+    if (!contains(self, copy key)) {
         default
     } else {
-        borrow(table, copy key)
+        borrow(self, copy key)
     }
 }
 
@@ -222,7 +222,7 @@ Acquire a mutable reference to the value which key maps to. Aborts if there is no entry for key. -
public fun borrow_mut<K: copy, drop, V>(table: &mut table::Table<K, V>, key: K): &mut V
+
public fun borrow_mut<K: copy, drop, V>(self: &mut table::Table<K, V>, key: K): &mut V
 
@@ -231,8 +231,8 @@ Aborts if there is no entry for key. Implementation -
public fun borrow_mut<K: copy + drop, V>(table: &mut Table<K, V>, key: K): &mut V {
-    &mut borrow_box_mut<K, V, Box<V>>(table, key).val
+
public fun borrow_mut<K: copy + drop, V>(self: &mut Table<K, V>, key: K): &mut V {
+    &mut borrow_box_mut<K, V, Box<V>>(self, key).val
 }
 
@@ -248,7 +248,7 @@ Acquire a mutable reference to the value which key maps to. Insert the pair (key, default) first if there is no entry for key. -
public fun borrow_mut_with_default<K: copy, drop, V: drop>(table: &mut table::Table<K, V>, key: K, default: V): &mut V
+
public fun borrow_mut_with_default<K: copy, drop, V: drop>(self: &mut table::Table<K, V>, key: K, default: V): &mut V
 
@@ -257,11 +257,11 @@ Insert the pair (key, default) first if there is no en Implementation -
public fun borrow_mut_with_default<K: copy + drop, V: drop>(table: &mut Table<K, V>, key: K, default: V): &mut V {
-    if (!contains(table, copy key)) {
-        add(table, copy key, default)
+
public fun borrow_mut_with_default<K: copy + drop, V: drop>(self: &mut Table<K, V>, key: K, default: V): &mut V {
+    if (!contains(self, copy key)) {
+        add(self, copy key, default)
     };
-    borrow_mut(table, key)
+    borrow_mut(self, key)
 }
 
@@ -277,7 +277,7 @@ Insert the pair (key, value) if there is no entry for update the value of the entry for key to value otherwise -
public fun upsert<K: copy, drop, V: drop>(table: &mut table::Table<K, V>, key: K, value: V)
+
public fun upsert<K: copy, drop, V: drop>(self: &mut table::Table<K, V>, key: K, value: V)
 
@@ -286,11 +286,11 @@ update the value of the entry for key to value otherwi Implementation -
public fun upsert<K: copy + drop, V: drop>(table: &mut Table<K, V>, key: K, value: V) {
-    if (!contains(table, copy key)) {
-        add(table, copy key, value)
+
public fun upsert<K: copy + drop, V: drop>(self: &mut Table<K, V>, key: K, value: V) {
+    if (!contains(self, copy key)) {
+        add(self, copy key, value)
     } else {
-        let ref = borrow_mut(table, key);
+        let ref = borrow_mut(self, key);
         *ref = value;
     };
 }
@@ -304,11 +304,11 @@ update the value of the entry for key to value otherwi
 
 ## Function `remove`
 
-Remove from table and return the value which key maps to.
+Remove from self and return the value which key maps to.
 Aborts if there is no entry for key.
 
 
-
public fun remove<K: copy, drop, V>(table: &mut table::Table<K, V>, key: K): V
+
public fun remove<K: copy, drop, V>(self: &mut table::Table<K, V>, key: K): V
 
@@ -317,8 +317,8 @@ Aborts if there is no entry for key. Implementation -
public fun remove<K: copy + drop, V>(table: &mut Table<K, V>, key: K): V {
-    let Box { val } = remove_box<K, V, Box<V>>(table, key);
+
public fun remove<K: copy + drop, V>(self: &mut Table<K, V>, key: K): V {
+    let Box { val } = remove_box<K, V, Box<V>>(self, key);
     val
 }
 
@@ -331,10 +331,10 @@ Aborts if there is no entry for key. ## Function `contains` -Returns true iff table contains an entry for key. +Returns true iff self contains an entry for key. -
public fun contains<K: copy, drop, V>(table: &table::Table<K, V>, key: K): bool
+
public fun contains<K: copy, drop, V>(self: &table::Table<K, V>, key: K): bool
 
@@ -343,8 +343,8 @@ Returns true iff table contains an Implementation -
public fun contains<K: copy + drop, V>(table: &Table<K, V>, key: K): bool {
-    contains_box<K, V, Box<V>>(table, key)
+
public fun contains<K: copy + drop, V>(self: &Table<K, V>, key: K): bool {
+    contains_box<K, V, Box<V>>(self, key)
 }
 
@@ -358,7 +358,7 @@ Returns true iff table contains an -
public(friend) fun destroy<K: copy, drop, V>(table: table::Table<K, V>)
+
public(friend) fun destroy<K: copy, drop, V>(self: table::Table<K, V>)
 
@@ -367,9 +367,9 @@ Returns true iff table contains an Implementation -
public(friend) fun destroy<K: copy + drop, V>(table: Table<K, V>) {
-    destroy_empty_box<K, V, Box<V>>(&table);
-    drop_unchecked_box<K, V, Box<V>>(table)
+
public(friend) fun destroy<K: copy + drop, V>(self: Table<K, V>) {
+    destroy_empty_box<K, V, Box<V>>(&self);
+    drop_unchecked_box<K, V, Box<V>>(self)
 }
 
@@ -618,7 +618,7 @@ Returns true iff table contains an ### Function `add` -
public fun add<K: copy, drop, V>(table: &mut table::Table<K, V>, key: K, val: V)
+
public fun add<K: copy, drop, V>(self: &mut table::Table<K, V>, key: K, val: V)
 
@@ -634,7 +634,7 @@ Returns true iff table contains an ### Function `borrow` -
public fun borrow<K: copy, drop, V>(table: &table::Table<K, V>, key: K): &V
+
public fun borrow<K: copy, drop, V>(self: &table::Table<K, V>, key: K): &V
 
@@ -650,7 +650,7 @@ Returns true iff table contains an ### Function `borrow_mut` -
public fun borrow_mut<K: copy, drop, V>(table: &mut table::Table<K, V>, key: K): &mut V
+
public fun borrow_mut<K: copy, drop, V>(self: &mut table::Table<K, V>, key: K): &mut V
 
@@ -666,7 +666,7 @@ Returns true iff table contains an ### Function `borrow_mut_with_default` -
public fun borrow_mut_with_default<K: copy, drop, V: drop>(table: &mut table::Table<K, V>, key: K, default: V): &mut V
+
public fun borrow_mut_with_default<K: copy, drop, V: drop>(self: &mut table::Table<K, V>, key: K, default: V): &mut V
 
@@ -682,7 +682,7 @@ Returns true iff table contains an ### Function `upsert` -
public fun upsert<K: copy, drop, V: drop>(table: &mut table::Table<K, V>, key: K, value: V)
+
public fun upsert<K: copy, drop, V: drop>(self: &mut table::Table<K, V>, key: K, value: V)
 
@@ -698,7 +698,7 @@ Returns true iff table contains an ### Function `remove` -
public fun remove<K: copy, drop, V>(table: &mut table::Table<K, V>, key: K): V
+
public fun remove<K: copy, drop, V>(self: &mut table::Table<K, V>, key: K): V
 
@@ -714,7 +714,7 @@ Returns true iff table contains an ### Function `contains` -
public fun contains<K: copy, drop, V>(table: &table::Table<K, V>, key: K): bool
+
public fun contains<K: copy, drop, V>(self: &table::Table<K, V>, key: K): bool
 
@@ -766,7 +766,7 @@ Returns true iff table contains an ### Function `destroy` -
public(friend) fun destroy<K: copy, drop, V>(table: table::Table<K, V>)
+
public(friend) fun destroy<K: copy, drop, V>(self: table::Table<K, V>)
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/table_with_length.md b/aptos-move/framework/aptos-stdlib/doc/table_with_length.md index 1701578615e97..9bf3b27091eba 100644 --- a/aptos-move/framework/aptos-stdlib/doc/table_with_length.md +++ b/aptos-move/framework/aptos-stdlib/doc/table_with_length.md @@ -141,7 +141,7 @@ Create a new Table. Destroy a table. The table must be empty to succeed. -
public fun destroy_empty<K: copy, drop, V>(table: table_with_length::TableWithLength<K, V>)
+
public fun destroy_empty<K: copy, drop, V>(self: table_with_length::TableWithLength<K, V>)
 
@@ -150,9 +150,9 @@ Destroy a table. The table must be empty to succeed. Implementation -
public fun destroy_empty<K: copy + drop, V>(table: TableWithLength<K, V>) {
-    assert!(table.length == 0, error::invalid_state(ENOT_EMPTY));
-    let TableWithLength { inner, length: _ } = table;
+
public fun destroy_empty<K: copy + drop, V>(self: TableWithLength<K, V>) {
+    assert!(self.length == 0, error::invalid_state(ENOT_EMPTY));
+    let TableWithLength { inner, length: _ } = self;
     table::destroy(inner)
 }
 
@@ -170,7 +170,7 @@ key already exists. The entry itself is not stored in the table, and cannot be discovered from it. -
public fun add<K: copy, drop, V>(table: &mut table_with_length::TableWithLength<K, V>, key: K, val: V)
+
public fun add<K: copy, drop, V>(self: &mut table_with_length::TableWithLength<K, V>, key: K, val: V)
 
@@ -179,9 +179,9 @@ table, and cannot be discovered from it. Implementation -
public fun add<K: copy + drop, V>(table: &mut TableWithLength<K, V>, key: K, val: V) {
-    table::add(&mut table.inner, key, val);
-    table.length = table.length + 1;
+
public fun add<K: copy + drop, V>(self: &mut TableWithLength<K, V>, key: K, val: V) {
+    table::add(&mut self.inner, key, val);
+    self.length = self.length + 1;
 }
 
@@ -197,7 +197,7 @@ Acquire an immutable reference to the value which key maps to. Aborts if there is no entry for key. -
public fun borrow<K: copy, drop, V>(table: &table_with_length::TableWithLength<K, V>, key: K): &V
+
public fun borrow<K: copy, drop, V>(self: &table_with_length::TableWithLength<K, V>, key: K): &V
 
@@ -206,8 +206,8 @@ Aborts if there is no entry for key. Implementation -
public fun borrow<K: copy + drop, V>(table: &TableWithLength<K, V>, key: K): &V {
-    table::borrow(&table.inner, key)
+
public fun borrow<K: copy + drop, V>(self: &TableWithLength<K, V>, key: K): &V {
+    table::borrow(&self.inner, key)
 }
 
@@ -223,7 +223,7 @@ Acquire a mutable reference to the value which key maps to. Aborts if there is no entry for key. -
public fun borrow_mut<K: copy, drop, V>(table: &mut table_with_length::TableWithLength<K, V>, key: K): &mut V
+
public fun borrow_mut<K: copy, drop, V>(self: &mut table_with_length::TableWithLength<K, V>, key: K): &mut V
 
@@ -232,8 +232,8 @@ Aborts if there is no entry for key. Implementation -
public fun borrow_mut<K: copy + drop, V>(table: &mut TableWithLength<K, V>, key: K): &mut V {
-    table::borrow_mut(&mut table.inner, key)
+
public fun borrow_mut<K: copy + drop, V>(self: &mut TableWithLength<K, V>, key: K): &mut V {
+    table::borrow_mut(&mut self.inner, key)
 }
 
@@ -248,7 +248,7 @@ Aborts if there is no entry for key. Returns the length of the table, i.e. the number of entries. -
public fun length<K: copy, drop, V>(table: &table_with_length::TableWithLength<K, V>): u64
+
public fun length<K: copy, drop, V>(self: &table_with_length::TableWithLength<K, V>): u64
 
@@ -257,8 +257,8 @@ Returns the length of the table, i.e. the number of entries. Implementation -
public fun length<K: copy + drop, V>(table: &TableWithLength<K, V>): u64 {
-    table.length
+
public fun length<K: copy + drop, V>(self: &TableWithLength<K, V>): u64 {
+    self.length
 }
 
@@ -273,7 +273,7 @@ Returns the length of the table, i.e. the number of entries. Returns true if this table is empty. -
public fun empty<K: copy, drop, V>(table: &table_with_length::TableWithLength<K, V>): bool
+
public fun empty<K: copy, drop, V>(self: &table_with_length::TableWithLength<K, V>): bool
 
@@ -282,8 +282,8 @@ Returns true if this table is empty. Implementation -
public fun empty<K: copy + drop, V>(table: &TableWithLength<K, V>): bool {
-    table.length == 0
+
public fun empty<K: copy + drop, V>(self: &TableWithLength<K, V>): bool {
+    self.length == 0
 }
 
@@ -299,7 +299,7 @@ Acquire a mutable reference to the value which key maps to. Insert the pair (key, default) first if there is no entry for key. -
public fun borrow_mut_with_default<K: copy, drop, V: drop>(table: &mut table_with_length::TableWithLength<K, V>, key: K, default: V): &mut V
+
public fun borrow_mut_with_default<K: copy, drop, V: drop>(self: &mut table_with_length::TableWithLength<K, V>, key: K, default: V): &mut V
 
@@ -308,13 +308,13 @@ Insert the pair (key, default) first if there is no en Implementation -
public fun borrow_mut_with_default<K: copy + drop, V: drop>(table: &mut TableWithLength<K, V>, key: K, default: V): &mut V {
-    if (table::contains(&table.inner, key)) {
-        table::borrow_mut(&mut table.inner, key)
+
public fun borrow_mut_with_default<K: copy + drop, V: drop>(self: &mut TableWithLength<K, V>, key: K, default: V): &mut V {
+    if (table::contains(&self.inner, key)) {
+        table::borrow_mut(&mut self.inner, key)
     } else {
-        table::add(&mut table.inner, key, default);
-        table.length = table.length + 1;
-        table::borrow_mut(&mut table.inner, key)
+        table::add(&mut self.inner, key, default);
+        self.length = self.length + 1;
+        table::borrow_mut(&mut self.inner, key)
     }
 }
 
@@ -331,7 +331,7 @@ Insert the pair (key, value) if there is no entry for update the value of the entry for key to value otherwise -
public fun upsert<K: copy, drop, V: drop>(table: &mut table_with_length::TableWithLength<K, V>, key: K, value: V)
+
public fun upsert<K: copy, drop, V: drop>(self: &mut table_with_length::TableWithLength<K, V>, key: K, value: V)
 
@@ -340,11 +340,11 @@ update the value of the entry for key to value otherwi Implementation -
public fun upsert<K: copy + drop, V: drop>(table: &mut TableWithLength<K, V>, key: K, value: V) {
-    if (!table::contains(&table.inner, key)) {
-        add(table, copy key, value)
+
public fun upsert<K: copy + drop, V: drop>(self: &mut TableWithLength<K, V>, key: K, value: V) {
+    if (!table::contains(&self.inner, key)) {
+        add(self, copy key, value)
     } else {
-        let ref = table::borrow_mut(&mut table.inner, key);
+        let ref = table::borrow_mut(&mut self.inner, key);
         *ref = value;
     };
 }
@@ -362,7 +362,7 @@ Remove from table and return the v
 Aborts if there is no entry for key.
 
 
-
public fun remove<K: copy, drop, V>(table: &mut table_with_length::TableWithLength<K, V>, key: K): V
+
public fun remove<K: copy, drop, V>(self: &mut table_with_length::TableWithLength<K, V>, key: K): V
 
@@ -371,9 +371,9 @@ Aborts if there is no entry for key. Implementation -
public fun remove<K: copy + drop, V>(table: &mut TableWithLength<K, V>, key: K): V {
-    let val = table::remove(&mut table.inner, key);
-    table.length = table.length - 1;
+
public fun remove<K: copy + drop, V>(self: &mut TableWithLength<K, V>, key: K): V {
+    let val = table::remove(&mut self.inner, key);
+    self.length = self.length - 1;
     val
 }
 
@@ -389,7 +389,7 @@ Aborts if there is no entry for key. Returns true iff table contains an entry for key. -
public fun contains<K: copy, drop, V>(table: &table_with_length::TableWithLength<K, V>, key: K): bool
+
public fun contains<K: copy, drop, V>(self: &table_with_length::TableWithLength<K, V>, key: K): bool
 
@@ -398,8 +398,8 @@ Returns true iff table contains an Implementation -
public fun contains<K: copy + drop, V>(table: &TableWithLength<K, V>, key: K): bool {
-    table::contains(&table.inner, key)
+
public fun contains<K: copy + drop, V>(self: &TableWithLength<K, V>, key: K): bool {
+    table::contains(&self.inner, key)
 }
 
@@ -481,7 +481,7 @@ Returns true iff table contains an ### Function `destroy_empty` -
public fun destroy_empty<K: copy, drop, V>(table: table_with_length::TableWithLength<K, V>)
+
public fun destroy_empty<K: copy, drop, V>(self: table_with_length::TableWithLength<K, V>)
 
@@ -497,7 +497,7 @@ Returns true iff table contains an ### Function `add` -
public fun add<K: copy, drop, V>(table: &mut table_with_length::TableWithLength<K, V>, key: K, val: V)
+
public fun add<K: copy, drop, V>(self: &mut table_with_length::TableWithLength<K, V>, key: K, val: V)
 
@@ -513,7 +513,7 @@ Returns true iff table contains an ### Function `borrow` -
public fun borrow<K: copy, drop, V>(table: &table_with_length::TableWithLength<K, V>, key: K): &V
+
public fun borrow<K: copy, drop, V>(self: &table_with_length::TableWithLength<K, V>, key: K): &V
 
@@ -529,7 +529,7 @@ Returns true iff table contains an ### Function `borrow_mut` -
public fun borrow_mut<K: copy, drop, V>(table: &mut table_with_length::TableWithLength<K, V>, key: K): &mut V
+
public fun borrow_mut<K: copy, drop, V>(self: &mut table_with_length::TableWithLength<K, V>, key: K): &mut V
 
@@ -545,7 +545,7 @@ Returns true iff table contains an ### Function `length` -
public fun length<K: copy, drop, V>(table: &table_with_length::TableWithLength<K, V>): u64
+
public fun length<K: copy, drop, V>(self: &table_with_length::TableWithLength<K, V>): u64
 
@@ -561,7 +561,7 @@ Returns true iff table contains an ### Function `empty` -
public fun empty<K: copy, drop, V>(table: &table_with_length::TableWithLength<K, V>): bool
+
public fun empty<K: copy, drop, V>(self: &table_with_length::TableWithLength<K, V>): bool
 
@@ -577,7 +577,7 @@ Returns true iff table contains an ### Function `borrow_mut_with_default` -
public fun borrow_mut_with_default<K: copy, drop, V: drop>(table: &mut table_with_length::TableWithLength<K, V>, key: K, default: V): &mut V
+
public fun borrow_mut_with_default<K: copy, drop, V: drop>(self: &mut table_with_length::TableWithLength<K, V>, key: K, default: V): &mut V
 
@@ -594,7 +594,7 @@ Returns true iff table contains an ### Function `upsert` -
public fun upsert<K: copy, drop, V: drop>(table: &mut table_with_length::TableWithLength<K, V>, key: K, value: V)
+
public fun upsert<K: copy, drop, V: drop>(self: &mut table_with_length::TableWithLength<K, V>, key: K, value: V)
 
@@ -610,7 +610,7 @@ Returns true iff table contains an ### Function `remove` -
public fun remove<K: copy, drop, V>(table: &mut table_with_length::TableWithLength<K, V>, key: K): V
+
public fun remove<K: copy, drop, V>(self: &mut table_with_length::TableWithLength<K, V>, key: K): V
 
@@ -626,7 +626,7 @@ Returns true iff table contains an ### Function `contains` -
public fun contains<K: copy, drop, V>(table: &table_with_length::TableWithLength<K, V>, key: K): bool
+
public fun contains<K: copy, drop, V>(self: &table_with_length::TableWithLength<K, V>, key: K): bool
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/type_info.md b/aptos-move/framework/aptos-stdlib/doc/type_info.md index 1d9687e05701a..2e73ef96f80ab 100644 --- a/aptos-move/framework/aptos-stdlib/doc/type_info.md +++ b/aptos-move/framework/aptos-stdlib/doc/type_info.md @@ -93,7 +93,7 @@ -
public fun account_address(type_info: &type_info::TypeInfo): address
+
public fun account_address(self: &type_info::TypeInfo): address
 
@@ -102,8 +102,8 @@ Implementation -
public fun account_address(type_info: &TypeInfo): address {
-    type_info.account_address
+
public fun account_address(self: &TypeInfo): address {
+    self.account_address
 }
 
@@ -117,7 +117,7 @@ -
public fun module_name(type_info: &type_info::TypeInfo): vector<u8>
+
public fun module_name(self: &type_info::TypeInfo): vector<u8>
 
@@ -126,8 +126,8 @@ Implementation -
public fun module_name(type_info: &TypeInfo): vector<u8> {
-    type_info.module_name
+
public fun module_name(self: &TypeInfo): vector<u8> {
+    self.module_name
 }
 
@@ -141,7 +141,7 @@ -
public fun struct_name(type_info: &type_info::TypeInfo): vector<u8>
+
public fun struct_name(self: &type_info::TypeInfo): vector<u8>
 
@@ -150,8 +150,8 @@ Implementation -
public fun struct_name(type_info: &TypeInfo): vector<u8> {
-    type_info.struct_name
+
public fun struct_name(self: &TypeInfo): vector<u8> {
+    self.struct_name
 }
 
@@ -283,8 +283,7 @@ analysis of vector size dynamism.
public fun size_of_val<T>(val_ref: &T): u64 {
-    // Return vector length of vectorized BCS representation.
-    vector::length(&bcs::to_bytes(val_ref))
+    bcs::serialized_size(val_ref)
 }
 
@@ -451,8 +450,7 @@ analysis of vector size dynamism. -
aborts_if false;
-ensures result == spec_size_of_val<T>(val_ref);
+
ensures result == spec_size_of_val<T>(val_ref);
 
diff --git a/aptos-move/framework/aptos-stdlib/sources/any.move b/aptos-move/framework/aptos-stdlib/sources/any.move index d2851b77f44b4..480ff2460f09a 100644 --- a/aptos-move/framework/aptos-stdlib/sources/any.move +++ b/aptos-move/framework/aptos-stdlib/sources/any.move @@ -36,14 +36,14 @@ module aptos_std::any { } /// Unpack a value from the `Any` representation. This aborts if the value has not the expected type `T`. - public fun unpack(x: Any): T { - assert!(type_info::type_name() == x.type_name, error::invalid_argument(ETYPE_MISMATCH)); - from_bytes(x.data) + public fun unpack(self: Any): T { + assert!(type_info::type_name() == self.type_name, error::invalid_argument(ETYPE_MISMATCH)); + from_bytes(self.data) } /// Returns the type name of this Any - public fun type_name(x: &Any): &String { - &x.type_name + public fun type_name(self: &Any): &String { + &self.type_name } #[test_only] diff --git a/aptos-move/framework/aptos-stdlib/sources/any.spec.move b/aptos-move/framework/aptos-stdlib/sources/any.spec.move index 2e55009e4bed3..47501c83d9e27 100644 --- a/aptos-move/framework/aptos-stdlib/sources/any.spec.move +++ b/aptos-move/framework/aptos-stdlib/sources/any.spec.move @@ -15,28 +15,28 @@ spec aptos_std::any { ensures [abstract] from_bcs::deserializable(result.data); } - spec unpack(x: Any): T { + spec unpack(self: Any): T { use aptos_std::from_bcs; include UnpackAbortsIf; - ensures result == from_bcs::deserialize(x.data); + ensures result == from_bcs::deserialize(self.data); } spec schema UnpackAbortsIf { use aptos_std::from_bcs; - x: Any; - aborts_if type_info::type_name() != x.type_name; - aborts_if !from_bcs::deserializable(x.data); + self: Any; + aborts_if type_info::type_name() != self.type_name; + aborts_if !from_bcs::deserializable(self.data); } spec schema UnpackRequirement { use aptos_std::from_bcs; - x: Any; - requires type_info::type_name() == x.type_name; - requires from_bcs::deserializable(x.data); + self: Any; + requires type_info::type_name() 
== self.type_name; + requires from_bcs::deserializable(self.data); } - spec type_name(x: &Any): &String { + spec type_name(self: &Any): &String { aborts_if false; - ensures result == x.type_name; + ensures result == self.type_name; } } diff --git a/aptos-move/framework/aptos-stdlib/sources/capability.move b/aptos-move/framework/aptos-stdlib/sources/capability.move index b61c18ccc15e8..f68c7b0144589 100644 --- a/aptos-move/framework/aptos-stdlib/sources/capability.move +++ b/aptos-move/framework/aptos-stdlib/sources/capability.move @@ -146,34 +146,34 @@ module aptos_std::capability { /// Returns the root address associated with the given capability token. Only the owner /// of the feature can do this. - public fun root_addr(cap: Cap, _feature_witness: &Feature): address { - cap.root + public fun root_addr(self: Cap, _feature_witness: &Feature): address { + self.root } /// Returns the root address associated with the given linear capability token. - public fun linear_root_addr(cap: LinearCap, _feature_witness: &Feature): address { - cap.root + public fun linear_root_addr(self: LinearCap, _feature_witness: &Feature): address { + self.root } /// Registers a delegation relation. If the relation already exists, this function does /// nothing. // TODO: explore whether this should be idempotent like now or abort - public fun delegate(cap: Cap, _feature_witness: &Feature, to: &signer) + public fun delegate(self: Cap, _feature_witness: &Feature, to: &signer) acquires CapState { let addr = signer::address_of(to); if (exists>(addr)) return; - move_to(to, CapDelegateState { root: cap.root }); - add_element(&mut borrow_global_mut>(cap.root).delegates, addr); + move_to(to, CapDelegateState { root: self.root }); + add_element(&mut borrow_global_mut>(self.root).delegates, addr); } /// Revokes a delegation relation. If no relation exists, this function does nothing. 
// TODO: explore whether this should be idempotent like now or abort - public fun revoke(cap: Cap, _feature_witness: &Feature, from: address) + public fun revoke(self: Cap, _feature_witness: &Feature, from: address) acquires CapState, CapDelegateState { if (!exists>(from)) return; let CapDelegateState { root: _root } = move_from>(from); - remove_element(&mut borrow_global_mut>(cap.root).delegates, &from); + remove_element(&mut borrow_global_mut>(self.root).delegates, &from); } /// Helper to remove an element from a vector. diff --git a/aptos-move/framework/aptos-stdlib/sources/capability.spec.move b/aptos-move/framework/aptos-stdlib/sources/capability.spec.move index d7c6ba949f0a3..ef9af60e38197 100644 --- a/aptos-move/framework/aptos-stdlib/sources/capability.spec.move +++ b/aptos-move/framework/aptos-stdlib/sources/capability.spec.move @@ -44,14 +44,14 @@ spec aptos_std::capability { aborts_if !spec_has_delegate_cap(addr) && !spec_has_cap(addr); } - spec delegate(cap: Cap, _feature_witness: &Feature, to: &signer) { + spec delegate(self: Cap, _feature_witness: &Feature, to: &signer) { let addr = signer::address_of(to); ensures spec_has_delegate_cap(addr); - ensures !old(spec_has_delegate_cap(addr)) ==> global>(addr).root == cap.root; - ensures !old(spec_has_delegate_cap(addr)) ==> vector::spec_contains(spec_delegates(cap.root), addr); + ensures !old(spec_has_delegate_cap(addr)) ==> global>(addr).root == self.root; + ensures !old(spec_has_delegate_cap(addr)) ==> vector::spec_contains(spec_delegates(self.root), addr); } - spec revoke(cap: Cap, _feature_witness: &Feature, from: address) { + spec revoke(self: Cap, _feature_witness: &Feature, from: address) { ensures !spec_has_delegate_cap(from); // TODO: this cannot be proved. 
See issue #7422 // ensures old(spec_has_delegate_cap(from)) diff --git a/aptos-move/framework/aptos-stdlib/sources/comparator.move b/aptos-move/framework/aptos-stdlib/sources/comparator.move index 869b486b4ba5b..2a1a979613afe 100644 --- a/aptos-move/framework/aptos-stdlib/sources/comparator.move +++ b/aptos-move/framework/aptos-stdlib/sources/comparator.move @@ -11,16 +11,16 @@ module aptos_std::comparator { inner: u8, } - public fun is_equal(result: &Result): bool { - result.inner == EQUAL + public fun is_equal(self: &Result): bool { + self.inner == EQUAL } - public fun is_smaller_than(result: &Result): bool { - result.inner == SMALLER + public fun is_smaller_than(self: &Result): bool { + self.inner == SMALLER } - public fun is_greater_than(result: &Result): bool { - result.inner == GREATER + public fun is_greater_than(self: &Result): bool { + self.inner == GREATER } // Performs a comparison of two types after BCS serialization. diff --git a/aptos-move/framework/aptos-stdlib/sources/comparator.spec.move b/aptos-move/framework/aptos-stdlib/sources/comparator.spec.move index 0c14b444ad6e8..5e7d8e96a28a1 100644 --- a/aptos-move/framework/aptos-stdlib/sources/comparator.spec.move +++ b/aptos-move/framework/aptos-stdlib/sources/comparator.spec.move @@ -3,21 +3,21 @@ spec aptos_std::comparator { invariant inner == EQUAL || inner == SMALLER || inner == GREATER; } - spec is_equal(result: &Result): bool { + spec is_equal(self: &Result): bool { aborts_if false; - let res = result; + let res = self; ensures result == (res.inner == EQUAL); } - spec is_smaller_than(result: &Result): bool { + spec is_smaller_than(self: &Result): bool { aborts_if false; - let res = result; + let res = self; ensures result == (res.inner == SMALLER); } - spec is_greater_than(result: &Result): bool { + spec is_greater_than(self: &Result): bool { aborts_if false; - let res = result; + let res = self; ensures result == (res.inner == GREATER); } diff --git 
a/aptos-move/framework/aptos-stdlib/sources/copyable_any.move b/aptos-move/framework/aptos-stdlib/sources/copyable_any.move index b12303a3f9237..cecb0a028bf9a 100644 --- a/aptos-move/framework/aptos-stdlib/sources/copyable_any.move +++ b/aptos-move/framework/aptos-stdlib/sources/copyable_any.move @@ -24,14 +24,14 @@ module aptos_std::copyable_any { } /// Unpack a value from the `Any` representation. This aborts if the value has not the expected type `T`. - public fun unpack(x: Any): T { - assert!(type_info::type_name() == x.type_name, error::invalid_argument(ETYPE_MISMATCH)); - from_bytes(x.data) + public fun unpack(self: Any): T { + assert!(type_info::type_name() == self.type_name, error::invalid_argument(ETYPE_MISMATCH)); + from_bytes(self.data) } /// Returns the type name of this Any - public fun type_name(x: &Any): &String { - &x.type_name + public fun type_name(self: &Any): &String { + &self.type_name } #[test_only] diff --git a/aptos-move/framework/aptos-stdlib/sources/copyable_any.spec.move b/aptos-move/framework/aptos-stdlib/sources/copyable_any.spec.move index d1d64a81a4fac..f9a27e4c33838 100644 --- a/aptos-move/framework/aptos-stdlib/sources/copyable_any.spec.move +++ b/aptos-move/framework/aptos-stdlib/sources/copyable_any.spec.move @@ -16,21 +16,21 @@ spec aptos_std::copyable_any { ensures [abstract] from_bcs::deserializable(result.data); } - spec unpack(x: Any): T { + spec unpack(self: Any): T { use aptos_std::from_bcs; include UnpackAbortsIf; - ensures result == from_bcs::deserialize(x.data); + ensures result == from_bcs::deserialize(self.data); } spec schema UnpackAbortsIf { use aptos_std::from_bcs; - x: Any; - aborts_if type_info::type_name() != x.type_name; - aborts_if !from_bcs::deserializable(x.data); + self: Any; + aborts_if type_info::type_name() != self.type_name; + aborts_if !from_bcs::deserializable(self.data); } - spec type_name(x: &Any): &String { + spec type_name(self: &Any): &String { aborts_if false; - ensures result == x.type_name; + 
ensures result == self.type_name; } } diff --git a/aptos-move/framework/aptos-stdlib/sources/data_structures/big_vector.move b/aptos-move/framework/aptos-stdlib/sources/data_structures/big_vector.move index a7eca39732823..ce381e9e0d7cc 100644 --- a/aptos-move/framework/aptos-stdlib/sources/data_structures/big_vector.move +++ b/aptos-move/framework/aptos-stdlib/sources/data_structures/big_vector.move @@ -40,17 +40,17 @@ module aptos_std::big_vector { v } - /// Destroy the vector `v`. - /// Aborts if `v` is not empty. - public fun destroy_empty(v: BigVector) { - assert!(is_empty(&v), error::invalid_argument(EVECTOR_NOT_EMPTY)); - let BigVector { buckets, end_index: _, bucket_size: _ } = v; + /// Destroy the vector `self`. + /// Aborts if `self` is not empty. + public fun destroy_empty(self: BigVector) { + assert!(is_empty(&self), error::invalid_argument(EVECTOR_NOT_EMPTY)); + let BigVector { buckets, end_index: _, bucket_size: _ } = self; table_with_length::destroy_empty(buckets); } - /// Destroy the vector `v` if T has `drop` - public fun destroy(v: BigVector) { - let BigVector { buckets, end_index, bucket_size: _ } = v; + /// Destroy the vector `self` if T has `drop` + public fun destroy(self: BigVector) { + let BigVector { buckets, end_index, bucket_size: _ } = self; let i = 0; while (end_index > 0) { let num_elements = vector::length(&table_with_length::remove(&mut buckets, i)); @@ -60,93 +60,93 @@ module aptos_std::big_vector { table_with_length::destroy_empty(buckets); } - /// Acquire an immutable reference to the `i`th element of the vector `v`. + /// Acquire an immutable reference to the `i`th element of the vector `self`. /// Aborts if `i` is out of bounds. 
- public fun borrow(v: &BigVector, i: u64): &T { - assert!(i < length(v), error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); - vector::borrow(table_with_length::borrow(&v.buckets, i / v.bucket_size), i % v.bucket_size) + public fun borrow(self: &BigVector, i: u64): &T { + assert!(i < length(self), error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); + vector::borrow(table_with_length::borrow(&self.buckets, i / self.bucket_size), i % self.bucket_size) } - /// Return a mutable reference to the `i`th element in the vector `v`. + /// Return a mutable reference to the `i`th element in the vector `self`. /// Aborts if `i` is out of bounds. - public fun borrow_mut(v: &mut BigVector, i: u64): &mut T { - assert!(i < length(v), error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); - vector::borrow_mut(table_with_length::borrow_mut(&mut v.buckets, i / v.bucket_size), i % v.bucket_size) + public fun borrow_mut(self: &mut BigVector, i: u64): &mut T { + assert!(i < length(self), error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); + vector::borrow_mut(table_with_length::borrow_mut(&mut self.buckets, i / self.bucket_size), i % self.bucket_size) } - /// Empty and destroy the other vector, and push each of the elements in the other vector onto the lhs vector in the + /// Empty and destroy the other vector, and push each of the elements in the other vector onto the self vector in the /// same order as they occurred in other. /// Disclaimer: This function is costly. Use it at your own discretion. 
- public fun append(lhs: &mut BigVector, other: BigVector) { + public fun append(self: &mut BigVector, other: BigVector) { let other_len = length(&other); let half_other_len = other_len / 2; let i = 0; while (i < half_other_len) { - push_back(lhs, swap_remove(&mut other, i)); + push_back(self, swap_remove(&mut other, i)); i = i + 1; }; while (i < other_len) { - push_back(lhs, pop_back(&mut other)); + push_back(self, pop_back(&mut other)); i = i + 1; }; destroy_empty(other); } - /// Add element `val` to the end of the vector `v`. It grows the buckets when the current buckets are full. + /// Add element `val` to the end of the vector `self`. It grows the buckets when the current buckets are full. /// This operation will cost more gas when it adds new bucket. - public fun push_back(v: &mut BigVector, val: T) { - let num_buckets = table_with_length::length(&v.buckets); - if (v.end_index == num_buckets * v.bucket_size) { - table_with_length::add(&mut v.buckets, num_buckets, vector::empty()); - vector::push_back(table_with_length::borrow_mut(&mut v.buckets, num_buckets), val); + public fun push_back(self: &mut BigVector, val: T) { + let num_buckets = table_with_length::length(&self.buckets); + if (self.end_index == num_buckets * self.bucket_size) { + table_with_length::add(&mut self.buckets, num_buckets, vector::empty()); + vector::push_back(table_with_length::borrow_mut(&mut self.buckets, num_buckets), val); } else { - vector::push_back(table_with_length::borrow_mut(&mut v.buckets, num_buckets - 1), val); + vector::push_back(table_with_length::borrow_mut(&mut self.buckets, num_buckets - 1), val); }; - v.end_index = v.end_index + 1; + self.end_index = self.end_index + 1; } - /// Pop an element from the end of vector `v`. It doesn't shrink the buckets even if they're empty. + /// Pop an element from the end of vector `self`. It doesn't shrink the buckets even if they're empty. /// Call `shrink_to_fit` explicity to deallocate empty buckets. - /// Aborts if `v` is empty. 
- public fun pop_back(v: &mut BigVector): T { - assert!(!is_empty(v), error::invalid_state(EVECTOR_EMPTY)); - let num_buckets = table_with_length::length(&v.buckets); - let last_bucket = table_with_length::borrow_mut(&mut v.buckets, num_buckets - 1); + /// Aborts if `self` is empty. + public fun pop_back(self: &mut BigVector): T { + assert!(!is_empty(self), error::invalid_state(EVECTOR_EMPTY)); + let num_buckets = table_with_length::length(&self.buckets); + let last_bucket = table_with_length::borrow_mut(&mut self.buckets, num_buckets - 1); let val = vector::pop_back(last_bucket); // Shrink the table if the last vector is empty. if (vector::is_empty(last_bucket)) { move last_bucket; - vector::destroy_empty(table_with_length::remove(&mut v.buckets, num_buckets - 1)); + vector::destroy_empty(table_with_length::remove(&mut self.buckets, num_buckets - 1)); }; - v.end_index = v.end_index - 1; + self.end_index = self.end_index - 1; val } - /// Remove the element at index i in the vector v and return the owned value that was previously stored at i in v. + /// Remove the element at index i in the vector v and return the owned value that was previously stored at i in self. /// All elements occurring at indices greater than i will be shifted down by 1. Will abort if i is out of bounds. /// Disclaimer: This function is costly. Use it at your own discretion. 
- public fun remove(v: &mut BigVector, i: u64): T { - let len = length(v); + public fun remove(self: &mut BigVector, i: u64): T { + let len = length(self); assert!(i < len, error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); - let num_buckets = table_with_length::length(&v.buckets); - let cur_bucket_index = i / v.bucket_size + 1; - let cur_bucket = table_with_length::borrow_mut(&mut v.buckets, cur_bucket_index - 1); - let res = vector::remove(cur_bucket, i % v.bucket_size); - v.end_index = v.end_index - 1; + let num_buckets = table_with_length::length(&self.buckets); + let cur_bucket_index = i / self.bucket_size + 1; + let cur_bucket = table_with_length::borrow_mut(&mut self.buckets, cur_bucket_index - 1); + let res = vector::remove(cur_bucket, i % self.bucket_size); + self.end_index = self.end_index - 1; move cur_bucket; while ({ spec { invariant cur_bucket_index <= num_buckets; - invariant table_with_length::spec_len(v.buckets) == num_buckets; + invariant table_with_length::spec_len(self.buckets) == num_buckets; }; (cur_bucket_index < num_buckets) }) { // remove one element from the start of current vector - let cur_bucket = table_with_length::borrow_mut(&mut v.buckets, cur_bucket_index); + let cur_bucket = table_with_length::borrow_mut(&mut self.buckets, cur_bucket_index); let t = vector::remove(cur_bucket, 0); move cur_bucket; // and put it at the end of the last one - let prev_bucket = table_with_length::borrow_mut(&mut v.buckets, cur_bucket_index - 1); + let prev_bucket = table_with_length::borrow_mut(&mut self.buckets, cur_bucket_index - 1); vector::push_back(prev_bucket, t); cur_bucket_index = cur_bucket_index + 1; }; @@ -155,50 +155,50 @@ module aptos_std::big_vector { }; // Shrink the table if the last vector is empty. 
- let last_bucket = table_with_length::borrow_mut(&mut v.buckets, num_buckets - 1); + let last_bucket = table_with_length::borrow_mut(&mut self.buckets, num_buckets - 1); if (vector::is_empty(last_bucket)) { move last_bucket; - vector::destroy_empty(table_with_length::remove(&mut v.buckets, num_buckets - 1)); + vector::destroy_empty(table_with_length::remove(&mut self.buckets, num_buckets - 1)); }; res } - /// Swap the `i`th element of the vector `v` with the last element and then pop the vector. + /// Swap the `i`th element of the vector `self` with the last element and then pop the vector. /// This is O(1), but does not preserve ordering of elements in the vector. /// Aborts if `i` is out of bounds. - public fun swap_remove(v: &mut BigVector, i: u64): T { - assert!(i < length(v), error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); - let last_val = pop_back(v); + public fun swap_remove(self: &mut BigVector, i: u64): T { + assert!(i < length(self), error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); + let last_val = pop_back(self); // if the requested value is the last one, return it - if (v.end_index == i) { + if (self.end_index == i) { return last_val }; // because the lack of mem::swap, here we swap remove the requested value from the bucket // and append the last_val to the bucket then swap the last bucket val back - let bucket = table_with_length::borrow_mut(&mut v.buckets, i / v.bucket_size); + let bucket = table_with_length::borrow_mut(&mut self.buckets, i / self.bucket_size); let bucket_len = vector::length(bucket); - let val = vector::swap_remove(bucket, i % v.bucket_size); + let val = vector::swap_remove(bucket, i % self.bucket_size); vector::push_back(bucket, last_val); - vector::swap(bucket, i % v.bucket_size, bucket_len - 1); + vector::swap(bucket, i % self.bucket_size, bucket_len - 1); val } - /// Swap the elements at the i'th and j'th indices in the vector v. Will abort if either of i or j are out of bounds - /// for v. 
- public fun swap(v: &mut BigVector, i: u64, j: u64) { - assert!(i < length(v) && j < length(v), error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); - let i_bucket_index = i / v.bucket_size; - let j_bucket_index = j / v.bucket_size; - let i_vector_index = i % v.bucket_size; - let j_vector_index = j % v.bucket_size; + /// Swap the elements at the i'th and j'th indices in the vector self. Will abort if either of i or j are out of bounds + /// for self. + public fun swap(self: &mut BigVector, i: u64, j: u64) { + assert!(i < length(self) && j < length(self), error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); + let i_bucket_index = i / self.bucket_size; + let j_bucket_index = j / self.bucket_size; + let i_vector_index = i % self.bucket_size; + let j_vector_index = j % self.bucket_size; if (i_bucket_index == j_bucket_index) { - vector::swap(table_with_length::borrow_mut(&mut v.buckets, i_bucket_index), i_vector_index, j_vector_index); + vector::swap(table_with_length::borrow_mut(&mut self.buckets, i_bucket_index), i_vector_index, j_vector_index); return }; // If i and j are in different buckets, take the buckets out first for easy mutation. - let bucket_i = table_with_length::remove(&mut v.buckets, i_bucket_index); - let bucket_j = table_with_length::remove(&mut v.buckets, j_bucket_index); + let bucket_i = table_with_length::remove(&mut self.buckets, i_bucket_index); + let bucket_j = table_with_length::remove(&mut self.buckets, j_bucket_index); // Get the elements from buckets by calling `swap_remove`. let element_i = vector::swap_remove(&mut bucket_i, i_vector_index); let element_j = vector::swap_remove(&mut bucket_j, j_vector_index); @@ -211,23 +211,23 @@ module aptos_std::big_vector { vector::swap(&mut bucket_i, i_vector_index, last_index_in_bucket_i); vector::swap(&mut bucket_j, j_vector_index, last_index_in_bucket_j); // Add back the buckets. 
- table_with_length::add(&mut v.buckets, i_bucket_index, bucket_i); - table_with_length::add(&mut v.buckets, j_bucket_index, bucket_j); + table_with_length::add(&mut self.buckets, i_bucket_index, bucket_i); + table_with_length::add(&mut self.buckets, j_bucket_index, bucket_j); } - /// Reverse the order of the elements in the vector v in-place. + /// Reverse the order of the elements in the vector self in-place. /// Disclaimer: This function is costly. Use it at your own discretion. - public fun reverse(v: &mut BigVector) { + public fun reverse(self: &mut BigVector) { let new_buckets = vector[]; let push_bucket = vector[]; - let num_buckets = table_with_length::length(&v.buckets); + let num_buckets = table_with_length::length(&self.buckets); let num_buckets_left = num_buckets; while (num_buckets_left > 0) { - let pop_bucket = table_with_length::remove(&mut v.buckets, num_buckets_left - 1); + let pop_bucket = table_with_length::remove(&mut self.buckets, num_buckets_left - 1); vector::for_each_reverse(pop_bucket, |val| { vector::push_back(&mut push_bucket, val); - if (vector::length(&push_bucket) == v.bucket_size) { + if (vector::length(&push_bucket) == self.bucket_size) { vector::push_back(&mut new_buckets, push_bucket); push_bucket = vector[]; }; @@ -243,61 +243,61 @@ module aptos_std::big_vector { vector::reverse(&mut new_buckets); let i = 0; - assert!(table_with_length::length(&v.buckets) == 0, 0); + assert!(table_with_length::length(&self.buckets) == 0, 0); while (i < num_buckets) { - table_with_length::add(&mut v.buckets, i, vector::pop_back(&mut new_buckets)); + table_with_length::add(&mut self.buckets, i, vector::pop_back(&mut new_buckets)); i = i + 1; }; vector::destroy_empty(new_buckets); } - /// Return the index of the first occurrence of an element in v that is equal to e. Returns (true, index) if such an + /// Return the index of the first occurrence of an element in self that is equal to e. 
Returns (true, index) if such an /// element was found, and (false, 0) otherwise. /// Disclaimer: This function is costly. Use it at your own discretion. - public fun index_of(v: &BigVector, val: &T): (bool, u64) { - let num_buckets = table_with_length::length(&v.buckets); + public fun index_of(self: &BigVector, val: &T): (bool, u64) { + let num_buckets = table_with_length::length(&self.buckets); let bucket_index = 0; while (bucket_index < num_buckets) { - let cur = table_with_length::borrow(&v.buckets, bucket_index); + let cur = table_with_length::borrow(&self.buckets, bucket_index); let (found, i) = vector::index_of(cur, val); if (found) { - return (true, bucket_index * v.bucket_size + i) + return (true, bucket_index * self.bucket_size + i) }; bucket_index = bucket_index + 1; }; (false, 0) } - /// Return if an element equal to e exists in the vector v. + /// Return if an element equal to e exists in the vector self. /// Disclaimer: This function is costly. Use it at your own discretion. - public fun contains(v: &BigVector, val: &T): bool { - if (is_empty(v)) return false; - let (exist, _) = index_of(v, val); + public fun contains(self: &BigVector, val: &T): bool { + if (is_empty(self)) return false; + let (exist, _) = index_of(self, val); exist } /// Convert a big vector to a native vector, which is supposed to be called mostly by view functions to get an /// atomic view of the whole vector. /// Disclaimer: This function may be costly as the big vector may be huge in size. Use it at your own discretion. 
- public fun to_vector(v: &BigVector): vector { + public fun to_vector(self: &BigVector): vector { let res = vector[]; - let num_buckets = table_with_length::length(&v.buckets); + let num_buckets = table_with_length::length(&self.buckets); let i = 0; while (i < num_buckets) { - vector::append(&mut res, *table_with_length::borrow(&v.buckets, i)); + vector::append(&mut res, *table_with_length::borrow(&self.buckets, i)); i = i + 1; }; res } /// Return the length of the vector. - public fun length(v: &BigVector): u64 { - v.end_index + public fun length(self: &BigVector): u64 { + self.end_index } /// Return `true` if the vector `v` has no elements and `false` otherwise. - public fun is_empty(v: &BigVector): bool { - length(v) == 0 + public fun is_empty(self: &BigVector): bool { + length(self) == 0 } #[test] diff --git a/aptos-move/framework/aptos-stdlib/sources/data_structures/big_vector.spec.move b/aptos-move/framework/aptos-stdlib/sources/data_structures/big_vector.spec.move index 5556d4d3d2a8f..86e971c13025a 100644 --- a/aptos-move/framework/aptos-stdlib/sources/data_structures/big_vector.spec.move +++ b/aptos-move/framework/aptos-stdlib/sources/data_structures/big_vector.spec.move @@ -52,75 +52,75 @@ spec aptos_std::big_vector { ensures result.bucket_size == bucket_size; } - spec destroy_empty(v: BigVector) { - aborts_if !is_empty(v); + spec destroy_empty(self: BigVector) { + aborts_if !is_empty(self); } - spec borrow(v: &BigVector, i: u64): &T { - aborts_if i >= length(v); - ensures result == spec_at(v, i); + spec borrow(self: &BigVector, i: u64): &T { + aborts_if i >= length(self); + ensures result == spec_at(self, i); } - spec borrow_mut(v: &mut BigVector, i: u64): &mut T { - aborts_if i >= length(v); - ensures result == spec_at(v, i); + spec borrow_mut(self: &mut BigVector, i: u64): &mut T { + aborts_if i >= length(self); + ensures result == spec_at(self, i); } - spec push_back(v: &mut BigVector, val: T) { - let num_buckets = spec_table_len(v.buckets); + spec 
push_back(self: &mut BigVector, val: T) { + let num_buckets = spec_table_len(self.buckets); include PushbackAbortsIf; - ensures length(v) == length(old(v)) + 1; - ensures v.end_index == old(v.end_index) + 1; - ensures spec_at(v, v.end_index-1) == val; - ensures forall i in 0..v.end_index-1: spec_at(v, i) == spec_at(old(v), i); - ensures v.bucket_size == old(v).bucket_size; + ensures length(self) == length(old(self)) + 1; + ensures self.end_index == old(self.end_index) + 1; + ensures spec_at(self, self.end_index-1) == val; + ensures forall i in 0..self.end_index-1: spec_at(self, i) == spec_at(old(self), i); + ensures self.bucket_size == old(self).bucket_size; } spec schema PushbackAbortsIf { - v: BigVector; - let num_buckets = spec_table_len(v.buckets); - aborts_if num_buckets * v.bucket_size > MAX_U64; - aborts_if v.end_index + 1 > MAX_U64; + self: BigVector; + let num_buckets = spec_table_len(self.buckets); + aborts_if num_buckets * self.bucket_size > MAX_U64; + aborts_if self.end_index + 1 > MAX_U64; } - spec pop_back(v: &mut BigVector): T { - aborts_if is_empty(v); - ensures length(v) == length(old(v)) - 1; - ensures result == old(spec_at(v, v.end_index-1)); - ensures forall i in 0..v.end_index: spec_at(v, i) == spec_at(old(v), i); + spec pop_back(self: &mut BigVector): T { + aborts_if is_empty(self); + ensures length(self) == length(old(self)) - 1; + ensures result == old(spec_at(self, self.end_index-1)); + ensures forall i in 0..self.end_index: spec_at(self, i) == spec_at(old(self), i); } - spec swap_remove(v: &mut BigVector, i: u64): T { + spec swap_remove(self: &mut BigVector, i: u64): T { pragma verify_duration_estimate = 120; - aborts_if i >= length(v); - ensures length(v) == length(old(v)) - 1; - ensures result == spec_at(old(v), i); + aborts_if i >= length(self); + ensures length(self) == length(old(self)) - 1; + ensures result == spec_at(old(self), i); } - spec swap(v: &mut BigVector, i: u64, j: u64) { + spec swap(self: &mut BigVector, i: u64, j: u64) { 
pragma verify_duration_estimate = 1000; - aborts_if i >= length(v) || j >= length(v); - ensures length(v) == length(old(v)); - ensures spec_at(v, i) == spec_at(old(v), j); - ensures spec_at(v, j) == spec_at(old(v), i); - ensures forall idx in 0..length(v) + aborts_if i >= length(self) || j >= length(self); + ensures length(self) == length(old(self)); + ensures spec_at(self, i) == spec_at(old(self), j); + ensures spec_at(self, j) == spec_at(old(self), i); + ensures forall idx in 0..length(self) where idx != i && idx != j: - spec_at(v, idx) == spec_at(old(v), idx); + spec_at(self, idx) == spec_at(old(self), idx); } - spec append(lhs: &mut BigVector, other: BigVector) { + spec append(self: &mut BigVector, other: BigVector) { pragma verify=false; } - spec remove(v: &mut BigVector, i: u64): T { + spec remove(self: &mut BigVector, i: u64): T { pragma verify=false; } - spec reverse(v: &mut BigVector) { + spec reverse(self: &mut BigVector) { pragma verify=false; } - spec index_of(v: &BigVector, val: &T): (bool, u64) { + spec index_of(self: &BigVector, val: &T): (bool, u64) { pragma verify=false; } diff --git a/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_table.move b/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_table.move index 60a9565d0a221..c9c36712738da 100644 --- a/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_table.move +++ b/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_table.move @@ -95,34 +95,34 @@ module aptos_std::smart_table { /// Destroy empty table. /// Aborts if it's not empty. 
- public fun destroy_empty(table: SmartTable) { - assert!(table.size == 0, error::invalid_argument(ENOT_EMPTY)); + public fun destroy_empty(self: SmartTable) { + assert!(self.size == 0, error::invalid_argument(ENOT_EMPTY)); let i = 0; - while (i < table.num_buckets) { - vector::destroy_empty(table_with_length::remove(&mut table.buckets, i)); + while (i < self.num_buckets) { + vector::destroy_empty(table_with_length::remove(&mut self.buckets, i)); i = i + 1; }; - let SmartTable { buckets, num_buckets: _, level: _, size: _, split_load_threshold: _, target_bucket_size: _ } = table; + let SmartTable { buckets, num_buckets: _, level: _, size: _, split_load_threshold: _, target_bucket_size: _ } = self; table_with_length::destroy_empty(buckets); } /// Destroy a table completely when V has `drop`. - public fun destroy(table: SmartTable) { - clear(&mut table); - destroy_empty(table); + public fun destroy(self: SmartTable) { + clear(&mut self); + destroy_empty(self); } /// Clear a table completely when T has `drop`. - public fun clear(table: &mut SmartTable) { - *table_with_length::borrow_mut(&mut table.buckets, 0) = vector::empty(); + public fun clear(self: &mut SmartTable) { + *table_with_length::borrow_mut(&mut self.buckets, 0) = vector::empty(); let i = 1; - while (i < table.num_buckets) { - table_with_length::remove(&mut table.buckets, i); + while (i < self.num_buckets) { + table_with_length::remove(&mut self.buckets, i); i = i + 1; }; - table.num_buckets = 1; - table.level = 0; - table.size = 0; + self.num_buckets = 1; + self.level = 0; + self.size = 0; } /// Add (key, value) pair in the hash map, it may grow one bucket if current load factor exceeds the threshold. @@ -130,10 +130,10 @@ module aptos_std::smart_table { /// For standard linear hash algorithm, it is stored as a variable but `num_buckets` here could be leveraged. /// Abort if `key` already exists. /// Note: This method may occasionally cost much more gas when triggering bucket split. 
- public fun add(table: &mut SmartTable, key: K, value: V) { + public fun add(self: &mut SmartTable, key: K, value: V) { let hash = sip_hash_from_value(&key); - let index = bucket_index(table.level, table.num_buckets, hash); - let bucket = table_with_length::borrow_mut(&mut table.buckets, index); + let index = bucket_index(self.level, self.num_buckets, hash); + let bucket = table_with_length::borrow_mut(&mut self.buckets, index); // We set a per-bucket limit here with a upper bound (10000) that nobody should normally reach. assert!(vector::length(bucket) <= 10000, error::permission_denied(EEXCEED_MAX_BUCKET_SIZE)); assert!(vector::all(bucket, | entry | { @@ -141,21 +141,21 @@ module aptos_std::smart_table { &e.key != &key }), error::invalid_argument(EALREADY_EXIST)); let e = Entry { hash, key, value }; - if (table.target_bucket_size == 0) { + if (self.target_bucket_size == 0) { let estimated_entry_size = max(size_of_val(&e), 1); - table.target_bucket_size = max(1024 /* free_write_quota */ / estimated_entry_size, 1); + self.target_bucket_size = max(1024 /* free_write_quota */ / estimated_entry_size, 1); }; vector::push_back(bucket, e); - table.size = table.size + 1; + self.size = self.size + 1; - if (load_factor(table) >= (table.split_load_threshold as u64)) { - split_one_bucket(table); + if (load_factor(self) >= (self.split_load_threshold as u64)) { + split_one_bucket(self); } } /// Add multiple key/value pairs to the smart table. The keys must not already exist. - public fun add_all(table: &mut SmartTable, keys: vector, values: vector) { - vector::zip(keys, values, |key, value| { add(table, key, value); }); + public fun add_all(self: &mut SmartTable, keys: vector, values: vector) { + vector::zip(keys, values, |key, value| { add(self, key, value); }); } inline fun unzip_entries(entries: &vector>): (vector, vector) { @@ -173,12 +173,12 @@ module aptos_std::smart_table { /// view of the whole table. 
/// Disclaimer: This function may be costly as the smart table may be huge in size. Use it at your own discretion. public fun to_simple_map( - table: &SmartTable, + self: &SmartTable, ): SimpleMap { let i = 0; let res = simple_map::new(); - while (i < table.num_buckets) { - let (keys, values) = unzip_entries(table_with_length::borrow(&table.buckets, i)); + while (i < self.num_buckets) { + let (keys, values) = unzip_entries(table_with_length::borrow(&self.buckets, i)); simple_map::add_all(&mut res, keys, values); i = i + 1; }; @@ -190,9 +190,9 @@ module aptos_std::smart_table { /// For a large enough smart table this function will fail due to execution gas limits, and /// `keys_paginated` should be used instead. public fun keys( - table_ref: &SmartTable + self: &SmartTable ): vector { - let (keys, _, _) = keys_paginated(table_ref, 0, 0, length(table_ref)); + let (keys, _, _) = keys_paginated(self, 0, 0, length(self)); keys } @@ -210,7 +210,7 @@ module aptos_std::smart_table { /// returned bucket index and vector index value options are both none, which means that /// pagination is complete. For an example, see `test_keys()`. public fun keys_paginated( - table_ref: &SmartTable, + self: &SmartTable, starting_bucket_index: u64, starting_vector_index: u64, num_keys_to_get: u64, @@ -219,8 +219,8 @@ module aptos_std::smart_table { Option, Option, ) { - let num_buckets = table_ref.num_buckets; - let buckets_ref = &table_ref.buckets; + let num_buckets = self.num_buckets; + let buckets_ref = &self.buckets; assert!(starting_bucket_index < num_buckets, EINVALID_BUCKET_INDEX); let bucket_ref = table_with_length::borrow(buckets_ref, starting_bucket_index); let bucket_length = vector::length(bucket_ref); @@ -262,23 +262,23 @@ module aptos_std::smart_table { } /// Decide which is the next bucket to split and split it into two with the elements inside the bucket. 
- fun split_one_bucket(table: &mut SmartTable) { - let new_bucket_index = table.num_buckets; + fun split_one_bucket(self: &mut SmartTable) { + let new_bucket_index = self.num_buckets; // the next bucket to split is num_bucket without the most significant bit. - let to_split = new_bucket_index ^ (1 << table.level); - table.num_buckets = new_bucket_index + 1; + let to_split = new_bucket_index ^ (1 << self.level); + self.num_buckets = new_bucket_index + 1; // if the whole level is splitted once, bump the level. - if (to_split + 1 == 1 << table.level) { - table.level = table.level + 1; + if (to_split + 1 == 1 << self.level) { + self.level = self.level + 1; }; - let old_bucket = table_with_length::borrow_mut(&mut table.buckets, to_split); + let old_bucket = table_with_length::borrow_mut(&mut self.buckets, to_split); // partition the bucket, [0..p) stays in old bucket, [p..len) goes to new bucket let p = vector::partition(old_bucket, |e| { let entry: &Entry = e; // Explicit type to satisfy compiler - bucket_index(table.level, table.num_buckets, entry.hash) != new_bucket_index + bucket_index(self.level, self.num_buckets, entry.hash) != new_bucket_index }); let new_bucket = vector::trim_reverse(old_bucket, p); - table_with_length::add(&mut table.buckets, new_bucket_index, new_bucket); + table_with_length::add(&mut self.buckets, new_bucket_index, new_bucket); } /// Return the expected bucket index to find the hash. @@ -297,9 +297,9 @@ module aptos_std::smart_table { /// Acquire an immutable reference to the value which `key` maps to. /// Aborts if there is no entry for `key`. 
- public fun borrow(table: &SmartTable, key: K): &V { - let index = bucket_index(table.level, table.num_buckets, sip_hash_from_value(&key)); - let bucket = table_with_length::borrow(&table.buckets, index); + public fun borrow(self: &SmartTable, key: K): &V { + let index = bucket_index(self.level, self.num_buckets, sip_hash_from_value(&key)); + let bucket = table_with_length::borrow(&self.buckets, index); let i = 0; let len = vector::length(bucket); while (i < len) { @@ -314,19 +314,19 @@ module aptos_std::smart_table { /// Acquire an immutable reference to the value which `key` maps to. /// Returns specified default value if there is no entry for `key`. - public fun borrow_with_default(table: &SmartTable, key: K, default: &V): &V { - if (!contains(table, copy key)) { + public fun borrow_with_default(self: &SmartTable, key: K, default: &V): &V { + if (!contains(self, copy key)) { default } else { - borrow(table, copy key) + borrow(self, copy key) } } /// Acquire a mutable reference to the value which `key` maps to. /// Aborts if there is no entry for `key`. - public fun borrow_mut(table: &mut SmartTable, key: K): &mut V { - let index = bucket_index(table.level, table.num_buckets, sip_hash_from_value(&key)); - let bucket = table_with_length::borrow_mut(&mut table.buckets, index); + public fun borrow_mut(self: &mut SmartTable, key: K): &mut V { + let index = bucket_index(self.level, self.num_buckets, sip_hash_from_value(&key)); + let bucket = table_with_length::borrow_mut(&mut self.buckets, index); let i = 0; let len = vector::length(bucket); while (i < len) { @@ -342,21 +342,21 @@ module aptos_std::smart_table { /// Acquire a mutable reference to the value which `key` maps to. /// Insert the pair (`key`, `default`) first if there is no entry for `key`. 
public fun borrow_mut_with_default( - table: &mut SmartTable, + self: &mut SmartTable, key: K, default: V ): &mut V { - if (!contains(table, copy key)) { - add(table, copy key, default) + if (!contains(self, copy key)) { + add(self, copy key, default) }; - borrow_mut(table, key) + borrow_mut(self, key) } /// Returns true iff `table` contains an entry for `key`. - public fun contains(table: &SmartTable, key: K): bool { + public fun contains(self: &SmartTable, key: K): bool { let hash = sip_hash_from_value(&key); - let index = bucket_index(table.level, table.num_buckets, hash); - let bucket = table_with_length::borrow(&table.buckets, index); + let index = bucket_index(self.level, self.num_buckets, hash); + let bucket = table_with_length::borrow(&self.buckets, index); vector::any(bucket, | entry | { let e: &Entry = entry; e.hash == hash && &e.key == &key @@ -365,16 +365,16 @@ module aptos_std::smart_table { /// Remove from `table` and return the value which `key` maps to. /// Aborts if there is no entry for `key`. - public fun remove(table: &mut SmartTable, key: K): V { - let index = bucket_index(table.level, table.num_buckets, sip_hash_from_value(&key)); - let bucket = table_with_length::borrow_mut(&mut table.buckets, index); + public fun remove(self: &mut SmartTable, key: K): V { + let index = bucket_index(self.level, self.num_buckets, sip_hash_from_value(&key)); + let bucket = table_with_length::borrow_mut(&mut self.buckets, index); let i = 0; let len = vector::length(bucket); while (i < len) { let entry = vector::borrow(bucket, i); if (&entry.key == &key) { let Entry { hash: _, key: _, value } = vector::swap_remove(bucket, i); - table.size = table.size - 1; + self.size = self.size - 1; return value }; i = i + 1; @@ -384,46 +384,46 @@ module aptos_std::smart_table { /// Insert the pair (`key`, `value`) if there is no entry for `key`. 
/// update the value of the entry for `key` to `value` otherwise - public fun upsert(table: &mut SmartTable, key: K, value: V) { - if (!contains(table, copy key)) { - add(table, copy key, value) + public fun upsert(self: &mut SmartTable, key: K, value: V) { + if (!contains(self, copy key)) { + add(self, copy key, value) } else { - let ref = borrow_mut(table, key); + let ref = borrow_mut(self, key); *ref = value; }; } /// Returns the length of the table, i.e. the number of entries. - public fun length(table: &SmartTable): u64 { - table.size + public fun length(self: &SmartTable): u64 { + self.size } /// Return the load factor of the hashtable. - public fun load_factor(table: &SmartTable): u64 { - table.size * 100 / table.num_buckets / table.target_bucket_size + public fun load_factor(self: &SmartTable): u64 { + self.size * 100 / self.num_buckets / self.target_bucket_size } /// Update `split_load_threshold`. - public fun update_split_load_threshold(table: &mut SmartTable, split_load_threshold: u8) { + public fun update_split_load_threshold(self: &mut SmartTable, split_load_threshold: u8) { assert!( split_load_threshold <= 100 && split_load_threshold > 0, error::invalid_argument(EINVALID_LOAD_THRESHOLD_PERCENT) ); - table.split_load_threshold = split_load_threshold; + self.split_load_threshold = split_load_threshold; } /// Update `target_bucket_size`. - public fun update_target_bucket_size(table: &mut SmartTable, target_bucket_size: u64) { + public fun update_target_bucket_size(self: &mut SmartTable, target_bucket_size: u64) { assert!(target_bucket_size > 0, error::invalid_argument(EINVALID_TARGET_BUCKET_SIZE)); - table.target_bucket_size = target_bucket_size; + self.target_bucket_size = target_bucket_size; } /// Apply the function to a reference of each key-value pair in the table. 
- public inline fun for_each_ref(table: &SmartTable, f: |&K, &V|) { + public inline fun for_each_ref(self: &SmartTable, f: |&K, &V|) { let i = 0; - while (i < aptos_std::smart_table::num_buckets(table)) { + while (i < aptos_std::smart_table::num_buckets(self)) { vector::for_each_ref( - aptos_std::table_with_length::borrow(aptos_std::smart_table::borrow_buckets(table), i), + aptos_std::table_with_length::borrow(aptos_std::smart_table::borrow_buckets(self), i), |elem| { let (key, value) = aptos_std::smart_table::borrow_kv(elem); f(key, value) @@ -434,11 +434,11 @@ module aptos_std::smart_table { } /// Apply the function to a mutable reference of each key-value pair in the table. - public inline fun for_each_mut(table: &mut SmartTable, f: |&K, &mut V|) { + public inline fun for_each_mut(self: &mut SmartTable, f: |&K, &mut V|) { let i = 0; - while (i < aptos_std::smart_table::num_buckets(table)) { + while (i < aptos_std::smart_table::num_buckets(self)) { vector::for_each_mut( - table_with_length::borrow_mut(aptos_std::smart_table::borrow_buckets_mut(table), i), + table_with_length::borrow_mut(aptos_std::smart_table::borrow_buckets_mut(self), i), |elem| { let (key, value) = aptos_std::smart_table::borrow_kv_mut(elem); f(key, value) @@ -450,23 +450,23 @@ module aptos_std::smart_table { /// Map the function over the references of key-value pairs in the table without modifying it. public inline fun map_ref( - table: &SmartTable, + self: &SmartTable, f: |&V1|V2 ): SmartTable { let new_table = new(); - for_each_ref(table, |key, value| add(&mut new_table, *key, f(value))); + for_each_ref(self, |key, value| add(&mut new_table, *key, f(value))); new_table } /// Return true if any key-value pair in the table satisfies the predicate. 
public inline fun any( - table: &SmartTable, + self: &SmartTable, p: |&K, &V|bool ): bool { let found = false; let i = 0; - while (i < aptos_std::smart_table::num_buckets(table)) { - found = vector::any(table_with_length::borrow(aptos_std::smart_table::borrow_buckets(table), i), |elem| { + while (i < aptos_std::smart_table::num_buckets(self)) { + found = vector::any(table_with_length::borrow(aptos_std::smart_table::borrow_buckets(self), i), |elem| { let (key, value) = aptos_std::smart_table::borrow_kv(elem); p(key, value) }); @@ -477,24 +477,24 @@ module aptos_std::smart_table { } // Helper functions to circumvent the scope issue of inline functions. - public fun borrow_kv(e: &Entry): (&K, &V) { - (&e.key, &e.value) + public fun borrow_kv(self: &Entry): (&K, &V) { + (&self.key, &self.value) } - public fun borrow_kv_mut(e: &mut Entry): (&mut K, &mut V) { - (&mut e.key, &mut e.value) + public fun borrow_kv_mut(self: &mut Entry): (&mut K, &mut V) { + (&mut self.key, &mut self.value) } - public fun num_buckets(table: &SmartTable): u64 { - table.num_buckets + public fun num_buckets(self: &SmartTable): u64 { + self.num_buckets } - public fun borrow_buckets(table: &SmartTable): &TableWithLength>> { - &table.buckets + public fun borrow_buckets(self: &SmartTable): &TableWithLength>> { + &self.buckets } - public fun borrow_buckets_mut(table: &mut SmartTable): &mut TableWithLength>> { - &mut table.buckets + public fun borrow_buckets_mut(self: &mut SmartTable): &mut TableWithLength>> { + &mut self.buckets } diff --git a/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_table.spec.move b/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_table.spec.move index 113bb4f06cabf..d905a0a40bb3a 100644 --- a/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_table.spec.move +++ b/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_table.spec.move @@ -22,15 +22,15 @@ spec aptos_std::smart_table { pragma verify = false; } - spec 
destroy(table: SmartTable) { + spec destroy(self: SmartTable) { pragma verify = false; } - spec clear(table: &mut SmartTable) { + spec clear(self: &mut SmartTable) { pragma verify = false; } - spec split_one_bucket(table: &mut SmartTable) { + spec split_one_bucket(self: &mut SmartTable) { pragma verify = false; } @@ -38,26 +38,26 @@ spec aptos_std::smart_table { pragma verify = false; } - spec borrow_with_default(table: &SmartTable, key: K, default: &V): &V { + spec borrow_with_default(self: &SmartTable, key: K, default: &V): &V { pragma verify = false; } - spec load_factor(table: &SmartTable): u64 { + spec load_factor(self: &SmartTable): u64 { pragma verify = false; } spec to_simple_map( - table: &SmartTable, + self: &SmartTable, ): SimpleMap { pragma verify = false; } - spec keys(table_ref: &SmartTable): vector { + spec keys(self: &SmartTable): vector { pragma verify = false; } spec keys_paginated( - table_ref: &SmartTable, + self: &SmartTable, starting_bucket_index: u64, starting_vector_index: u64, num_keys_to_get: u64, @@ -69,35 +69,35 @@ spec aptos_std::smart_table { pragma verify = false; } - spec add_all(table: &mut SmartTable, keys: vector, values: vector) { + spec add_all(self: &mut SmartTable, keys: vector, values: vector) { pragma verify = false; } - spec update_split_load_threshold(table: &mut SmartTable, split_load_threshold: u8) { + spec update_split_load_threshold(self: &mut SmartTable, split_load_threshold: u8) { pragma verify = false; } - spec update_target_bucket_size(table: &mut SmartTable, target_bucket_size: u64) { + spec update_target_bucket_size(self: &mut SmartTable, target_bucket_size: u64) { pragma verify = false; } - spec borrow_kv(e: &Entry): (&K, &V) { + spec borrow_kv(self: &Entry): (&K, &V) { pragma verify = false; } - spec borrow_kv_mut(e: &mut Entry): (&mut K, &mut V) { + spec borrow_kv_mut(self: &mut Entry): (&mut K, &mut V) { pragma verify = false; } - spec num_buckets(table: &SmartTable): u64 { + spec num_buckets(self: 
&SmartTable): u64 { pragma verify = false; } - spec borrow_buckets(table: &SmartTable): &TableWithLength>> { + spec borrow_buckets(self: &SmartTable): &TableWithLength>> { pragma verify = false; } - spec borrow_buckets_mut(table: &mut SmartTable): &mut TableWithLength>> { + spec borrow_buckets_mut(self: &mut SmartTable): &mut TableWithLength>> { pragma verify = false; } diff --git a/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_vector.move b/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_vector.move index 10f3c816b2fa7..5fa0d13977f17 100644 --- a/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_vector.move +++ b/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_vector.move @@ -67,119 +67,119 @@ module aptos_std::smart_vector { v } - /// Destroy the vector `v`. - /// Aborts if `v` is not empty. - public fun destroy_empty(v: SmartVector) { - assert!(is_empty(&v), error::invalid_argument(EVECTOR_NOT_EMPTY)); - let SmartVector { inline_vec, big_vec, inline_capacity: _, bucket_size: _ } = v; + /// Destroy the vector `self`. + /// Aborts if `self` is not empty. + public fun destroy_empty(self: SmartVector) { + assert!(is_empty(&self), error::invalid_argument(EVECTOR_NOT_EMPTY)); + let SmartVector { inline_vec, big_vec, inline_capacity: _, bucket_size: _ } = self; vector::destroy_empty(inline_vec); option::destroy_none(big_vec); } /// Destroy a vector completely when T has `drop`. - public fun destroy(v: SmartVector) { - clear(&mut v); - destroy_empty(v); + public fun destroy(self: SmartVector) { + clear(&mut self); + destroy_empty(self); } /// Clear a vector completely when T has `drop`. 
- public fun clear(v: &mut SmartVector) { - v.inline_vec = vector[]; - if (option::is_some(&v.big_vec)) { - big_vector::destroy(option::extract(&mut v.big_vec)); + public fun clear(self: &mut SmartVector) { + self.inline_vec = vector[]; + if (option::is_some(&self.big_vec)) { + big_vector::destroy(option::extract(&mut self.big_vec)); } } - /// Acquire an immutable reference to the `i`th T of the vector `v`. + /// Acquire an immutable reference to the `i`th T of the vector `self`. /// Aborts if `i` is out of bounds. - public fun borrow(v: &SmartVector, i: u64): &T { - assert!(i < length(v), error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); - let inline_len = vector::length(&v.inline_vec); + public fun borrow(self: &SmartVector, i: u64): &T { + assert!(i < length(self), error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); + let inline_len = vector::length(&self.inline_vec); if (i < inline_len) { - vector::borrow(&v.inline_vec, i) + vector::borrow(&self.inline_vec, i) } else { - big_vector::borrow(option::borrow(&v.big_vec), i - inline_len) + big_vector::borrow(option::borrow(&self.big_vec), i - inline_len) } } - /// Return a mutable reference to the `i`th T in the vector `v`. + /// Return a mutable reference to the `i`th T in the vector `self`. /// Aborts if `i` is out of bounds. 
- public fun borrow_mut(v: &mut SmartVector, i: u64): &mut T { - assert!(i < length(v), error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); - let inline_len = vector::length(&v.inline_vec); + public fun borrow_mut(self: &mut SmartVector, i: u64): &mut T { + assert!(i < length(self), error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); + let inline_len = vector::length(&self.inline_vec); if (i < inline_len) { - vector::borrow_mut(&mut v.inline_vec, i) + vector::borrow_mut(&mut self.inline_vec, i) } else { - big_vector::borrow_mut(option::borrow_mut(&mut v.big_vec), i - inline_len) + big_vector::borrow_mut(option::borrow_mut(&mut self.big_vec), i - inline_len) } } - /// Empty and destroy the other vector, and push each of the Ts in the other vector onto the lhs vector in the + /// Empty and destroy the other vector, and push each of the Ts in the other vector onto the self vector in the /// same order as they occurred in other. /// Disclaimer: This function may be costly. Use it at your own discretion. - public fun append(lhs: &mut SmartVector, other: SmartVector) { + public fun append(self: &mut SmartVector, other: SmartVector) { let other_len = length(&other); let half_other_len = other_len / 2; let i = 0; while (i < half_other_len) { - push_back(lhs, swap_remove(&mut other, i)); + push_back(self, swap_remove(&mut other, i)); i = i + 1; }; while (i < other_len) { - push_back(lhs, pop_back(&mut other)); + push_back(self, pop_back(&mut other)); i = i + 1; }; destroy_empty(other); } /// Add multiple values to the vector at once. - public fun add_all(v: &mut SmartVector, vals: vector) { - vector::for_each(vals, |val| { push_back(v, val); }) + public fun add_all(self: &mut SmartVector, vals: vector) { + vector::for_each(vals, |val| { push_back(self, val); }) } /// Convert a smart vector to a native vector, which is supposed to be called mostly by view functions to get an /// atomic view of the whole vector. 
/// Disclaimer: This function may be costly as the smart vector may be huge in size. Use it at your own discretion. - public fun to_vector(v: &SmartVector): vector { - let res = v.inline_vec; - if (option::is_some(&v.big_vec)) { - let big_vec = option::borrow(&v.big_vec); + public fun to_vector(self: &SmartVector): vector { + let res = self.inline_vec; + if (option::is_some(&self.big_vec)) { + let big_vec = option::borrow(&self.big_vec); vector::append(&mut res, big_vector::to_vector(big_vec)); }; res } - /// Add T `val` to the end of the vector `v`. It grows the buckets when the current buckets are full. + /// Add T `val` to the end of the vector `self`. It grows the buckets when the current buckets are full. /// This operation will cost more gas when it adds new bucket. - public fun push_back(v: &mut SmartVector, val: T) { - let len = length(v); - let inline_len = vector::length(&v.inline_vec); + public fun push_back(self: &mut SmartVector, val: T) { + let len = length(self); + let inline_len = vector::length(&self.inline_vec); if (len == inline_len) { - let bucket_size = if (option::is_some(&v.inline_capacity)) { - if (len < *option::borrow(&v.inline_capacity)) { - vector::push_back(&mut v.inline_vec, val); + let bucket_size = if (option::is_some(&self.inline_capacity)) { + if (len < *option::borrow(&self.inline_capacity)) { + vector::push_back(&mut self.inline_vec, val); return }; - *option::borrow(&v.bucket_size) + *option::borrow(&self.bucket_size) } else { let val_size = size_of_val(&val); if (val_size * (inline_len + 1) < 150 /* magic number */) { - vector::push_back(&mut v.inline_vec, val); + vector::push_back(&mut self.inline_vec, val); return }; - let estimated_avg_size = max((size_of_val(&v.inline_vec) + val_size) / (inline_len + 1), 1); + let estimated_avg_size = max((size_of_val(&self.inline_vec) + val_size) / (inline_len + 1), 1); max(1024 /* free_write_quota */ / estimated_avg_size, 1) }; - option::fill(&mut v.big_vec, 
big_vector::empty(bucket_size)); + option::fill(&mut self.big_vec, big_vector::empty(bucket_size)); }; - big_vector::push_back(option::borrow_mut(&mut v.big_vec), val); + big_vector::push_back(option::borrow_mut(&mut self.big_vec), val); } - /// Pop an T from the end of vector `v`. It does shrink the buckets if they're empty. - /// Aborts if `v` is empty. - public fun pop_back(v: &mut SmartVector): T { - assert!(!is_empty(v), error::invalid_state(EVECTOR_EMPTY)); - let big_vec_wrapper = &mut v.big_vec; + /// Pop an T from the end of vector `self`. It does shrink the buckets if they're empty. + /// Aborts if `self` is empty. + public fun pop_back(self: &mut SmartVector): T { + assert!(!is_empty(self), error::invalid_state(EVECTOR_EMPTY)); + let big_vec_wrapper = &mut self.big_vec; if (option::is_some(big_vec_wrapper)) { let big_vec = option::extract(big_vec_wrapper); let val = big_vector::pop_back(&mut big_vec); @@ -190,21 +190,21 @@ module aptos_std::smart_vector { }; val } else { - vector::pop_back(&mut v.inline_vec) + vector::pop_back(&mut self.inline_vec) } } - /// Remove the T at index i in the vector v and return the owned value that was previously stored at i in v. + /// Remove the T at index i in the vector self and return the owned value that was previously stored at i in self. /// All Ts occurring at indices greater than i will be shifted down by 1. Will abort if i is out of bounds. /// Disclaimer: This function may be costly. Use it at your own discretion. 
- public fun remove(v: &mut SmartVector, i: u64): T { - let len = length(v); + public fun remove(self: &mut SmartVector, i: u64): T { + let len = length(self); assert!(i < len, error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); - let inline_len = vector::length(&v.inline_vec); + let inline_len = vector::length(&self.inline_vec); if (i < inline_len) { - vector::remove(&mut v.inline_vec, i) + vector::remove(&mut self.inline_vec, i) } else { - let big_vec_wrapper = &mut v.big_vec; + let big_vec_wrapper = &mut self.big_vec; let big_vec = option::extract(big_vec_wrapper); let val = big_vector::remove(&mut big_vec, i - inline_len); if (big_vector::is_empty(&big_vec)) { @@ -216,15 +216,15 @@ module aptos_std::smart_vector { } } - /// Swap the `i`th T of the vector `v` with the last T and then pop the vector. + /// Swap the `i`th T of the vector `self` with the last T and then pop the vector. /// This is O(1), but does not preserve ordering of Ts in the vector. /// Aborts if `i` is out of bounds. - public fun swap_remove(v: &mut SmartVector, i: u64): T { - let len = length(v); + public fun swap_remove(self: &mut SmartVector, i: u64): T { + let len = length(self); assert!(i < len, error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); - let inline_len = vector::length(&v.inline_vec); - let big_vec_wrapper = &mut v.big_vec; - let inline_vec = &mut v.inline_vec; + let inline_len = vector::length(&self.inline_vec); + let big_vec_wrapper = &mut self.big_vec; + let inline_vec = &mut self.inline_vec; if (i >= inline_len) { let big_vec = option::extract(big_vec_wrapper); let val = big_vector::swap_remove(&mut big_vec, i - inline_len); @@ -250,21 +250,21 @@ module aptos_std::smart_vector { } /// Swap the Ts at the i'th and j'th indices in the vector v. Will abort if either of i or j are out of bounds - /// for v. - public fun swap(v: &mut SmartVector, i: u64, j: u64) { + /// for self. 
+ public fun swap(self: &mut SmartVector, i: u64, j: u64) { if (i > j) { - return swap(v, j, i) + return swap(self, j, i) }; - let len = length(v); + let len = length(self); assert!(j < len, error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); - let inline_len = vector::length(&v.inline_vec); + let inline_len = vector::length(&self.inline_vec); if (i >= inline_len) { - big_vector::swap(option::borrow_mut(&mut v.big_vec), i - inline_len, j - inline_len); + big_vector::swap(option::borrow_mut(&mut self.big_vec), i - inline_len, j - inline_len); } else if (j < inline_len) { - vector::swap(&mut v.inline_vec, i, j); + vector::swap(&mut self.inline_vec, i, j); } else { - let big_vec = option::borrow_mut(&mut v.big_vec); - let inline_vec = &mut v.inline_vec; + let big_vec = option::borrow_mut(&mut self.big_vec); + let inline_vec = &mut self.inline_vec; let element_i = vector::swap_remove(inline_vec, i); let element_j = big_vector::swap_remove(big_vec, j - inline_len); vector::push_back(inline_vec, element_j); @@ -274,128 +274,128 @@ module aptos_std::smart_vector { } } - /// Reverse the order of the Ts in the vector v in-place. + /// Reverse the order of the Ts in the vector self in-place. /// Disclaimer: This function may be costly. Use it at your own discretion. - public fun reverse(v: &mut SmartVector) { - let inline_len = vector::length(&v.inline_vec); + public fun reverse(self: &mut SmartVector) { + let inline_len = vector::length(&self.inline_vec); let i = 0; let new_inline_vec = vector[]; // Push the last `inline_len` Ts into a temp vector. while (i < inline_len) { - vector::push_back(&mut new_inline_vec, pop_back(v)); + vector::push_back(&mut new_inline_vec, pop_back(self)); i = i + 1; }; vector::reverse(&mut new_inline_vec); // Reverse the big_vector left if exists. 
- if (option::is_some(&v.big_vec)) { - big_vector::reverse(option::borrow_mut(&mut v.big_vec)); + if (option::is_some(&self.big_vec)) { + big_vector::reverse(option::borrow_mut(&mut self.big_vec)); }; // Mem::swap the two vectors. let temp_vec = vector[]; - while (!vector::is_empty(&mut v.inline_vec)) { - vector::push_back(&mut temp_vec, vector::pop_back(&mut v.inline_vec)); + while (!vector::is_empty(&mut self.inline_vec)) { + vector::push_back(&mut temp_vec, vector::pop_back(&mut self.inline_vec)); }; vector::reverse(&mut temp_vec); while (!vector::is_empty(&mut new_inline_vec)) { - vector::push_back(&mut v.inline_vec, vector::pop_back(&mut new_inline_vec)); + vector::push_back(&mut self.inline_vec, vector::pop_back(&mut new_inline_vec)); }; vector::destroy_empty(new_inline_vec); // Push the rest Ts originally left in inline_vector back to the end of the smart vector. while (!vector::is_empty(&mut temp_vec)) { - push_back(v, vector::pop_back(&mut temp_vec)); + push_back(self, vector::pop_back(&mut temp_vec)); }; vector::destroy_empty(temp_vec); } - /// Return `(true, i)` if `val` is in the vector `v` at index `i`. + /// Return `(true, i)` if `val` is in the vector `self` at index `i`. /// Otherwise, returns `(false, 0)`. /// Disclaimer: This function may be costly. Use it at your own discretion. - public fun index_of(v: &SmartVector, val: &T): (bool, u64) { - let (found, i) = vector::index_of(&v.inline_vec, val); + public fun index_of(self: &SmartVector, val: &T): (bool, u64) { + let (found, i) = vector::index_of(&self.inline_vec, val); if (found) { (true, i) - } else if (option::is_some(&v.big_vec)) { - let (found, i) = big_vector::index_of(option::borrow(&v.big_vec), val); - (found, i + vector::length(&v.inline_vec)) + } else if (option::is_some(&self.big_vec)) { + let (found, i) = big_vector::index_of(option::borrow(&self.big_vec), val); + (found, i + vector::length(&self.inline_vec)) } else { (false, 0) } } - /// Return true if `val` is in the vector `v`. 
+ /// Return true if `val` is in the vector `self`. /// Disclaimer: This function may be costly. Use it at your own discretion. - public fun contains(v: &SmartVector, val: &T): bool { - if (is_empty(v)) return false; - let (exist, _) = index_of(v, val); + public fun contains(self: &SmartVector, val: &T): bool { + if (is_empty(self)) return false; + let (exist, _) = index_of(self, val); exist } /// Return the length of the vector. - public fun length(v: &SmartVector): u64 { - vector::length(&v.inline_vec) + if (option::is_none(&v.big_vec)) { + public fun length(self: &SmartVector): u64 { + vector::length(&self.inline_vec) + if (option::is_none(&self.big_vec)) { 0 } else { - big_vector::length(option::borrow(&v.big_vec)) + big_vector::length(option::borrow(&self.big_vec)) } } - /// Return `true` if the vector `v` has no Ts and `false` otherwise. - public fun is_empty(v: &SmartVector): bool { - length(v) == 0 + /// Return `true` if the vector `self` has no Ts and `false` otherwise. + public fun is_empty(self: &SmartVector): bool { + length(self) == 0 } /// Apply the function to each T in the vector, consuming it. - public inline fun for_each(v: SmartVector, f: |T|) { - aptos_std::smart_vector::reverse(&mut v); // We need to reverse the vector to consume it efficiently - aptos_std::smart_vector::for_each_reverse(v, |e| f(e)); + public inline fun for_each(self: SmartVector, f: |T|) { + aptos_std::smart_vector::reverse(&mut self); // We need to reverse the vector to consume it efficiently + aptos_std::smart_vector::for_each_reverse(self, |e| f(e)); } /// Apply the function to each T in the vector, consuming it. 
- public inline fun for_each_reverse(v: SmartVector, f: |T|) { - let len = aptos_std::smart_vector::length(&v); + public inline fun for_each_reverse(self: SmartVector, f: |T|) { + let len = aptos_std::smart_vector::length(&self); while (len > 0) { - f(aptos_std::smart_vector::pop_back(&mut v)); + f(aptos_std::smart_vector::pop_back(&mut self)); len = len - 1; }; - aptos_std::smart_vector::destroy_empty(v) + aptos_std::smart_vector::destroy_empty(self) } /// Apply the function to a reference of each T in the vector. - public inline fun for_each_ref(v: &SmartVector, f: |&T|) { + public inline fun for_each_ref(self: &SmartVector, f: |&T|) { let i = 0; - let len = aptos_std::smart_vector::length(v); + let len = aptos_std::smart_vector::length(self); while (i < len) { - f(aptos_std::smart_vector::borrow(v, i)); + f(aptos_std::smart_vector::borrow(self, i)); i = i + 1 } } /// Apply the function to a mutable reference to each T in the vector. - public inline fun for_each_mut(v: &mut SmartVector, f: |&mut T|) { + public inline fun for_each_mut(self: &mut SmartVector, f: |&mut T|) { let i = 0; - let len = aptos_std::smart_vector::length(v); + let len = aptos_std::smart_vector::length(self); while (i < len) { - f(aptos_std::smart_vector::borrow_mut(v, i)); + f(aptos_std::smart_vector::borrow_mut(self, i)); i = i + 1 } } /// Apply the function to a reference of each T in the vector with its index. - public inline fun enumerate_ref(v: &SmartVector, f: |u64, &T|) { + public inline fun enumerate_ref(self: &SmartVector, f: |u64, &T|) { let i = 0; - let len = aptos_std::smart_vector::length(v); + let len = aptos_std::smart_vector::length(self); while (i < len) { - f(i, aptos_std::smart_vector::borrow(v, i)); + f(i, aptos_std::smart_vector::borrow(self, i)); i = i + 1; }; } /// Apply the function to a mutable reference of each T in the vector with its index. 
- public inline fun enumerate_mut(v: &mut SmartVector, f: |u64, &mut T|) { + public inline fun enumerate_mut(self: &mut SmartVector, f: |u64, &mut T|) { let i = 0; - let len = length(v); + let len = length(self); while (i < len) { - f(i, borrow_mut(v, i)); + f(i, borrow_mut(self, i)); i = i + 1; }; } @@ -403,100 +403,100 @@ module aptos_std::smart_vector { /// Fold the function over the Ts. For example, `fold(vector[1,2,3], 0, f)` will execute /// `f(f(f(0, 1), 2), 3)` public inline fun fold( - v: SmartVector, + self: SmartVector, init: Accumulator, f: |Accumulator, T|Accumulator ): Accumulator { let accu = init; - aptos_std::smart_vector::for_each(v, |elem| accu = f(accu, elem)); + aptos_std::smart_vector::for_each(self, |elem| accu = f(accu, elem)); accu } /// Fold right like fold above but working right to left. For example, `fold(vector[1,2,3], 0, f)` will execute /// `f(1, f(2, f(3, 0)))` public inline fun foldr( - v: SmartVector, + self: SmartVector, init: Accumulator, f: |T, Accumulator|Accumulator ): Accumulator { let accu = init; - aptos_std::smart_vector::for_each_reverse(v, |elem| accu = f(elem, accu)); + aptos_std::smart_vector::for_each_reverse(self, |elem| accu = f(elem, accu)); accu } /// Map the function over the references of the Ts of the vector, producing a new vector without modifying the /// original vector. public inline fun map_ref( - v: &SmartVector, + self: &SmartVector, f: |&T1|T2 ): SmartVector { let result = aptos_std::smart_vector::new(); - aptos_std::smart_vector::for_each_ref(v, |elem| aptos_std::smart_vector::push_back(&mut result, f(elem))); + aptos_std::smart_vector::for_each_ref(self, |elem| aptos_std::smart_vector::push_back(&mut result, f(elem))); result } /// Map the function over the Ts of the vector, producing a new vector. 
public inline fun map( - v: SmartVector, + self: SmartVector, f: |T1|T2 ): SmartVector { let result = aptos_std::smart_vector::new(); - aptos_std::smart_vector::for_each(v, |elem| push_back(&mut result, f(elem))); + aptos_std::smart_vector::for_each(self, |elem| push_back(&mut result, f(elem))); result } /// Filter the vector using the boolean function, removing all Ts for which `p(e)` is not true. public inline fun filter( - v: SmartVector, + self: SmartVector, p: |&T|bool ): SmartVector { let result = aptos_std::smart_vector::new(); - aptos_std::smart_vector::for_each(v, |elem| { + aptos_std::smart_vector::for_each(self, |elem| { if (p(&elem)) aptos_std::smart_vector::push_back(&mut result, elem); }); result } - public inline fun zip(v1: SmartVector, v2: SmartVector, f: |T1, T2|) { + public inline fun zip(self: SmartVector, v2: SmartVector, f: |T1, T2|) { // We need to reverse the vectors to consume it efficiently - aptos_std::smart_vector::reverse(&mut v1); + aptos_std::smart_vector::reverse(&mut self); aptos_std::smart_vector::reverse(&mut v2); - aptos_std::smart_vector::zip_reverse(v1, v2, |e1, e2| f(e1, e2)); + aptos_std::smart_vector::zip_reverse(self, v2, |e1, e2| f(e1, e2)); } /// Apply the function to each pair of elements in the two given vectors in the reverse order, consuming them. /// This errors out if the vectors are not of the same length. public inline fun zip_reverse( - v1: SmartVector, + self: SmartVector, v2: SmartVector, f: |T1, T2|, ) { - let len = aptos_std::smart_vector::length(&v1); + let len = aptos_std::smart_vector::length(&self); // We can't use the constant ESMART_VECTORS_LENGTH_MISMATCH here as all calling code would then need to define it // due to how inline functions work. 
assert!(len == aptos_std::smart_vector::length(&v2), 0x20005); while (len > 0) { - f(aptos_std::smart_vector::pop_back(&mut v1), aptos_std::smart_vector::pop_back(&mut v2)); + f(aptos_std::smart_vector::pop_back(&mut self), aptos_std::smart_vector::pop_back(&mut v2)); len = len - 1; }; - aptos_std::smart_vector::destroy_empty(v1); + aptos_std::smart_vector::destroy_empty(self); aptos_std::smart_vector::destroy_empty(v2); } /// Apply the function to the references of each pair of elements in the two given vectors. /// This errors out if the vectors are not of the same length. public inline fun zip_ref( - v1: &SmartVector, + self: &SmartVector, v2: &SmartVector, f: |&T1, &T2|, ) { - let len = aptos_std::smart_vector::length(v1); + let len = aptos_std::smart_vector::length(self); // We can't use the constant ESMART_VECTORS_LENGTH_MISMATCH here as all calling code would then need to define it // due to how inline functions work. assert!(len == aptos_std::smart_vector::length(v2), 0x20005); let i = 0; while (i < len) { - f(aptos_std::smart_vector::borrow(v1, i), aptos_std::smart_vector::borrow(v2, i)); + f(aptos_std::smart_vector::borrow(self, i), aptos_std::smart_vector::borrow(v2, i)); i = i + 1 } } @@ -504,49 +504,49 @@ module aptos_std::smart_vector { /// Apply the function to mutable references to each pair of elements in the two given vectors. /// This errors out if the vectors are not of the same length. public inline fun zip_mut( - v1: &mut SmartVector, + self: &mut SmartVector, v2: &mut SmartVector, f: |&mut T1, &mut T2|, ) { let i = 0; - let len = aptos_std::smart_vector::length(v1); + let len = aptos_std::smart_vector::length(self); // We can't use the constant ESMART_VECTORS_LENGTH_MISMATCH here as all calling code would then need to define it // due to how inline functions work. 
assert!(len == aptos_std::smart_vector::length(v2), 0x20005); while (i < len) { - f(aptos_std::smart_vector::borrow_mut(v1, i), aptos_std::smart_vector::borrow_mut(v2, i)); + f(aptos_std::smart_vector::borrow_mut(self, i), aptos_std::smart_vector::borrow_mut(v2, i)); i = i + 1 } } /// Map the function over the element pairs of the two vectors, producing a new vector. public inline fun zip_map( - v1: SmartVector, + self: SmartVector, v2: SmartVector, f: |T1, T2|NewT ): SmartVector { // We can't use the constant ESMART_VECTORS_LENGTH_MISMATCH here as all calling code would then need to define it // due to how inline functions work. - assert!(aptos_std::smart_vector::length(&v1) == aptos_std::smart_vector::length(&v2), 0x20005); + assert!(aptos_std::smart_vector::length(&self) == aptos_std::smart_vector::length(&v2), 0x20005); let result = aptos_std::smart_vector::new(); - aptos_std::smart_vector::zip(v1, v2, |e1, e2| push_back(&mut result, f(e1, e2))); + aptos_std::smart_vector::zip(self, v2, |e1, e2| push_back(&mut result, f(e1, e2))); result } /// Map the function over the references of the element pairs of two vectors, producing a new vector from the return /// values without modifying the original vectors. public inline fun zip_map_ref( - v1: &SmartVector, + self: &SmartVector, v2: &SmartVector, f: |&T1, &T2|NewT ): SmartVector { // We can't use the constant ESMART_VECTORS_LENGTH_MISMATCH here as all calling code would then need to define it // due to how inline functions work. 
- assert!(aptos_std::smart_vector::length(v1) == aptos_std::smart_vector::length(v2), 0x20005); + assert!(aptos_std::smart_vector::length(self) == aptos_std::smart_vector::length(v2), 0x20005); let result = aptos_std::smart_vector::new(); - aptos_std::smart_vector::zip_ref(v1, v2, |e1, e2| push_back(&mut result, f(e1, e2))); + aptos_std::smart_vector::zip_ref(self, v2, |e1, e2| push_back(&mut result, f(e1, e2))); result } diff --git a/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_vector.spec.move b/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_vector.spec.move index c1af495eaa9ed..40c77c5b94309 100644 --- a/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_vector.spec.move +++ b/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_vector.spec.move @@ -13,7 +13,8 @@ spec aptos_std::smart_vector { } spec length { - aborts_if option::is_some(v.big_vec) && len(v.inline_vec) + big_vector::length(option::spec_borrow(v.big_vec)) > MAX_U64; + aborts_if option::is_some(self.big_vec) && len(self.inline_vec) + big_vector::length(option::spec_borrow( + self.big_vec)) > MAX_U64; } spec empty { @@ -25,19 +26,19 @@ spec aptos_std::smart_vector { } spec destroy_empty { - aborts_if !(is_empty(v)); - aborts_if len(v.inline_vec) != 0 - || option::is_some(v.big_vec); + aborts_if !(is_empty(self)); + aborts_if len(self.inline_vec) != 0 + || option::is_some(self.big_vec); } spec borrow { - aborts_if i >= length(v); - aborts_if option::is_some(v.big_vec) && ( - (len(v.inline_vec) + big_vector::length(option::borrow(v.big_vec))) > MAX_U64 + aborts_if i >= length(self); + aborts_if option::is_some(self.big_vec) && ( + (len(self.inline_vec) + big_vector::length(option::borrow(self.big_vec))) > MAX_U64 ); } - spec push_back(v: &mut SmartVector, val: T) { + spec push_back(self: &mut SmartVector, val: T) { // use aptos_std::big_vector; // use aptos_std::type_info; pragma verify = false; // TODO: set to false because of timeout @@ 
-65,24 +66,24 @@ spec aptos_std::smart_vector { pragma verify_duration_estimate = 120; // TODO: set because of timeout (property proved) - aborts_if option::is_some(v.big_vec) + aborts_if option::is_some(self.big_vec) && - (table_with_length::spec_len(option::borrow(v.big_vec).buckets) == 0); - aborts_if is_empty(v); - aborts_if option::is_some(v.big_vec) && ( - (len(v.inline_vec) + big_vector::length(option::borrow(v.big_vec))) > MAX_U64 + (table_with_length::spec_len(option::borrow(self.big_vec).buckets) == 0); + aborts_if is_empty(self); + aborts_if option::is_some(self.big_vec) && ( + (len(self.inline_vec) + big_vector::length(option::borrow(self.big_vec))) > MAX_U64 ); - ensures length(v) == length(old(v)) - 1; + ensures length(self) == length(old(self)) - 1; } spec swap_remove { pragma verify = false; // TODO: set because of timeout - aborts_if i >= length(v); - aborts_if option::is_some(v.big_vec) && ( - (len(v.inline_vec) + big_vector::length(option::borrow(v.big_vec))) > MAX_U64 + aborts_if i >= length(self); + aborts_if option::is_some(self.big_vec) && ( + (len(self.inline_vec) + big_vector::length(option::borrow(self.big_vec))) > MAX_U64 ); - ensures length(v) == length(old(v)) - 1; + ensures length(self) == length(old(self)) - 1; } spec swap { diff --git a/aptos-move/framework/aptos-stdlib/sources/fixed_point64.move b/aptos-move/framework/aptos-stdlib/sources/fixed_point64.move index ac864c821495b..a5bb552c2ea18 100644 --- a/aptos-move/framework/aptos-stdlib/sources/fixed_point64.move +++ b/aptos-move/framework/aptos-stdlib/sources/fixed_point64.move @@ -29,22 +29,22 @@ module aptos_std::fixed_point64 { /// Abort code on calculation result is negative. const ENEGATIVE_RESULT: u64 = 0x10006; - /// Returns x - y. x must be not less than y. - public fun sub(x: FixedPoint64, y: FixedPoint64): FixedPoint64 { - let x_raw = get_raw_value(x); + /// Returns self - y. self must be not less than y. 
+ public fun sub(self: FixedPoint64, y: FixedPoint64): FixedPoint64 { + let x_raw = get_raw_value(self); let y_raw = get_raw_value(y); assert!(x_raw >= y_raw, ENEGATIVE_RESULT); create_from_raw_value(x_raw - y_raw) } spec sub { pragma opaque; - aborts_if x.value < y.value with ENEGATIVE_RESULT; - ensures result.value == x.value - y.value; + aborts_if self.value < y.value with ENEGATIVE_RESULT; + ensures result.value == self.value - y.value; } - /// Returns x + y. The result cannot be greater than MAX_U128. - public fun add(x: FixedPoint64, y: FixedPoint64): FixedPoint64 { - let x_raw = get_raw_value(x); + /// Returns self + y. The result cannot be greater than MAX_U128. + public fun add(self: FixedPoint64, y: FixedPoint64): FixedPoint64 { + let x_raw = get_raw_value(self); let y_raw = get_raw_value(y); let result = (x_raw as u256) + (y_raw as u256); assert!(result <= MAX_U128, ERATIO_OUT_OF_RANGE); @@ -52,8 +52,8 @@ module aptos_std::fixed_point64 { } spec add { pragma opaque; - aborts_if (x.value as u256) + (y.value as u256) > MAX_U128 with ERATIO_OUT_OF_RANGE; - ensures result.value == x.value + y.value; + aborts_if (self.value as u256) + (y.value as u256) > MAX_U128 with ERATIO_OUT_OF_RANGE; + ensures result.value == self.value + y.value; } /// Multiply a u128 integer by a fixed-point number, truncating any @@ -172,13 +172,13 @@ module aptos_std::fixed_point64 { /// Accessor for the raw u128 value. Other less common operations, such as /// adding or subtracting FixedPoint64 values, can be done using the raw /// values directly. - public fun get_raw_value(num: FixedPoint64): u128 { - num.value + public fun get_raw_value(self: FixedPoint64): u128 { + self.value } /// Returns true if the ratio is zero. - public fun is_zero(num: FixedPoint64): bool { - num.value == 0 + public fun is_zero(self: FixedPoint64): bool { + self.value == 0 } /// Returns the smaller of the two FixedPoint64 numbers. 
@@ -223,89 +223,89 @@ module aptos_std::fixed_point64 { } } - /// Returns true if num1 <= num2 - public fun less_or_equal(num1: FixedPoint64, num2: FixedPoint64): bool { - num1.value <= num2.value + /// Returns true if self <= num2 + public fun less_or_equal(self: FixedPoint64, num2: FixedPoint64): bool { + self.value <= num2.value } spec less_or_equal { pragma opaque; aborts_if false; - ensures result == spec_less_or_equal(num1, num2); + ensures result == spec_less_or_equal(self, num2); } - spec fun spec_less_or_equal(num1: FixedPoint64, num2: FixedPoint64): bool { - num1.value <= num2.value + spec fun spec_less_or_equal(self: FixedPoint64, num2: FixedPoint64): bool { + self.value <= num2.value } - /// Returns true if num1 < num2 - public fun less(num1: FixedPoint64, num2: FixedPoint64): bool { - num1.value < num2.value + /// Returns true if self < num2 + public fun less(self: FixedPoint64, num2: FixedPoint64): bool { + self.value < num2.value } spec less { pragma opaque; aborts_if false; - ensures result == spec_less(num1, num2); + ensures result == spec_less(self, num2); } - spec fun spec_less(num1: FixedPoint64, num2: FixedPoint64): bool { - num1.value < num2.value + spec fun spec_less(self: FixedPoint64, num2: FixedPoint64): bool { + self.value < num2.value } - /// Returns true if num1 >= num2 - public fun greater_or_equal(num1: FixedPoint64, num2: FixedPoint64): bool { - num1.value >= num2.value + /// Returns true if self >= num2 + public fun greater_or_equal(self: FixedPoint64, num2: FixedPoint64): bool { + self.value >= num2.value } spec greater_or_equal { pragma opaque; aborts_if false; - ensures result == spec_greater_or_equal(num1, num2); + ensures result == spec_greater_or_equal(self, num2); } - spec fun spec_greater_or_equal(num1: FixedPoint64, num2: FixedPoint64): bool { - num1.value >= num2.value + spec fun spec_greater_or_equal(self: FixedPoint64, num2: FixedPoint64): bool { + self.value >= num2.value } - /// Returns true if num1 > num2 - public fun 
greater(num1: FixedPoint64, num2: FixedPoint64): bool { - num1.value > num2.value + /// Returns true if self > num2 + public fun greater(self: FixedPoint64, num2: FixedPoint64): bool { + self.value > num2.value } spec greater { pragma opaque; aborts_if false; - ensures result == spec_greater(num1, num2); + ensures result == spec_greater(self, num2); } - spec fun spec_greater(num1: FixedPoint64, num2: FixedPoint64): bool { - num1.value > num2.value + spec fun spec_greater(self: FixedPoint64, num2: FixedPoint64): bool { + self.value > num2.value } - /// Returns true if num1 = num2 - public fun equal(num1: FixedPoint64, num2: FixedPoint64): bool { - num1.value == num2.value + /// Returns true if self = num2 + public fun equal(self: FixedPoint64, num2: FixedPoint64): bool { + self.value == num2.value } spec equal { pragma opaque; aborts_if false; - ensures result == spec_equal(num1, num2); + ensures result == spec_equal(self, num2); } - spec fun spec_equal(num1: FixedPoint64, num2: FixedPoint64): bool { - num1.value == num2.value + spec fun spec_equal(self: FixedPoint64, num2: FixedPoint64): bool { + self.value == num2.value } - /// Returns true if num1 almost equals to num2, which means abs(num1-num2) <= precision - public fun almost_equal(num1: FixedPoint64, num2: FixedPoint64, precision: FixedPoint64): bool { - if (num1.value > num2.value) { - (num1.value - num2.value <= precision.value) + /// Returns true if self almost equals to num2, which means abs(num1-num2) <= precision + public fun almost_equal(self: FixedPoint64, num2: FixedPoint64, precision: FixedPoint64): bool { + if (self.value > num2.value) { + (self.value - num2.value <= precision.value) } else { - (num2.value - num1.value <= precision.value) + (num2.value - self.value <= precision.value) } } spec almost_equal { pragma opaque; aborts_if false; - ensures result == spec_almost_equal(num1, num2, precision); + ensures result == spec_almost_equal(self, num2, precision); } - spec fun spec_almost_equal(num1: 
FixedPoint64, num2: FixedPoint64, precision: FixedPoint64): bool { - if (num1.value > num2.value) { - (num1.value - num2.value <= precision.value) + spec fun spec_almost_equal(self: FixedPoint64, num2: FixedPoint64, precision: FixedPoint64): bool { + if (self.value > num2.value) { + (self.value - num2.value <= precision.value) } else { - (num2.value - num1.value <= precision.value) + (num2.value - self.value <= precision.value) } } /// Create a fixedpoint value from a u128 value. @@ -329,27 +329,27 @@ module aptos_std::fixed_point64 { } /// Returns the largest integer less than or equal to a given number. - public fun floor(num: FixedPoint64): u128 { - num.value >> 64 + public fun floor(self: FixedPoint64): u128 { + self.value >> 64 } spec floor { pragma opaque; aborts_if false; - ensures result == spec_floor(num); + ensures result == spec_floor(self); } - spec fun spec_floor(val: FixedPoint64): u128 { - let fractional = val.value % (1 << 64); + spec fun spec_floor(self: FixedPoint64): u128 { + let fractional = self.value % (1 << 64); if (fractional == 0) { - val.value >> 64 + self.value >> 64 } else { - (val.value - fractional) >> 64 + (self.value - fractional) >> 64 } } /// Rounds up the given FixedPoint64 to the next largest integer. 
- public fun ceil(num: FixedPoint64): u128 { - let floored_num = floor(num) << 64; - if (num.value == floored_num) { + public fun ceil(self: FixedPoint64): u128 { + let floored_num = floor(self) << 64; + if (self.value == floored_num) { return floored_num >> 64 }; let val = ((floored_num as u256) + (1 << 64)); @@ -360,41 +360,41 @@ module aptos_std::fixed_point64 { pragma verify_duration_estimate = 1000; pragma opaque; aborts_if false; - ensures result == spec_ceil(num); + ensures result == spec_ceil(self); } - spec fun spec_ceil(val: FixedPoint64): u128 { - let fractional = val.value % (1 << 64); + spec fun spec_ceil(self: FixedPoint64): u128 { + let fractional = self.value % (1 << 64); let one = 1 << 64; if (fractional == 0) { - val.value >> 64 + self.value >> 64 } else { - (val.value - fractional + one) >> 64 + (self.value - fractional + one) >> 64 } } /// Returns the value of a FixedPoint64 to the nearest integer. - public fun round(num: FixedPoint64): u128 { - let floored_num = floor(num) << 64; + public fun round(self: FixedPoint64): u128 { + let floored_num = floor(self) << 64; let boundary = floored_num + ((1 << 64) / 2); - if (num.value < boundary) { + if (self.value < boundary) { floored_num >> 64 } else { - ceil(num) + ceil(self) } } spec round { pragma opaque; aborts_if false; - ensures result == spec_round(num); + ensures result == spec_round(self); } - spec fun spec_round(val: FixedPoint64): u128 { - let fractional = val.value % (1 << 64); + spec fun spec_round(self: FixedPoint64): u128 { + let fractional = self.value % (1 << 64); let boundary = (1 << 64) / 2; let one = 1 << 64; if (fractional < boundary) { - (val.value - fractional) >> 64 + (self.value - fractional) >> 64 } else { - (val.value - fractional + one) >> 64 + (self.value - fractional + one) >> 64 } } diff --git a/aptos-move/framework/aptos-stdlib/sources/math128.move b/aptos-move/framework/aptos-stdlib/sources/math128.move index 6528153699fb4..df239dbca0846 100644 --- 
a/aptos-move/framework/aptos-stdlib/sources/math128.move +++ b/aptos-move/framework/aptos-stdlib/sources/math128.move @@ -39,6 +39,15 @@ module aptos_std::math128 { large } + /// Return least common multiple of `a` & `b` + public inline fun lcm(a: u128, b: u128): u128 { + if (a == 0 || b == 0) { + 0 + } else { + a / gcd(a, b) * b + } + } + /// Returns a * b / c going through u256 to prevent intermediate overflow public inline fun mul_div(a: u128, b: u128, c: u128): u128 { // Inline functions cannot take constants, as then every module using it needs the constant @@ -193,6 +202,28 @@ module aptos_std::math128 { assert!(gcd(462, 1071) == 21, 0); } + #[test] + fun test_lcm() { + assert!(lcm(0, 0) == 0, 0); + assert!(lcm(0, 1) == 0, 0); + assert!(lcm(1, 0) == 0, 0); + assert!(lcm(1, 1) == 1, 0); + assert!(lcm(1024, 144) == 9216, 0); + assert!(lcm(2, 17) == 34, 0); + assert!(lcm(17, 2) == 34, 0); + assert!(lcm(24, 54) == 216, 0); + assert!(lcm(115, 9) == 1035, 0); + assert!(lcm(101, 14) == 1414, 0); + assert!(lcm(110, 5) == 110, 0); + assert!(lcm(100, 8) == 200, 0); + assert!(lcm(32, 6) == 96, 0); + assert!(lcm(110, 13) == 1430, 0); + assert!(lcm(117, 13) == 117, 0); + assert!(lcm(100, 125) == 500, 0); + assert!(lcm(101, 3) == 303, 0); + assert!(lcm(115, 16) == 1840, 0); + } + #[test] public entry fun test_max() { let result = max(3u128, 6u128); diff --git a/aptos-move/framework/aptos-stdlib/sources/math64.move b/aptos-move/framework/aptos-stdlib/sources/math64.move index 50fd38ed3f6ab..88cd90a68f605 100644 --- a/aptos-move/framework/aptos-stdlib/sources/math64.move +++ b/aptos-move/framework/aptos-stdlib/sources/math64.move @@ -37,6 +37,15 @@ module aptos_std::math64 { large } + /// Returns least common multiple of `a` & `b`. 
+ public inline fun lcm(a: u64, b: u64): u64 { + if (a == 0 || b == 0) { + 0 + } else { + a / gcd(a, b) * b + } + } + /// Returns a * b / c going through u128 to prevent intermediate overflow public inline fun mul_div(a: u64, b: u64, c: u64): u64 { // Inline functions cannot take constants, as then every module using it needs the constant @@ -167,6 +176,28 @@ module aptos_std::math64 { assert!(gcd(462, 1071) == 21, 0); } + #[test] + fun test_lcm() { + assert!(lcm(0, 0) == 0, 0); + assert!(lcm(0, 1) == 0, 0); + assert!(lcm(1, 0) == 0, 0); + assert!(lcm(1, 1) == 1, 0); + assert!(lcm(1024, 144) == 9216, 0); + assert!(lcm(2, 17) == 34, 0); + assert!(lcm(17, 2) == 34, 0); + assert!(lcm(24, 54) == 216, 0); + assert!(lcm(115, 9) == 1035, 0); + assert!(lcm(101, 14) == 1414, 0); + assert!(lcm(110, 5) == 110, 0); + assert!(lcm(100, 8) == 200, 0); + assert!(lcm(32, 6) == 96, 0); + assert!(lcm(110, 13) == 1430, 0); + assert!(lcm(117, 13) == 117, 0); + assert!(lcm(100, 125) == 500, 0); + assert!(lcm(101, 3) == 303, 0); + assert!(lcm(115, 16) == 1840, 0); + } + #[test] public entry fun test_max_64() { let result = max(3u64, 6u64); diff --git a/aptos-move/framework/aptos-stdlib/sources/pool_u64.move b/aptos-move/framework/aptos-stdlib/sources/pool_u64.move index f1aaea9fd947f..7d049fdf37133 100644 --- a/aptos-move/framework/aptos-stdlib/sources/pool_u64.move +++ b/aptos-move/framework/aptos-stdlib/sources/pool_u64.move @@ -72,8 +72,8 @@ module aptos_std::pool_u64 { } /// Destroy an empty pool. This will fail if the pool has any balance of coins. 
- public fun destroy_empty(pool: Pool) { - assert!(pool.total_coins == 0, error::invalid_state(EPOOL_IS_NOT_EMPTY)); + public fun destroy_empty(self: Pool) { + assert!(self.total_coins == 0, error::invalid_state(EPOOL_IS_NOT_EMPTY)); let Pool { shareholders_limit: _, total_coins: _, @@ -81,73 +81,73 @@ module aptos_std::pool_u64 { shares: _, shareholders: _, scaling_factor: _, - } = pool; + } = self; } - /// Return `pool`'s total balance of coins. - public fun total_coins(pool: &Pool): u64 { - pool.total_coins + /// Return `self`'s total balance of coins. + public fun total_coins(self: &Pool): u64 { + self.total_coins } - /// Return the total number of shares across all shareholders in `pool`. - public fun total_shares(pool: &Pool): u64 { - pool.total_shares + /// Return the total number of shares across all shareholders in `self`. + public fun total_shares(self: &Pool): u64 { + self.total_shares } - /// Return true if `shareholder` is in `pool`. - public fun contains(pool: &Pool, shareholder: address): bool { - simple_map::contains_key(&pool.shares, &shareholder) + /// Return true if `shareholder` is in `self`. + public fun contains(self: &Pool, shareholder: address): bool { + simple_map::contains_key(&self.shares, &shareholder) } - /// Return the number of shares of `stakeholder` in `pool`. - public fun shares(pool: &Pool, shareholder: address): u64 { - if (contains(pool, shareholder)) { - *simple_map::borrow(&pool.shares, &shareholder) + /// Return the number of shares of `stakeholder` in `self`. + public fun shares(self: &Pool, shareholder: address): u64 { + if (contains(self, shareholder)) { + *simple_map::borrow(&self.shares, &shareholder) } else { 0 } } - /// Return the balance in coins of `shareholder` in `pool.` - public fun balance(pool: &Pool, shareholder: address): u64 { - let num_shares = shares(pool, shareholder); - shares_to_amount(pool, num_shares) + /// Return the balance in coins of `shareholder` in `self`. 
+ public fun balance(self: &Pool, shareholder: address): u64 { + let num_shares = shares(self, shareholder); + shares_to_amount(self, num_shares) } - /// Return the list of shareholders in `pool`. - public fun shareholders(pool: &Pool): vector
{ - pool.shareholders + /// Return the list of shareholders in `self`. + public fun shareholders(self: &Pool): vector
{ + self.shareholders } - /// Return the number of shareholders in `pool`. - public fun shareholders_count(pool: &Pool): u64 { - vector::length(&pool.shareholders) + /// Return the number of shareholders in `self`. + public fun shareholders_count(self: &Pool): u64 { + vector::length(&self.shareholders) } - /// Update `pool`'s total balance of coins. - public fun update_total_coins(pool: &mut Pool, new_total_coins: u64) { - pool.total_coins = new_total_coins; + /// Update `self`'s total balance of coins. + public fun update_total_coins(self: &mut Pool, new_total_coins: u64) { + self.total_coins = new_total_coins; } /// Allow an existing or new shareholder to add their coins to the pool in exchange for new shares. - public fun buy_in(pool: &mut Pool, shareholder: address, coins_amount: u64): u64 { + public fun buy_in(self: &mut Pool, shareholder: address, coins_amount: u64): u64 { if (coins_amount == 0) return 0; - let new_shares = amount_to_shares(pool, coins_amount); - assert!(MAX_U64 - pool.total_coins >= coins_amount, error::invalid_argument(EPOOL_TOTAL_COINS_OVERFLOW)); - assert!(MAX_U64 - pool.total_shares >= new_shares, error::invalid_argument(EPOOL_TOTAL_COINS_OVERFLOW)); + let new_shares = amount_to_shares(self, coins_amount); + assert!(MAX_U64 - self.total_coins >= coins_amount, error::invalid_argument(EPOOL_TOTAL_COINS_OVERFLOW)); + assert!(MAX_U64 - self.total_shares >= new_shares, error::invalid_argument(EPOOL_TOTAL_COINS_OVERFLOW)); - pool.total_coins = pool.total_coins + coins_amount; - pool.total_shares = pool.total_shares + new_shares; - add_shares(pool, shareholder, new_shares); + self.total_coins = self.total_coins + coins_amount; + self.total_shares = self.total_shares + new_shares; + add_shares(self, shareholder, new_shares); new_shares } - /// Add the number of shares directly for `shareholder` in `pool`. + /// Add the number of shares directly for `shareholder` in `self`. 
/// This would dilute other shareholders if the pool's balance of coins didn't change. - fun add_shares(pool: &mut Pool, shareholder: address, new_shares: u64): u64 { - if (contains(pool, shareholder)) { - let existing_shares = simple_map::borrow_mut(&mut pool.shares, &shareholder); + fun add_shares(self: &mut Pool, shareholder: address, new_shares: u64): u64 { + if (contains(self, shareholder)) { + let existing_shares = simple_map::borrow_mut(&mut self.shares, &shareholder); let current_shares = *existing_shares; assert!(MAX_U64 - current_shares >= new_shares, error::invalid_argument(ESHAREHOLDER_SHARES_OVERFLOW)); @@ -155,110 +155,110 @@ module aptos_std::pool_u64 { *existing_shares } else if (new_shares > 0) { assert!( - vector::length(&pool.shareholders) < pool.shareholders_limit, + vector::length(&self.shareholders) < self.shareholders_limit, error::invalid_state(ETOO_MANY_SHAREHOLDERS), ); - vector::push_back(&mut pool.shareholders, shareholder); - simple_map::add(&mut pool.shares, shareholder, new_shares); + vector::push_back(&mut self.shareholders, shareholder); + simple_map::add(&mut self.shares, shareholder, new_shares); new_shares } else { new_shares } } - /// Allow `shareholder` to redeem their shares in `pool` for coins. - public fun redeem_shares(pool: &mut Pool, shareholder: address, shares_to_redeem: u64): u64 { - assert!(contains(pool, shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND)); - assert!(shares(pool, shareholder) >= shares_to_redeem, error::invalid_argument(EINSUFFICIENT_SHARES)); + /// Allow `shareholder` to redeem their shares in `self` for coins. 
+ public fun redeem_shares(self: &mut Pool, shareholder: address, shares_to_redeem: u64): u64 { + assert!(contains(self, shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND)); + assert!(shares(self, shareholder) >= shares_to_redeem, error::invalid_argument(EINSUFFICIENT_SHARES)); if (shares_to_redeem == 0) return 0; - let redeemed_coins = shares_to_amount(pool, shares_to_redeem); - pool.total_coins = pool.total_coins - redeemed_coins; - pool.total_shares = pool.total_shares - shares_to_redeem; - deduct_shares(pool, shareholder, shares_to_redeem); + let redeemed_coins = shares_to_amount(self, shares_to_redeem); + self.total_coins = self.total_coins - redeemed_coins; + self.total_shares = self.total_shares - shares_to_redeem; + deduct_shares(self, shareholder, shares_to_redeem); redeemed_coins } /// Transfer shares from `shareholder_1` to `shareholder_2`. public fun transfer_shares( - pool: &mut Pool, + self: &mut Pool, shareholder_1: address, shareholder_2: address, shares_to_transfer: u64, ) { - assert!(contains(pool, shareholder_1), error::invalid_argument(ESHAREHOLDER_NOT_FOUND)); - assert!(shares(pool, shareholder_1) >= shares_to_transfer, error::invalid_argument(EINSUFFICIENT_SHARES)); + assert!(contains(self, shareholder_1), error::invalid_argument(ESHAREHOLDER_NOT_FOUND)); + assert!(shares(self, shareholder_1) >= shares_to_transfer, error::invalid_argument(EINSUFFICIENT_SHARES)); if (shares_to_transfer == 0) return; - deduct_shares(pool, shareholder_1, shares_to_transfer); - add_shares(pool, shareholder_2, shares_to_transfer); + deduct_shares(self, shareholder_1, shares_to_transfer); + add_shares(self, shareholder_2, shares_to_transfer); } - /// Directly deduct `shareholder`'s number of shares in `pool` and return the number of remaining shares. 
- fun deduct_shares(pool: &mut Pool, shareholder: address, num_shares: u64): u64 { - assert!(contains(pool, shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND)); - assert!(shares(pool, shareholder) >= num_shares, error::invalid_argument(EINSUFFICIENT_SHARES)); + /// Directly deduct `shareholder`'s number of shares in `self` and return the number of remaining shares. + fun deduct_shares(self: &mut Pool, shareholder: address, num_shares: u64): u64 { + assert!(contains(self, shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND)); + assert!(shares(self, shareholder) >= num_shares, error::invalid_argument(EINSUFFICIENT_SHARES)); - let existing_shares = simple_map::borrow_mut(&mut pool.shares, &shareholder); + let existing_shares = simple_map::borrow_mut(&mut self.shares, &shareholder); *existing_shares = *existing_shares - num_shares; // Remove the shareholder completely if they have no shares left. let remaining_shares = *existing_shares; if (remaining_shares == 0) { - let (_, shareholder_index) = vector::index_of(&pool.shareholders, &shareholder); - vector::remove(&mut pool.shareholders, shareholder_index); - simple_map::remove(&mut pool.shares, &shareholder); + let (_, shareholder_index) = vector::index_of(&self.shareholders, &shareholder); + vector::remove(&mut self.shareholders, shareholder_index); + simple_map::remove(&mut self.shares, &shareholder); }; remaining_shares } - /// Return the number of new shares `coins_amount` can buy in `pool`. + /// Return the number of new shares `coins_amount` can buy in `self`. /// `amount` needs to big enough to avoid rounding number. 
- public fun amount_to_shares(pool: &Pool, coins_amount: u64): u64 { - amount_to_shares_with_total_coins(pool, coins_amount, pool.total_coins) + public fun amount_to_shares(self: &Pool, coins_amount: u64): u64 { + amount_to_shares_with_total_coins(self, coins_amount, self.total_coins) } - /// Return the number of new shares `coins_amount` can buy in `pool` with a custom total coins number. + /// Return the number of new shares `coins_amount` can buy in `self` with a custom total coins number. /// `amount` needs to big enough to avoid rounding number. - public fun amount_to_shares_with_total_coins(pool: &Pool, coins_amount: u64, total_coins: u64): u64 { + public fun amount_to_shares_with_total_coins(self: &Pool, coins_amount: u64, total_coins: u64): u64 { // No shares yet so amount is worth the same number of shares. - if (pool.total_coins == 0 || pool.total_shares == 0) { + if (self.total_coins == 0 || self.total_shares == 0) { // Multiply by scaling factor to minimize rounding errors during internal calculations for buy ins/redeems. // This can overflow but scaling factor is expected to be chosen carefully so this would not overflow. - coins_amount * pool.scaling_factor + coins_amount * self.scaling_factor } else { // Shares price = total_coins / total existing shares. // New number of shares = new_amount / shares_price = new_amount * existing_shares / total_amount. // We rearrange the calc and do multiplication first to avoid rounding errors. - multiply_then_divide(pool, coins_amount, pool.total_shares, total_coins) + multiply_then_divide(self, coins_amount, self.total_shares, total_coins) } } - /// Return the number of coins `shares` are worth in `pool`. + /// Return the number of coins `shares` are worth in `self`. /// `shares` needs to big enough to avoid rounding number. 
- public fun shares_to_amount(pool: &Pool, shares: u64): u64 { - shares_to_amount_with_total_coins(pool, shares, pool.total_coins) + public fun shares_to_amount(self: &Pool, shares: u64): u64 { + shares_to_amount_with_total_coins(self, shares, self.total_coins) } - /// Return the number of coins `shares` are worth in `pool` with a custom total coins number. + /// Return the number of coins `shares` are worth in `self` with a custom total coins number. /// `shares` needs to big enough to avoid rounding number. - public fun shares_to_amount_with_total_coins(pool: &Pool, shares: u64, total_coins: u64): u64 { + public fun shares_to_amount_with_total_coins(self: &Pool, shares: u64, total_coins: u64): u64 { // No shares or coins yet so shares are worthless. - if (pool.total_coins == 0 || pool.total_shares == 0) { + if (self.total_coins == 0 || self.total_shares == 0) { 0 } else { // Shares price = total_coins / total existing shares. // Shares worth = shares * shares price = shares * total_coins / total existing shares. // We rearrange the calc and do multiplication first to avoid rounding errors. 
- multiply_then_divide(pool, shares, total_coins, pool.total_shares) + multiply_then_divide(self, shares, total_coins, self.total_shares) } } - public fun multiply_then_divide(_pool: &Pool, x: u64, y: u64, z: u64): u64 { + public fun multiply_then_divide(self: &Pool, x: u64, y: u64, z: u64): u64 { let result = (to_u128(x) * to_u128(y)) / to_u128(z); (result as u64) } @@ -268,7 +268,7 @@ module aptos_std::pool_u64 { } #[test_only] - public fun destroy_pool(pool: Pool) { + public fun destroy_pool(self: Pool) { let Pool { shareholders_limit: _, total_coins: _, @@ -276,7 +276,7 @@ module aptos_std::pool_u64 { shares: _, shareholders: _, scaling_factor: _, - } = pool; + } = self; } #[test] diff --git a/aptos-move/framework/aptos-stdlib/sources/pool_u64.spec.move b/aptos-move/framework/aptos-stdlib/sources/pool_u64.spec.move index 96e77747769e4..3ad0b4b6af1fc 100644 --- a/aptos-move/framework/aptos-stdlib/sources/pool_u64.spec.move +++ b/aptos-move/framework/aptos-stdlib/sources/pool_u64.spec.move @@ -30,9 +30,9 @@ spec aptos_std::pool_u64 { simple_map::spec_contains_key(pool.shares, shareholder) } - spec contains(pool: &Pool, shareholder: address): bool { + spec contains(self: &Pool, shareholder: address): bool { aborts_if false; - ensures result == spec_contains(pool, shareholder); + ensures result == spec_contains(self, shareholder); } spec fun spec_shares(pool: Pool, shareholder: address): u64 { @@ -44,62 +44,62 @@ spec aptos_std::pool_u64 { } } - spec shares(pool: &Pool, shareholder: address): u64 { + spec shares(self: &Pool, shareholder: address): u64 { aborts_if false; - ensures result == spec_shares(pool, shareholder); + ensures result == spec_shares(self, shareholder); } - spec balance(pool: &Pool, shareholder: address): u64 { - let shares = spec_shares(pool, shareholder); - let total_coins = pool.total_coins; - aborts_if pool.total_coins > 0 && pool.total_shares > 0 && (shares * total_coins) / pool.total_shares > MAX_U64; - ensures result == 
spec_shares_to_amount_with_total_coins(pool, shares, total_coins); + spec balance(self: &Pool, shareholder: address): u64 { + let shares = spec_shares(self, shareholder); + let total_coins = self.total_coins; + aborts_if self.total_coins > 0 && self.total_shares > 0 && (shares * total_coins) / self.total_shares > MAX_U64; + ensures result == spec_shares_to_amount_with_total_coins(self, shares, total_coins); } - spec buy_in(pool: &mut Pool, shareholder: address, coins_amount: u64): u64 { - let new_shares = spec_amount_to_shares_with_total_coins(pool, coins_amount, pool.total_coins); - aborts_if pool.total_coins + coins_amount > MAX_U64; - aborts_if pool.total_shares + new_shares > MAX_U64; + spec buy_in(self: &mut Pool, shareholder: address, coins_amount: u64): u64 { + let new_shares = spec_amount_to_shares_with_total_coins(self, coins_amount, self.total_coins); + aborts_if self.total_coins + coins_amount > MAX_U64; + aborts_if self.total_shares + new_shares > MAX_U64; include coins_amount > 0 ==> AddSharesAbortsIf { new_shares: new_shares }; include coins_amount > 0 ==> AddSharesEnsures { new_shares: new_shares }; - ensures pool.total_coins == old(pool.total_coins) + coins_amount; - ensures pool.total_shares == old(pool.total_shares) + new_shares; + ensures self.total_coins == old(self.total_coins) + coins_amount; + ensures self.total_shares == old(self.total_shares) + new_shares; ensures result == new_shares; } - spec add_shares(pool: &mut Pool, shareholder: address, new_shares: u64): u64 { + spec add_shares(self: &mut Pool, shareholder: address, new_shares: u64): u64 { include AddSharesAbortsIf; include AddSharesEnsures; - let key_exists = simple_map::spec_contains_key(pool.shares, shareholder); - ensures result == if (key_exists) { simple_map::spec_get(pool.shares, shareholder) } + let key_exists = simple_map::spec_contains_key(self.shares, shareholder); + ensures result == if (key_exists) { simple_map::spec_get(self.shares, shareholder) } else { new_shares }; } 
spec schema AddSharesAbortsIf { - pool: Pool; + self: Pool; shareholder: address; new_shares: u64; - let key_exists = simple_map::spec_contains_key(pool.shares, shareholder); - let current_shares = simple_map::spec_get(pool.shares, shareholder); + let key_exists = simple_map::spec_contains_key(self.shares, shareholder); + let current_shares = simple_map::spec_get(self.shares, shareholder); aborts_if key_exists && current_shares + new_shares > MAX_U64; - aborts_if !key_exists && new_shares > 0 && len(pool.shareholders) >= pool.shareholders_limit; + aborts_if !key_exists && new_shares > 0 && len(self.shareholders) >= self.shareholders_limit; } spec schema AddSharesEnsures { - pool: Pool; + self: Pool; shareholder: address; new_shares: u64; - let key_exists = simple_map::spec_contains_key(pool.shares, shareholder); - let current_shares = simple_map::spec_get(pool.shares, shareholder); + let key_exists = simple_map::spec_contains_key(self.shares, shareholder); + let current_shares = simple_map::spec_get(self.shares, shareholder); ensures key_exists ==> - pool.shares == simple_map::spec_set(old(pool.shares), shareholder, current_shares + new_shares); + self.shares == simple_map::spec_set(old(self.shares), shareholder, current_shares + new_shares); ensures (!key_exists && new_shares > 0) ==> - pool.shares == simple_map::spec_set(old(pool.shares), shareholder, new_shares); + self.shares == simple_map::spec_set(old(self.shares), shareholder, new_shares); ensures (!key_exists && new_shares > 0) ==> - vector::eq_push_back(pool.shareholders, old(pool.shareholders), shareholder); + vector::eq_push_back(self.shareholders, old(self.shareholders), shareholder); } spec fun spec_amount_to_shares_with_total_coins(pool: Pool, coins_amount: u64, total_coins: u64): u64 { @@ -111,19 +111,19 @@ spec aptos_std::pool_u64 { } } - spec amount_to_shares_with_total_coins(pool: &Pool, coins_amount: u64, total_coins: u64): u64 { - aborts_if pool.total_coins > 0 && pool.total_shares > 0 - && 
(coins_amount * pool.total_shares) / total_coins > MAX_U64; - aborts_if (pool.total_coins == 0 || pool.total_shares == 0) - && coins_amount * pool.scaling_factor > MAX_U64; - aborts_if pool.total_coins > 0 && pool.total_shares > 0 && total_coins == 0; - ensures result == spec_amount_to_shares_with_total_coins(pool, coins_amount, total_coins); + spec amount_to_shares_with_total_coins(self: &Pool, coins_amount: u64, total_coins: u64): u64 { + aborts_if self.total_coins > 0 && self.total_shares > 0 + && (coins_amount * self.total_shares) / total_coins > MAX_U64; + aborts_if (self.total_coins == 0 || self.total_shares == 0) + && coins_amount * self.scaling_factor > MAX_U64; + aborts_if self.total_coins > 0 && self.total_shares > 0 && total_coins == 0; + ensures result == spec_amount_to_shares_with_total_coins(self, coins_amount, total_coins); } - spec shares_to_amount_with_total_coins(pool: &Pool, shares: u64, total_coins: u64): u64 { - aborts_if pool.total_coins > 0 && pool.total_shares > 0 - && (shares * total_coins) / pool.total_shares > MAX_U64; - ensures result == spec_shares_to_amount_with_total_coins(pool, shares, total_coins); + spec shares_to_amount_with_total_coins(self: &Pool, shares: u64, total_coins: u64): u64 { + aborts_if self.total_coins > 0 && self.total_shares > 0 + && (shares * total_coins) / self.total_shares > MAX_U64; + ensures result == spec_shares_to_amount_with_total_coins(self, shares, total_coins); } spec fun spec_shares_to_amount_with_total_coins(pool: Pool, shares: u64, total_coins: u64): u64 { @@ -135,52 +135,54 @@ spec aptos_std::pool_u64 { } } - spec multiply_then_divide(_pool: &Pool, x: u64, y: u64, z: u64): u64 { + spec multiply_then_divide(self: &Pool, x: u64, y: u64, z: u64): u64 { aborts_if z == 0; aborts_if (x * y) / z > MAX_U64; ensures result == (x * y) / z; } - spec redeem_shares(pool: &mut Pool, shareholder: address, shares_to_redeem: u64): u64 { - let redeemed_coins = spec_shares_to_amount_with_total_coins(pool, 
shares_to_redeem, pool.total_coins); - aborts_if !spec_contains(pool, shareholder); - aborts_if spec_shares(pool, shareholder) < shares_to_redeem; - aborts_if pool.total_coins < redeemed_coins; - aborts_if pool.total_shares < shares_to_redeem; - ensures pool.total_coins == old(pool.total_coins) - redeemed_coins; - ensures pool.total_shares == old(pool.total_shares) - shares_to_redeem; - include shares_to_redeem > 0 ==> DeductSharesEnsures { num_shares: shares_to_redeem }; + spec redeem_shares(self: &mut Pool, shareholder: address, shares_to_redeem: u64): u64 { + let redeemed_coins = spec_shares_to_amount_with_total_coins(self, shares_to_redeem, self.total_coins); + aborts_if !spec_contains(self, shareholder); + aborts_if spec_shares(self, shareholder) < shares_to_redeem; + aborts_if self.total_coins < redeemed_coins; + aborts_if self.total_shares < shares_to_redeem; + ensures self.total_coins == old(self.total_coins) - redeemed_coins; + ensures self.total_shares == old(self.total_shares) - shares_to_redeem; + include shares_to_redeem > 0 ==> DeductSharesEnsures { + num_shares: shares_to_redeem + }; ensures result == redeemed_coins; } spec transfer_shares( - pool: &mut Pool, + self: &mut Pool, shareholder_1: address, shareholder_2: address, shares_to_transfer: u64 ) { pragma aborts_if_is_partial; - aborts_if !spec_contains(pool, shareholder_1); - aborts_if spec_shares(pool, shareholder_1) < shares_to_transfer; + aborts_if !spec_contains(self, shareholder_1); + aborts_if spec_shares(self, shareholder_1) < shares_to_transfer; // TODO: difficult to specify due to the intermediate state problem. 
} - spec deduct_shares(pool: &mut Pool, shareholder: address, num_shares: u64): u64 { - aborts_if !spec_contains(pool, shareholder); - aborts_if spec_shares(pool, shareholder) < num_shares; + spec deduct_shares(self: &mut Pool, shareholder: address, num_shares: u64): u64 { + aborts_if !spec_contains(self, shareholder); + aborts_if spec_shares(self, shareholder) < num_shares; include DeductSharesEnsures; - let remaining_shares = simple_map::spec_get(pool.shares, shareholder) - num_shares; - ensures remaining_shares > 0 ==> result == simple_map::spec_get(pool.shares, shareholder); + let remaining_shares = simple_map::spec_get(self.shares, shareholder) - num_shares; + ensures remaining_shares > 0 ==> result == simple_map::spec_get(self.shares, shareholder); ensures remaining_shares == 0 ==> result == 0; } spec schema DeductSharesEnsures { - pool: Pool; + self: Pool; shareholder: address; num_shares: u64; - let remaining_shares = simple_map::spec_get(pool.shares, shareholder) - num_shares; - ensures remaining_shares > 0 ==> simple_map::spec_get(pool.shares, shareholder) == remaining_shares; - ensures remaining_shares == 0 ==> !simple_map::spec_contains_key(pool.shares, shareholder); - ensures remaining_shares == 0 ==> !vector::spec_contains(pool.shareholders, shareholder); + let remaining_shares = simple_map::spec_get(self.shares, shareholder) - num_shares; + ensures remaining_shares > 0 ==> simple_map::spec_get(self.shares, shareholder) == remaining_shares; + ensures remaining_shares == 0 ==> !simple_map::spec_contains_key(self.shares, shareholder); + ensures remaining_shares == 0 ==> !vector::spec_contains(self.shareholders, shareholder); } } diff --git a/aptos-move/framework/aptos-stdlib/sources/pool_u64_unbound.move b/aptos-move/framework/aptos-stdlib/sources/pool_u64_unbound.move index c9ab78e3b52a8..d2b1827315194 100644 --- a/aptos-move/framework/aptos-stdlib/sources/pool_u64_unbound.move +++ b/aptos-move/framework/aptos-stdlib/sources/pool_u64_unbound.move @@ 
-69,193 +69,193 @@ module aptos_std::pool_u64_unbound { } /// Destroy an empty pool. This will fail if the pool has any balance of coins. - public fun destroy_empty(pool: Pool) { - assert!(pool.total_coins == 0, error::invalid_state(EPOOL_IS_NOT_EMPTY)); + public fun destroy_empty(self: Pool) { + assert!(self.total_coins == 0, error::invalid_state(EPOOL_IS_NOT_EMPTY)); let Pool { total_coins: _, total_shares: _, shares, scaling_factor: _, - } = pool; + } = self; table::destroy_empty(shares); } - /// Return `pool`'s total balance of coins. - public fun total_coins(pool: &Pool): u64 { - pool.total_coins + /// Return `self`'s total balance of coins. + public fun total_coins(self: &Pool): u64 { + self.total_coins } - /// Return the total number of shares across all shareholders in `pool`. - public fun total_shares(pool: &Pool): u128 { - pool.total_shares + /// Return the total number of shares across all shareholders in `self`. + public fun total_shares(self: &Pool): u128 { + self.total_shares } - /// Return true if `shareholder` is in `pool`. - public fun contains(pool: &Pool, shareholder: address): bool { - table::contains(&pool.shares, shareholder) + /// Return true if `shareholder` is in `self`. + public fun contains(self: &Pool, shareholder: address): bool { + table::contains(&self.shares, shareholder) } - /// Return the number of shares of `stakeholder` in `pool`. - public fun shares(pool: &Pool, shareholder: address): u128 { - if (contains(pool, shareholder)) { - *table::borrow(&pool.shares, shareholder) + /// Return the number of shares of `stakeholder` in `self`. 
+ public fun shares(self: &Pool, shareholder: address): u128 { + if (contains(self, shareholder)) { + *table::borrow(&self.shares, shareholder) } else { 0 } } - /// Return the balance in coins of `shareholder` in `pool.` - public fun balance(pool: &Pool, shareholder: address): u64 { - let num_shares = shares(pool, shareholder); - shares_to_amount(pool, num_shares) + /// Return the balance in coins of `shareholder` in `self`. + public fun balance(self: &Pool, shareholder: address): u64 { + let num_shares = shares(self, shareholder); + shares_to_amount(self, num_shares) } - /// Return the number of shareholders in `pool`. - public fun shareholders_count(pool: &Pool): u64 { - table::length(&pool.shares) + /// Return the number of shareholders in `self`. + public fun shareholders_count(self: &Pool): u64 { + table::length(&self.shares) } - /// Update `pool`'s total balance of coins. - public fun update_total_coins(pool: &mut Pool, new_total_coins: u64) { - pool.total_coins = new_total_coins; + /// Update `self`'s total balance of coins. + public fun update_total_coins(self: &mut Pool, new_total_coins: u64) { + self.total_coins = new_total_coins; } /// Allow an existing or new shareholder to add their coins to the pool in exchange for new shares. 
- public fun buy_in(pool: &mut Pool, shareholder: address, coins_amount: u64): u128 { + public fun buy_in(self: &mut Pool, shareholder: address, coins_amount: u64): u128 { if (coins_amount == 0) return 0; - let new_shares = amount_to_shares(pool, coins_amount); - assert!(MAX_U64 - pool.total_coins >= coins_amount, error::invalid_argument(EPOOL_TOTAL_COINS_OVERFLOW)); - assert!(MAX_U128 - pool.total_shares >= new_shares, error::invalid_argument(EPOOL_TOTAL_SHARES_OVERFLOW)); + let new_shares = amount_to_shares(self, coins_amount); + assert!(MAX_U64 - self.total_coins >= coins_amount, error::invalid_argument(EPOOL_TOTAL_COINS_OVERFLOW)); + assert!(MAX_U128 - self.total_shares >= new_shares, error::invalid_argument(EPOOL_TOTAL_SHARES_OVERFLOW)); - pool.total_coins = pool.total_coins + coins_amount; - pool.total_shares = pool.total_shares + new_shares; - add_shares(pool, shareholder, new_shares); + self.total_coins = self.total_coins + coins_amount; + self.total_shares = self.total_shares + new_shares; + add_shares(self, shareholder, new_shares); new_shares } - /// Add the number of shares directly for `shareholder` in `pool`. + /// Add the number of shares directly for `shareholder` in `self`. /// This would dilute other shareholders if the pool's balance of coins didn't change. 
- fun add_shares(pool: &mut Pool, shareholder: address, new_shares: u128): u128 { - if (contains(pool, shareholder)) { - let existing_shares = table::borrow_mut(&mut pool.shares, shareholder); + fun add_shares(self: &mut Pool, shareholder: address, new_shares: u128): u128 { + if (contains(self, shareholder)) { + let existing_shares = table::borrow_mut(&mut self.shares, shareholder); let current_shares = *existing_shares; assert!(MAX_U128 - current_shares >= new_shares, error::invalid_argument(ESHAREHOLDER_SHARES_OVERFLOW)); *existing_shares = current_shares + new_shares; *existing_shares } else if (new_shares > 0) { - table::add(&mut pool.shares, shareholder, new_shares); + table::add(&mut self.shares, shareholder, new_shares); new_shares } else { new_shares } } - /// Allow `shareholder` to redeem their shares in `pool` for coins. - public fun redeem_shares(pool: &mut Pool, shareholder: address, shares_to_redeem: u128): u64 { - assert!(contains(pool, shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND)); - assert!(shares(pool, shareholder) >= shares_to_redeem, error::invalid_argument(EINSUFFICIENT_SHARES)); + /// Allow `shareholder` to redeem their shares in `self` for coins. 
+ public fun redeem_shares(self: &mut Pool, shareholder: address, shares_to_redeem: u128): u64 { + assert!(contains(self, shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND)); + assert!(shares(self, shareholder) >= shares_to_redeem, error::invalid_argument(EINSUFFICIENT_SHARES)); if (shares_to_redeem == 0) return 0; - let redeemed_coins = shares_to_amount(pool, shares_to_redeem); - pool.total_coins = pool.total_coins - redeemed_coins; - pool.total_shares = pool.total_shares - shares_to_redeem; - deduct_shares(pool, shareholder, shares_to_redeem); + let redeemed_coins = shares_to_amount(self, shares_to_redeem); + self.total_coins = self.total_coins - redeemed_coins; + self.total_shares = self.total_shares - shares_to_redeem; + deduct_shares(self, shareholder, shares_to_redeem); redeemed_coins } /// Transfer shares from `shareholder_1` to `shareholder_2`. public fun transfer_shares( - pool: &mut Pool, + self: &mut Pool, shareholder_1: address, shareholder_2: address, shares_to_transfer: u128, ) { - assert!(contains(pool, shareholder_1), error::invalid_argument(ESHAREHOLDER_NOT_FOUND)); - assert!(shares(pool, shareholder_1) >= shares_to_transfer, error::invalid_argument(EINSUFFICIENT_SHARES)); + assert!(contains(self, shareholder_1), error::invalid_argument(ESHAREHOLDER_NOT_FOUND)); + assert!(shares(self, shareholder_1) >= shares_to_transfer, error::invalid_argument(EINSUFFICIENT_SHARES)); if (shares_to_transfer == 0) return; - deduct_shares(pool, shareholder_1, shares_to_transfer); - add_shares(pool, shareholder_2, shares_to_transfer); + deduct_shares(self, shareholder_1, shares_to_transfer); + add_shares(self, shareholder_2, shares_to_transfer); } - /// Directly deduct `shareholder`'s number of shares in `pool` and return the number of remaining shares. 
- fun deduct_shares(pool: &mut Pool, shareholder: address, num_shares: u128): u128 { - assert!(contains(pool, shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND)); - assert!(shares(pool, shareholder) >= num_shares, error::invalid_argument(EINSUFFICIENT_SHARES)); + /// Directly deduct `shareholder`'s number of shares in `self` and return the number of remaining shares. + fun deduct_shares(self: &mut Pool, shareholder: address, num_shares: u128): u128 { + assert!(contains(self, shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND)); + assert!(shares(self, shareholder) >= num_shares, error::invalid_argument(EINSUFFICIENT_SHARES)); - let existing_shares = table::borrow_mut(&mut pool.shares, shareholder); + let existing_shares = table::borrow_mut(&mut self.shares, shareholder); *existing_shares = *existing_shares - num_shares; // Remove the shareholder completely if they have no shares left. let remaining_shares = *existing_shares; if (remaining_shares == 0) { - table::remove(&mut pool.shares, shareholder); + table::remove(&mut self.shares, shareholder); }; remaining_shares } - /// Return the number of new shares `coins_amount` can buy in `pool`. + /// Return the number of new shares `coins_amount` can buy in `self`. /// `amount` needs to big enough to avoid rounding number. - public fun amount_to_shares(pool: &Pool, coins_amount: u64): u128 { - amount_to_shares_with_total_coins(pool, coins_amount, pool.total_coins) + public fun amount_to_shares(self: &Pool, coins_amount: u64): u128 { + amount_to_shares_with_total_coins(self, coins_amount, self.total_coins) } - /// Return the number of new shares `coins_amount` can buy in `pool` with a custom total coins number. + /// Return the number of new shares `coins_amount` can buy in `self` with a custom total coins number. /// `amount` needs to big enough to avoid rounding number. 
- public fun amount_to_shares_with_total_coins(pool: &Pool, coins_amount: u64, total_coins: u64): u128 { + public fun amount_to_shares_with_total_coins(self: &Pool, coins_amount: u64, total_coins: u64): u128 { // No shares yet so amount is worth the same number of shares. - if (pool.total_coins == 0 || pool.total_shares == 0) { + if (self.total_coins == 0 || self.total_shares == 0) { // Multiply by scaling factor to minimize rounding errors during internal calculations for buy ins/redeems. // This can overflow but scaling factor is expected to be chosen carefully so this would not overflow. - to_u128(coins_amount) * to_u128(pool.scaling_factor) + to_u128(coins_amount) * to_u128(self.scaling_factor) } else { // Shares price = total_coins / total existing shares. // New number of shares = new_amount / shares_price = new_amount * existing_shares / total_amount. // We rearrange the calc and do multiplication first to avoid rounding errors. - multiply_then_divide(pool, to_u128(coins_amount), pool.total_shares, to_u128(total_coins)) + multiply_then_divide(self, to_u128(coins_amount), self.total_shares, to_u128(total_coins)) } } - /// Return the number of coins `shares` are worth in `pool`. + /// Return the number of coins `shares` are worth in `self`. /// `shares` needs to big enough to avoid rounding number. - public fun shares_to_amount(pool: &Pool, shares: u128): u64 { - shares_to_amount_with_total_coins(pool, shares, pool.total_coins) + public fun shares_to_amount(self: &Pool, shares: u128): u64 { + shares_to_amount_with_total_coins(self, shares, self.total_coins) } - /// Return the number of coins `shares` are worth in `pool` with a custom total coins number. + /// Return the number of coins `shares` are worth in `self` with a custom total coins number. /// `shares` needs to big enough to avoid rounding number. 
- public fun shares_to_amount_with_total_coins(pool: &Pool, shares: u128, total_coins: u64): u64 { + public fun shares_to_amount_with_total_coins(self: &Pool, shares: u128, total_coins: u64): u64 { // No shares or coins yet so shares are worthless. - if (pool.total_coins == 0 || pool.total_shares == 0) { + if (self.total_coins == 0 || self.total_shares == 0) { 0 } else { // Shares price = total_coins / total existing shares. // Shares worth = shares * shares price = shares * total_coins / total existing shares. // We rearrange the calc and do multiplication first to avoid rounding errors. - (multiply_then_divide(pool, shares, to_u128(total_coins), pool.total_shares) as u64) + (multiply_then_divide(self, shares, to_u128(total_coins), self.total_shares) as u64) } } /// Return the number of coins `shares` are worth in `pool` with custom total coins and shares numbers. public fun shares_to_amount_with_total_stats( - pool: &Pool, + self: &Pool, shares: u128, total_coins: u64, total_shares: u128, ): u64 { - if (pool.total_coins == 0 || total_shares == 0) { + if (self.total_coins == 0 || total_shares == 0) { 0 } else { - (multiply_then_divide(pool, shares, to_u128(total_coins), total_shares) as u64) + (multiply_then_divide(self, shares, to_u128(total_coins), total_shares) as u64) } } - public fun multiply_then_divide(_pool: &Pool, x: u128, y: u128, z: u128): u128 { + public fun multiply_then_divide(self: &Pool, x: u128, y: u128, z: u128): u128 { let result = (to_u256(x) * to_u256(y)) / to_u256(z); (result as u128) } diff --git a/aptos-move/framework/aptos-stdlib/sources/pool_u64_unbound.spec.move b/aptos-move/framework/aptos-stdlib/sources/pool_u64_unbound.spec.move index 2a8570883ebea..c51b8464c6a94 100644 --- a/aptos-move/framework/aptos-stdlib/sources/pool_u64_unbound.spec.move +++ b/aptos-move/framework/aptos-stdlib/sources/pool_u64_unbound.spec.move @@ -15,9 +15,9 @@ spec aptos_std::pool_u64_unbound { table::spec_contains(pool.shares, shareholder) } - spec 
contains(pool: &Pool, shareholder: address): bool { + spec contains(self: &Pool, shareholder: address): bool { aborts_if false; - ensures result == spec_contains(pool, shareholder); + ensures result == spec_contains(self, shareholder); } spec fun spec_shares(pool: Pool, shareholder: address): u64 { @@ -29,59 +29,59 @@ spec aptos_std::pool_u64_unbound { } } - spec shares(pool: &Pool, shareholder: address): u128 { + spec shares(self: &Pool, shareholder: address): u128 { aborts_if false; - ensures result == spec_shares(pool, shareholder); + ensures result == spec_shares(self, shareholder); } - spec balance(pool: &Pool, shareholder: address): u64 { - let shares = spec_shares(pool, shareholder); - let total_coins = pool.total_coins; - aborts_if pool.total_coins > 0 && pool.total_shares > 0 && (shares * total_coins) / pool.total_shares > MAX_U64; - ensures result == spec_shares_to_amount_with_total_coins(pool, shares, total_coins); + spec balance(self: &Pool, shareholder: address): u64 { + let shares = spec_shares(self, shareholder); + let total_coins = self.total_coins; + aborts_if self.total_coins > 0 && self.total_shares > 0 && (shares * total_coins) / self.total_shares > MAX_U64; + ensures result == spec_shares_to_amount_with_total_coins(self, shares, total_coins); } - spec buy_in(pool: &mut Pool, shareholder: address, coins_amount: u64): u128 { - let new_shares = spec_amount_to_shares_with_total_coins(pool, coins_amount, pool.total_coins); - aborts_if pool.total_coins + coins_amount > MAX_U64; - aborts_if pool.total_shares + new_shares > MAX_U128; + spec buy_in(self: &mut Pool, shareholder: address, coins_amount: u64): u128 { + let new_shares = spec_amount_to_shares_with_total_coins(self, coins_amount, self.total_coins); + aborts_if self.total_coins + coins_amount > MAX_U64; + aborts_if self.total_shares + new_shares > MAX_U128; include coins_amount > 0 ==> AddSharesAbortsIf { new_shares: new_shares }; include coins_amount > 0 ==> AddSharesEnsures { new_shares: 
new_shares }; - ensures pool.total_coins == old(pool.total_coins) + coins_amount; - ensures pool.total_shares == old(pool.total_shares) + new_shares; + ensures self.total_coins == old(self.total_coins) + coins_amount; + ensures self.total_shares == old(self.total_shares) + new_shares; ensures result == new_shares; } - spec add_shares(pool: &mut Pool, shareholder: address, new_shares: u128): u128 { + spec add_shares(self: &mut Pool, shareholder: address, new_shares: u128): u128 { include AddSharesAbortsIf; include AddSharesEnsures; - let key_exists = table::spec_contains(pool.shares, shareholder); - ensures result == if (key_exists) { table::spec_get(pool.shares, shareholder) } + let key_exists = table::spec_contains(self.shares, shareholder); + ensures result == if (key_exists) { table::spec_get(self.shares, shareholder) } else { new_shares }; } spec schema AddSharesAbortsIf { - pool: Pool; + self: Pool; shareholder: address; new_shares: u64; - let key_exists = table::spec_contains(pool.shares, shareholder); - let current_shares = table::spec_get(pool.shares, shareholder); + let key_exists = table::spec_contains(self.shares, shareholder); + let current_shares = table::spec_get(self.shares, shareholder); aborts_if key_exists && current_shares + new_shares > MAX_U128; } spec schema AddSharesEnsures { - pool: Pool; + self: Pool; shareholder: address; new_shares: u64; - let key_exists = table::spec_contains(pool.shares, shareholder); - let current_shares = table::spec_get(pool.shares, shareholder); + let key_exists = table::spec_contains(self.shares, shareholder); + let current_shares = table::spec_get(self.shares, shareholder); ensures key_exists ==> - pool.shares == table::spec_set(old(pool.shares), shareholder, current_shares + new_shares); + self.shares == table::spec_set(old(self.shares), shareholder, current_shares + new_shares); ensures (!key_exists && new_shares > 0) ==> - pool.shares == table::spec_set(old(pool.shares), shareholder, new_shares); + self.shares 
== table::spec_set(old(self.shares), shareholder, new_shares); } spec fun spec_amount_to_shares_with_total_coins(pool: Pool, coins_amount: u64, total_coins: u64): u128 { @@ -93,19 +93,19 @@ spec aptos_std::pool_u64_unbound { } } - spec amount_to_shares_with_total_coins(pool: &Pool, coins_amount: u64, total_coins: u64): u128 { - aborts_if pool.total_coins > 0 && pool.total_shares > 0 - && (coins_amount * pool.total_shares) / total_coins > MAX_U128; - aborts_if (pool.total_coins == 0 || pool.total_shares == 0) - && coins_amount * pool.scaling_factor > MAX_U128; - aborts_if pool.total_coins > 0 && pool.total_shares > 0 && total_coins == 0; - ensures result == spec_amount_to_shares_with_total_coins(pool, coins_amount, total_coins); + spec amount_to_shares_with_total_coins(self: &Pool, coins_amount: u64, total_coins: u64): u128 { + aborts_if self.total_coins > 0 && self.total_shares > 0 + && (coins_amount * self.total_shares) / total_coins > MAX_U128; + aborts_if (self.total_coins == 0 || self.total_shares == 0) + && coins_amount * self.scaling_factor > MAX_U128; + aborts_if self.total_coins > 0 && self.total_shares > 0 && total_coins == 0; + ensures result == spec_amount_to_shares_with_total_coins(self, coins_amount, total_coins); } - spec shares_to_amount_with_total_coins(pool: &Pool, shares: u128, total_coins: u64): u64 { - aborts_if pool.total_coins > 0 && pool.total_shares > 0 - && (shares * total_coins) / pool.total_shares > MAX_U64; - ensures result == spec_shares_to_amount_with_total_coins(pool, shares, total_coins); + spec shares_to_amount_with_total_coins(self: &Pool, shares: u128, total_coins: u64): u64 { + aborts_if self.total_coins > 0 && self.total_shares > 0 + && (shares * total_coins) / self.total_shares > MAX_U64; + ensures result == spec_shares_to_amount_with_total_coins(self, shares, total_coins); } spec fun spec_shares_to_amount_with_total_coins(pool: Pool, shares: u128, total_coins: u64): u64 { @@ -117,63 +117,66 @@ spec aptos_std::pool_u64_unbound 
{ } } - spec multiply_then_divide(_pool: &Pool, x: u128, y: u128, z: u128): u128 { + spec multiply_then_divide(self: &Pool, x: u128, y: u128, z: u128): u128 { aborts_if z == 0; aborts_if (x * y) / z > MAX_U128; ensures result == (x * y) / z; } - spec redeem_shares(pool: &mut Pool, shareholder: address, shares_to_redeem: u128): u64 { - let redeemed_coins = spec_shares_to_amount_with_total_coins(pool, shares_to_redeem, pool.total_coins); - aborts_if !spec_contains(pool, shareholder); - aborts_if spec_shares(pool, shareholder) < shares_to_redeem; - aborts_if pool.total_coins < redeemed_coins; - aborts_if pool.total_shares < shares_to_redeem; - ensures pool.total_coins == old(pool.total_coins) - redeemed_coins; - ensures pool.total_shares == old(pool.total_shares) - shares_to_redeem; - include shares_to_redeem > 0 ==> DeductSharesEnsures { num_shares: shares_to_redeem }; + spec redeem_shares(self: &mut Pool, shareholder: address, shares_to_redeem: u128): u64 { + let redeemed_coins = spec_shares_to_amount_with_total_coins(self, shares_to_redeem, self.total_coins); + aborts_if !spec_contains(self, shareholder); + aborts_if spec_shares(self, shareholder) < shares_to_redeem; + aborts_if self.total_coins < redeemed_coins; + aborts_if self.total_shares < shares_to_redeem; + ensures self.total_coins == old(self.total_coins) - redeemed_coins; + ensures self.total_shares == old(self.total_shares) - shares_to_redeem; + include shares_to_redeem > 0 ==> DeductSharesEnsures { + num_shares: shares_to_redeem + }; ensures result == redeemed_coins; } spec transfer_shares( - pool: &mut Pool, + self: &mut Pool, shareholder_1: address, shareholder_2: address, shares_to_transfer: u128 ) { - aborts_if (shareholder_1 != shareholder_2) && shares_to_transfer > 0 && spec_contains(pool, shareholder_2) && - (spec_shares(pool, shareholder_2) + shares_to_transfer > MAX_U128); - aborts_if !spec_contains(pool, shareholder_1); - aborts_if spec_shares(pool, shareholder_1) < shares_to_transfer; - 
ensures shareholder_1 == shareholder_2 ==> spec_shares(old(pool), shareholder_1) == spec_shares(pool, shareholder_1); - ensures ((shareholder_1 != shareholder_2) && (spec_shares(old(pool), shareholder_1) == shares_to_transfer)) ==> - !spec_contains(pool, shareholder_1); + aborts_if (shareholder_1 != shareholder_2) && shares_to_transfer > 0 && spec_contains(self, shareholder_2) && + (spec_shares(self, shareholder_2) + shares_to_transfer > MAX_U128); + aborts_if !spec_contains(self, shareholder_1); + aborts_if spec_shares(self, shareholder_1) < shares_to_transfer; + ensures shareholder_1 == shareholder_2 ==> spec_shares(old(self), shareholder_1) == spec_shares( + self, shareholder_1); + ensures ((shareholder_1 != shareholder_2) && (spec_shares(old(self), shareholder_1) == shares_to_transfer)) ==> + !spec_contains(self, shareholder_1); ensures (shareholder_1 != shareholder_2 && shares_to_transfer > 0) ==> - (spec_contains(pool, shareholder_2)); - ensures (shareholder_1 != shareholder_2 && shares_to_transfer > 0 && !spec_contains(old(pool), shareholder_2)) ==> - (spec_contains(pool, shareholder_2) && spec_shares(pool, shareholder_2) == shares_to_transfer); - ensures (shareholder_1 != shareholder_2 && shares_to_transfer > 0 && spec_contains(old(pool), shareholder_2)) ==> - (spec_contains(pool, shareholder_2) && spec_shares(pool, shareholder_2) == spec_shares(old(pool), shareholder_2) + shares_to_transfer); - ensures ((shareholder_1 != shareholder_2) && (spec_shares(old(pool), shareholder_1) > shares_to_transfer)) ==> - (spec_contains(pool, shareholder_1) && (spec_shares(pool, shareholder_1) == spec_shares(old(pool), shareholder_1) - shares_to_transfer)); + (spec_contains(self, shareholder_2)); + ensures (shareholder_1 != shareholder_2 && shares_to_transfer > 0 && !spec_contains(old(self), shareholder_2)) ==> + (spec_contains(self, shareholder_2) && spec_shares(self, shareholder_2) == shares_to_transfer); + ensures (shareholder_1 != shareholder_2 && shares_to_transfer > 
0 && spec_contains(old(self), shareholder_2)) ==> + (spec_contains(self, shareholder_2) && spec_shares(self, shareholder_2) == spec_shares(old(self), shareholder_2) + shares_to_transfer); + ensures ((shareholder_1 != shareholder_2) && (spec_shares(old(self), shareholder_1) > shares_to_transfer)) ==> + (spec_contains(self, shareholder_1) && (spec_shares(self, shareholder_1) == spec_shares(old(self), shareholder_1) - shares_to_transfer)); } - spec deduct_shares(pool: &mut Pool, shareholder: address, num_shares: u128): u128 { - aborts_if !spec_contains(pool, shareholder); - aborts_if spec_shares(pool, shareholder) < num_shares; + spec deduct_shares(self: &mut Pool, shareholder: address, num_shares: u128): u128 { + aborts_if !spec_contains(self, shareholder); + aborts_if spec_shares(self, shareholder) < num_shares; include DeductSharesEnsures; - let remaining_shares = table::spec_get(pool.shares, shareholder) - num_shares; - ensures remaining_shares > 0 ==> result == table::spec_get(pool.shares, shareholder); + let remaining_shares = table::spec_get(self.shares, shareholder) - num_shares; + ensures remaining_shares > 0 ==> result == table::spec_get(self.shares, shareholder); ensures remaining_shares == 0 ==> result == 0; } spec schema DeductSharesEnsures { - pool: Pool; + self: Pool; shareholder: address; num_shares: u64; - let remaining_shares = table::spec_get(pool.shares, shareholder) - num_shares; - ensures remaining_shares > 0 ==> table::spec_get(pool.shares, shareholder) == remaining_shares; - ensures remaining_shares == 0 ==> !table::spec_contains(pool.shares, shareholder); + let remaining_shares = table::spec_get(self.shares, shareholder) - num_shares; + ensures remaining_shares > 0 ==> table::spec_get(self.shares, shareholder) == remaining_shares; + ensures remaining_shares == 0 ==> !table::spec_contains(self.shares, shareholder); } spec to_u128(num: u64): u128 { diff --git a/aptos-move/framework/aptos-stdlib/sources/simple_map.move 
b/aptos-move/framework/aptos-stdlib/sources/simple_map.move index 98ae46cf6b30d..d672065b1467d 100644 --- a/aptos-move/framework/aptos-stdlib/sources/simple_map.move +++ b/aptos-move/framework/aptos-stdlib/sources/simple_map.move @@ -23,8 +23,8 @@ module aptos_std::simple_map { value: Value, } - public fun length(map: &SimpleMap): u64 { - vector::length(&map.data) + public fun length(self: &SimpleMap): u64 { + vector::length(&self.data) } /// Create an empty SimpleMap. @@ -52,68 +52,68 @@ module aptos_std::simple_map { } public fun borrow( - map: &SimpleMap, + self: &SimpleMap, key: &Key, ): &Value { - let maybe_idx = find(map, key); + let maybe_idx = find(self, key); assert!(option::is_some(&maybe_idx), error::invalid_argument(EKEY_NOT_FOUND)); let idx = option::extract(&mut maybe_idx); - &vector::borrow(&map.data, idx).value + &vector::borrow(&self.data, idx).value } public fun borrow_mut( - map: &mut SimpleMap, + self: &mut SimpleMap, key: &Key, ): &mut Value { - let maybe_idx = find(map, key); + let maybe_idx = find(self, key); assert!(option::is_some(&maybe_idx), error::invalid_argument(EKEY_NOT_FOUND)); let idx = option::extract(&mut maybe_idx); - &mut vector::borrow_mut(&mut map.data, idx).value + &mut vector::borrow_mut(&mut self.data, idx).value } public fun contains_key( - map: &SimpleMap, + self: &SimpleMap, key: &Key, ): bool { - let maybe_idx = find(map, key); + let maybe_idx = find(self, key); option::is_some(&maybe_idx) } - public fun destroy_empty(map: SimpleMap) { - let SimpleMap { data } = map; + public fun destroy_empty(self: SimpleMap) { + let SimpleMap { data } = self; vector::destroy_empty(data); } /// Add a key/value pair to the map. The key must not already exist. 
public fun add( - map: &mut SimpleMap, + self: &mut SimpleMap, key: Key, value: Value, ) { - let maybe_idx = find(map, &key); + let maybe_idx = find(self, &key); assert!(option::is_none(&maybe_idx), error::invalid_argument(EKEY_ALREADY_EXISTS)); - vector::push_back(&mut map.data, Element { key, value }); + vector::push_back(&mut self.data, Element { key, value }); } /// Add multiple key/value pairs to the map. The keys must not already exist. public fun add_all( - map: &mut SimpleMap, + self: &mut SimpleMap, keys: vector, values: vector, ) { vector::zip(keys, values, |key, value| { - add(map, key, value); + add(self, key, value); }); } /// Insert key/value pair or update an existing key to a new value public fun upsert( - map: &mut SimpleMap, + self: &mut SimpleMap, key: Key, value: Value ): (std::option::Option, std::option::Option) { - let data = &mut map.data; + let data = &mut self.data; let len = vector::length(data); let i = 0; while (i < len) { @@ -126,21 +126,21 @@ module aptos_std::simple_map { }; i = i + 1; }; - vector::push_back(&mut map.data, Element { key, value }); + vector::push_back(&mut self.data, Element { key, value }); (std::option::none(), std::option::none()) } /// Return all keys in the map. This requires keys to be copyable. - public fun keys(map: &SimpleMap): vector { - vector::map_ref(&map.data, |e| { + public fun keys(self: &SimpleMap): vector { + vector::map_ref(&self.data, |e| { let e: &Element = e; e.key }) } /// Return all values in the map. This requires values to be copyable. 
- public fun values(map: &SimpleMap): vector { - vector::map_ref(&map.data, |e| { + public fun values(self: &SimpleMap): vector { + vector::map_ref(&self.data, |e| { let e: &Element = e; e.value }) @@ -149,10 +149,10 @@ module aptos_std::simple_map { /// Transform the map into two vectors with the keys and values respectively /// Primarily used to destroy a map public fun to_vec_pair( - map: SimpleMap): (vector, vector) { + self: SimpleMap): (vector, vector) { let keys: vector = vector::empty(); let values: vector = vector::empty(); - let SimpleMap { data } = map; + let SimpleMap { data } = self; vector::for_each(data, |e| { let Element { key, value } = e; vector::push_back(&mut keys, key); @@ -164,35 +164,35 @@ module aptos_std::simple_map { /// For maps that cannot be dropped this is a utility to destroy them /// using lambdas to destroy the individual keys and values. public inline fun destroy( - map: SimpleMap, + self: SimpleMap, dk: |Key|, dv: |Value| ) { - let (keys, values) = to_vec_pair(map); + let (keys, values) = to_vec_pair(self); vector::destroy(keys, |_k| dk(_k)); vector::destroy(values, |_v| dv(_v)); } /// Remove a key/value pair from the map. The key must exist. 
public fun remove( - map: &mut SimpleMap, + self: &mut SimpleMap, key: &Key, ): (Key, Value) { - let maybe_idx = find(map, key); + let maybe_idx = find(self, key); assert!(option::is_some(&maybe_idx), error::invalid_argument(EKEY_NOT_FOUND)); let placement = option::extract(&mut maybe_idx); - let Element { key, value } = vector::swap_remove(&mut map.data, placement); + let Element { key, value } = vector::swap_remove(&mut self.data, placement); (key, value) } fun find( - map: &SimpleMap, + self: &SimpleMap, key: &Key, ): option::Option { - let leng = vector::length(&map.data); + let leng = vector::length(&self.data); let i = 0; while (i < leng) { - let element = vector::borrow(&map.data, i); + let element = vector::borrow(&self.data, i); if (&element.key == key) { return option::some(i) }; diff --git a/aptos-move/framework/aptos-stdlib/sources/simple_map.spec.move b/aptos-move/framework/aptos-stdlib/sources/simple_map.spec.move index 35258eb37532d..f0bcd5a759fc8 100644 --- a/aptos-move/framework/aptos-stdlib/sources/simple_map.spec.move +++ b/aptos-move/framework/aptos-stdlib/sources/simple_map.spec.move @@ -89,30 +89,32 @@ spec aptos_std::simple_map { spec_get(result, vector::borrow(keys, i)) == vector::borrow(values, i); } - spec to_vec_pair(map: SimpleMap): (vector, vector) { + spec to_vec_pair(self: SimpleMap): (vector, vector) { pragma intrinsic; pragma opaque; ensures [abstract] forall k: Key: vector::spec_contains(result_1, k) <==> - spec_contains_key(map, k); + spec_contains_key(self, k); ensures [abstract] forall i in 0..len(result_1): - spec_get(map, vector::borrow(result_1, i)) == vector::borrow(result_2, i); + spec_get(self, vector::borrow(result_1, i)) == vector::borrow(result_2, i); } spec upsert( - map: &mut SimpleMap, + self: &mut SimpleMap, key: Key, value: Value ): (std::option::Option, std::option::Option) { pragma intrinsic; pragma opaque; aborts_if [abstract] false; - ensures [abstract] !spec_contains_key(old(map), key) ==> 
option::is_none(result_1); - ensures [abstract] !spec_contains_key(old(map), key) ==> option::is_none(result_2); - ensures [abstract] spec_contains_key(map, key); - ensures [abstract] spec_get(map, key) == value; - ensures [abstract] spec_contains_key(old(map), key) ==> ((option::is_some(result_1)) && (option::spec_borrow(result_1) == key)); - ensures [abstract] spec_contains_key(old(map), key) ==> ((option::is_some(result_2)) && (option::spec_borrow(result_2) == spec_get(old(map), key))); + ensures [abstract] !spec_contains_key(old(self), key) ==> option::is_none(result_1); + ensures [abstract] !spec_contains_key(old(self), key) ==> option::is_none(result_2); + ensures [abstract] spec_contains_key(self, key); + ensures [abstract] spec_get(self, key) == value; + ensures [abstract] spec_contains_key(old(self), key) ==> ((option::is_some(result_1)) && (option::spec_borrow(result_1) == key)); + ensures [abstract] spec_contains_key(old(self), key) ==> ((option::is_some(result_2)) && (option::spec_borrow(result_2) == spec_get(old( + self + ), key))); } // Specification functions for tables diff --git a/aptos-move/framework/aptos-stdlib/sources/table.move b/aptos-move/framework/aptos-stdlib/sources/table.move index dbc85209dd8e0..f96936f29f6f9 100644 --- a/aptos-move/framework/aptos-stdlib/sources/table.move +++ b/aptos-move/framework/aptos-stdlib/sources/table.move @@ -23,73 +23,73 @@ module aptos_std::table { /// Add a new entry to the table. Aborts if an entry for this /// key already exists. The entry itself is not stored in the /// table, and cannot be discovered from it. - public fun add(table: &mut Table, key: K, val: V) { - add_box>(table, key, Box { val }) + public fun add(self: &mut Table, key: K, val: V) { + add_box>(self, key, Box { val }) } /// Acquire an immutable reference to the value which `key` maps to. /// Aborts if there is no entry for `key`. 
- public fun borrow(table: &Table, key: K): &V { - &borrow_box>(table, key).val + public fun borrow(self: &Table, key: K): &V { + &borrow_box>(self, key).val } /// Acquire an immutable reference to the value which `key` maps to. /// Returns specified default value if there is no entry for `key`. - public fun borrow_with_default(table: &Table, key: K, default: &V): &V { - if (!contains(table, copy key)) { + public fun borrow_with_default(self: &Table, key: K, default: &V): &V { + if (!contains(self, copy key)) { default } else { - borrow(table, copy key) + borrow(self, copy key) } } /// Acquire a mutable reference to the value which `key` maps to. /// Aborts if there is no entry for `key`. - public fun borrow_mut(table: &mut Table, key: K): &mut V { - &mut borrow_box_mut>(table, key).val + public fun borrow_mut(self: &mut Table, key: K): &mut V { + &mut borrow_box_mut>(self, key).val } /// Acquire a mutable reference to the value which `key` maps to. /// Insert the pair (`key`, `default`) first if there is no entry for `key`. - public fun borrow_mut_with_default(table: &mut Table, key: K, default: V): &mut V { - if (!contains(table, copy key)) { - add(table, copy key, default) + public fun borrow_mut_with_default(self: &mut Table, key: K, default: V): &mut V { + if (!contains(self, copy key)) { + add(self, copy key, default) }; - borrow_mut(table, key) + borrow_mut(self, key) } /// Insert the pair (`key`, `value`) if there is no entry for `key`. /// update the value of the entry for `key` to `value` otherwise - public fun upsert(table: &mut Table, key: K, value: V) { - if (!contains(table, copy key)) { - add(table, copy key, value) + public fun upsert(self: &mut Table, key: K, value: V) { + if (!contains(self, copy key)) { + add(self, copy key, value) } else { - let ref = borrow_mut(table, key); + let ref = borrow_mut(self, key); *ref = value; }; } - /// Remove from `table` and return the value which `key` maps to. 
+ /// Remove from `self` and return the value which `key` maps to. /// Aborts if there is no entry for `key`. - public fun remove(table: &mut Table, key: K): V { - let Box { val } = remove_box>(table, key); + public fun remove(self: &mut Table, key: K): V { + let Box { val } = remove_box>(self, key); val } - /// Returns true iff `table` contains an entry for `key`. - public fun contains(table: &Table, key: K): bool { - contains_box>(table, key) + /// Returns true iff `self` contains an entry for `key`. + public fun contains(self: &Table, key: K): bool { + contains_box>(self, key) } #[test_only] /// Testing only: allows to drop a table even if it is not empty. - public fun drop_unchecked(table: Table) { - drop_unchecked_box>(table) + public fun drop_unchecked(self: Table) { + drop_unchecked_box>(self) } - public(friend) fun destroy(table: Table) { - destroy_empty_box>(&table); - drop_unchecked_box>(table) + public(friend) fun destroy(self: Table) { + destroy_empty_box>(&self); + drop_unchecked_box>(self) } #[test_only] diff --git a/aptos-move/framework/aptos-stdlib/sources/table_with_length.move b/aptos-move/framework/aptos-stdlib/sources/table_with_length.move index c56ff2b4224fc..e4ca2415bc939 100644 --- a/aptos-move/framework/aptos-stdlib/sources/table_with_length.move +++ b/aptos-move/framework/aptos-stdlib/sources/table_with_length.move @@ -25,84 +25,84 @@ module aptos_std::table_with_length { } /// Destroy a table. The table must be empty to succeed. - public fun destroy_empty(table: TableWithLength) { - assert!(table.length == 0, error::invalid_state(ENOT_EMPTY)); - let TableWithLength { inner, length: _ } = table; + public fun destroy_empty(self: TableWithLength) { + assert!(self.length == 0, error::invalid_state(ENOT_EMPTY)); + let TableWithLength { inner, length: _ } = self; table::destroy(inner) } /// Add a new entry to the table. Aborts if an entry for this /// key already exists. 
The entry itself is not stored in the /// table, and cannot be discovered from it. - public fun add(table: &mut TableWithLength, key: K, val: V) { - table::add(&mut table.inner, key, val); - table.length = table.length + 1; + public fun add(self: &mut TableWithLength, key: K, val: V) { + table::add(&mut self.inner, key, val); + self.length = self.length + 1; } /// Acquire an immutable reference to the value which `key` maps to. /// Aborts if there is no entry for `key`. - public fun borrow(table: &TableWithLength, key: K): &V { - table::borrow(&table.inner, key) + public fun borrow(self: &TableWithLength, key: K): &V { + table::borrow(&self.inner, key) } /// Acquire a mutable reference to the value which `key` maps to. /// Aborts if there is no entry for `key`. - public fun borrow_mut(table: &mut TableWithLength, key: K): &mut V { - table::borrow_mut(&mut table.inner, key) + public fun borrow_mut(self: &mut TableWithLength, key: K): &mut V { + table::borrow_mut(&mut self.inner, key) } /// Returns the length of the table, i.e. the number of entries. - public fun length(table: &TableWithLength): u64 { - table.length + public fun length(self: &TableWithLength): u64 { + self.length } /// Returns true if this table is empty. - public fun empty(table: &TableWithLength): bool { - table.length == 0 + public fun empty(self: &TableWithLength): bool { + self.length == 0 } /// Acquire a mutable reference to the value which `key` maps to. /// Insert the pair (`key`, `default`) first if there is no entry for `key`. 
- public fun borrow_mut_with_default(table: &mut TableWithLength, key: K, default: V): &mut V { - if (table::contains(&table.inner, key)) { - table::borrow_mut(&mut table.inner, key) + public fun borrow_mut_with_default(self: &mut TableWithLength, key: K, default: V): &mut V { + if (table::contains(&self.inner, key)) { + table::borrow_mut(&mut self.inner, key) } else { - table::add(&mut table.inner, key, default); - table.length = table.length + 1; - table::borrow_mut(&mut table.inner, key) + table::add(&mut self.inner, key, default); + self.length = self.length + 1; + table::borrow_mut(&mut self.inner, key) } } /// Insert the pair (`key`, `value`) if there is no entry for `key`. /// update the value of the entry for `key` to `value` otherwise - public fun upsert(table: &mut TableWithLength, key: K, value: V) { - if (!table::contains(&table.inner, key)) { - add(table, copy key, value) + public fun upsert(self: &mut TableWithLength, key: K, value: V) { + if (!table::contains(&self.inner, key)) { + add(self, copy key, value) } else { - let ref = table::borrow_mut(&mut table.inner, key); + let ref = table::borrow_mut(&mut self.inner, key); *ref = value; }; } /// Remove from `table` and return the value which `key` maps to. /// Aborts if there is no entry for `key`. - public fun remove(table: &mut TableWithLength, key: K): V { - let val = table::remove(&mut table.inner, key); - table.length = table.length - 1; + public fun remove(self: &mut TableWithLength, key: K): V { + let val = table::remove(&mut self.inner, key); + self.length = self.length - 1; val } /// Returns true iff `table` contains an entry for `key`. - public fun contains(table: &TableWithLength, key: K): bool { - table::contains(&table.inner, key) + public fun contains(self: &TableWithLength, key: K): bool { + table::contains(&self.inner, key) } #[test_only] /// Drop table even if not empty, only when testing. 
- public fun drop_unchecked(table: TableWithLength) { + public fun drop_unchecked(self: TableWithLength) { // Unpack table with length, dropping length count but not // inner table. - let TableWithLength{inner, length: _} = table; + let TableWithLength{inner, length: _} = self; table::drop_unchecked(inner); // Drop inner table. } diff --git a/aptos-move/framework/aptos-stdlib/sources/type_info.move b/aptos-move/framework/aptos-stdlib/sources/type_info.move index 2ad3bba4041cc..b790472faf458 100644 --- a/aptos-move/framework/aptos-stdlib/sources/type_info.move +++ b/aptos-move/framework/aptos-stdlib/sources/type_info.move @@ -2,7 +2,6 @@ module aptos_std::type_info { use std::bcs; use std::features; use std::string::{Self, String}; - use std::vector; // // Error codes @@ -24,16 +23,16 @@ module aptos_std::type_info { // Public functions // - public fun account_address(type_info: &TypeInfo): address { - type_info.account_address + public fun account_address(self: &TypeInfo): address { + self.account_address } - public fun module_name(type_info: &TypeInfo): vector { - type_info.module_name + public fun module_name(self: &TypeInfo): vector { + self.module_name } - public fun struct_name(type_info: &TypeInfo): vector { - type_info.struct_name + public fun struct_name(self: &TypeInfo): vector { + self.struct_name } /// Returns the current chain ID, mirroring what `aptos_framework::chain_id::get()` would return, except in `#[test]` @@ -65,13 +64,15 @@ module aptos_std::type_info { /// nesting patterns, as well as `test_size_of_val_vectors()` for an /// analysis of vector size dynamism. public fun size_of_val(val_ref: &T): u64 { - // Return vector length of vectorized BCS representation. 
- vector::length(&bcs::to_bytes(val_ref)) + bcs::serialized_size(val_ref) } #[test_only] use aptos_std::table::Table; + #[test_only] + use std::vector; + #[test] fun test_type_of() { let type_info = type_of(); diff --git a/aptos-move/framework/aptos-stdlib/sources/type_info.spec.move b/aptos-move/framework/aptos-stdlib/sources/type_info.spec.move index ed3ed481c8008..26fe9b6163c6a 100644 --- a/aptos-move/framework/aptos-stdlib/sources/type_info.spec.move +++ b/aptos-move/framework/aptos-stdlib/sources/type_info.spec.move @@ -30,7 +30,6 @@ spec aptos_std::type_info { } spec size_of_val(val_ref: &T): u64 { - aborts_if false; ensures result == spec_size_of_val(val_ref); } } diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/any.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/any.md new file mode 100644 index 0000000000000..4a8954d00d685 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/any.md @@ -0,0 +1,252 @@ + + + +# Module `0x1::any` + + + +- [Struct `Any`](#0x1_any_Any) +- [Constants](#@Constants_0) +- [Function `pack`](#0x1_any_pack) +- [Function `unpack`](#0x1_any_unpack) +- [Function `type_name`](#0x1_any_type_name) +- [Specification](#@Specification_1) + - [Function `pack`](#@Specification_1_pack) + - [Function `unpack`](#@Specification_1_unpack) + - [Function `type_name`](#@Specification_1_type_name) + + +
use 0x1::bcs;
+use 0x1::error;
+use 0x1::from_bcs;
+use 0x1::string;
+use 0x1::type_info;
+
+ + + + + +## Struct `Any` + +A type which can represent a value of any type. This allows for representation of 'unknown' future +values. For example, to define a resource such that it can later be extended without breaking +changes one can do + +```move +struct Resource { +field: Type, +... +extension: Option +} +``` + + 
struct Any has drop, store
+
+ + + +
+Fields + + +
+
+type_name: string::String +
+
+ +
+
+data: vector<u8> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +The type provided for unpack is not the same as was given for pack. + + +
const ETYPE_MISMATCH: u64 = 1;
+
+ + + + + +## Function `pack` + +Pack a value into the Any representation. Because Any can be stored and dropped, this is +also required from T. + + +
public fun pack<T: drop, store>(x: T): any::Any
+
+ + + +
+Implementation + + +
public fun pack<T: drop + store>(x: T): Any {
+    Any {
+        type_name: type_info::type_name<T>(),
+        data: to_bytes(&x)
+    }
+}
+
+ + + +
+ + + +## Function `unpack` + +Unpack a value from the Any representation. This aborts if the value does not have the expected type T. + + 
public fun unpack<T>(self: any::Any): T
+
+ + + +
+Implementation + + +
public fun unpack<T>(self: Any): T {
+    assert!(type_info::type_name<T>() == self.type_name, error::invalid_argument(ETYPE_MISMATCH));
+    from_bytes<T>(self.data)
+}
+
+ + + +
+ + + +## Function `type_name` + +Returns the type name of this Any + + +
public fun type_name(self: &any::Any): &string::String
+
+ + + +
+Implementation + + +
public fun type_name(self: &Any): &String {
+    &self.type_name
+}
+
+ + + +
+ + + +## Specification + + + + +### Function `pack` + + +
public fun pack<T: drop, store>(x: T): any::Any
+
+ + + + +
aborts_if false;
+ensures result == Any {
+    type_name: type_info::type_name<T>(),
+    data: bcs::serialize<T>(x)
+};
+ensures [abstract] from_bcs::deserializable<T>(result.data);
+
+ + + + + +### Function `unpack` + + +
public fun unpack<T>(self: any::Any): T
+
+ + + + +
include UnpackAbortsIf<T>;
+ensures result == from_bcs::deserialize<T>(self.data);
+
+ + + + + + + +
schema UnpackAbortsIf<T> {
+    self: Any;
+    aborts_if type_info::type_name<T>() != self.type_name;
+    aborts_if !from_bcs::deserializable<T>(self.data);
+}
+
+ + + + + + + +
schema UnpackRequirement<T> {
+    self: Any;
+    requires type_info::type_name<T>() == self.type_name;
+    requires from_bcs::deserializable<T>(self.data);
+}
+
+ + + + + +### Function `type_name` + + +
public fun type_name(self: &any::Any): &string::String
+
+ + + + +
aborts_if false;
+ensures result == self.type_name;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/big_vector.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/big_vector.md new file mode 100644 index 0000000000000..2e7a9a100357c --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/big_vector.md @@ -0,0 +1,1120 @@ + + + +# Module `0x1::big_vector` + + + +- [Struct `BigVector`](#0x1_big_vector_BigVector) +- [Constants](#@Constants_0) +- [Function `empty`](#0x1_big_vector_empty) +- [Function `singleton`](#0x1_big_vector_singleton) +- [Function `destroy_empty`](#0x1_big_vector_destroy_empty) +- [Function `destroy`](#0x1_big_vector_destroy) +- [Function `borrow`](#0x1_big_vector_borrow) +- [Function `borrow_mut`](#0x1_big_vector_borrow_mut) +- [Function `append`](#0x1_big_vector_append) +- [Function `push_back`](#0x1_big_vector_push_back) +- [Function `pop_back`](#0x1_big_vector_pop_back) +- [Function `remove`](#0x1_big_vector_remove) +- [Function `swap_remove`](#0x1_big_vector_swap_remove) +- [Function `swap`](#0x1_big_vector_swap) +- [Function `reverse`](#0x1_big_vector_reverse) +- [Function `index_of`](#0x1_big_vector_index_of) +- [Function `contains`](#0x1_big_vector_contains) +- [Function `to_vector`](#0x1_big_vector_to_vector) +- [Function `length`](#0x1_big_vector_length) +- [Function `is_empty`](#0x1_big_vector_is_empty) +- [Specification](#@Specification_1) + - [Struct `BigVector`](#@Specification_1_BigVector) + - [Function `empty`](#@Specification_1_empty) + - [Function `singleton`](#@Specification_1_singleton) + - [Function `destroy_empty`](#@Specification_1_destroy_empty) + - [Function `borrow`](#@Specification_1_borrow) + - [Function `borrow_mut`](#@Specification_1_borrow_mut) + - [Function `append`](#@Specification_1_append) + - [Function `push_back`](#@Specification_1_push_back) + - [Function `pop_back`](#@Specification_1_pop_back) + - [Function `remove`](#@Specification_1_remove) + - [Function 
`swap_remove`](#@Specification_1_swap_remove) + - [Function `swap`](#@Specification_1_swap) + - [Function `reverse`](#@Specification_1_reverse) + - [Function `index_of`](#@Specification_1_index_of) + + +
use 0x1::error;
+use 0x1::table_with_length;
+use 0x1::vector;
+
+ + + + + +## Struct `BigVector` + +A scalable vector implementation based on tables where elements are grouped into buckets. +Each bucket has a capacity of bucket_size elements. + + +
struct BigVector<T> has store
+
+ + + +
+Fields + + +
+
+buckets: table_with_length::TableWithLength<u64, vector<T>> +
+
+ +
+
+end_index: u64 +
+
+ +
+
+bucket_size: u64 +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Vector index is out of bounds + + +
const EINDEX_OUT_OF_BOUNDS: u64 = 1;
+
+ + + + + +Cannot pop back from an empty vector + + +
const EVECTOR_EMPTY: u64 = 3;
+
+ + + + + +Cannot destroy a non-empty vector + + +
const EVECTOR_NOT_EMPTY: u64 = 2;
+
+ + + + + +bucket_size cannot be 0 + + +
const EZERO_BUCKET_SIZE: u64 = 4;
+
+ + + + + +## Function `empty` + +Regular Vector API +Create an empty vector. + + +
public(friend) fun empty<T: store>(bucket_size: u64): big_vector::BigVector<T>
+
+ + + +
+Implementation + + +
public(friend) fun empty<T: store>(bucket_size: u64): BigVector<T> {
+    assert!(bucket_size > 0, error::invalid_argument(EZERO_BUCKET_SIZE));
+    BigVector {
+        buckets: table_with_length::new(),
+        end_index: 0,
+        bucket_size,
+    }
+}
+
+ + + +
+ + + +## Function `singleton` + +Create a vector of length 1 containing the passed in element. + + +
public(friend) fun singleton<T: store>(element: T, bucket_size: u64): big_vector::BigVector<T>
+
+ + + +
+Implementation + + +
public(friend) fun singleton<T: store>(element: T, bucket_size: u64): BigVector<T> {
+    let v = empty(bucket_size);
+    push_back(&mut v, element);
+    v
+}
+
+ + + +
+ + + +## Function `destroy_empty` + +Destroy the vector self. +Aborts if self is not empty. + + +
public fun destroy_empty<T>(self: big_vector::BigVector<T>)
+
+ + + +
+Implementation + + +
public fun destroy_empty<T>(self: BigVector<T>) {
+    assert!(is_empty(&self), error::invalid_argument(EVECTOR_NOT_EMPTY));
+    let BigVector { buckets, end_index: _, bucket_size: _ } = self;
+    table_with_length::destroy_empty(buckets);
+}
+
+ + + +
+ + + +## Function `destroy` + +Destroy the vector self if T has drop + + +
public fun destroy<T: drop>(self: big_vector::BigVector<T>)
+
+ + + +
+Implementation + + +
public fun destroy<T: drop>(self: BigVector<T>) {
+    let BigVector { buckets, end_index, bucket_size: _ } = self;
+    let i = 0;
+    while (end_index > 0) {
+        let num_elements = vector::length(&table_with_length::remove(&mut buckets, i));
+        end_index = end_index - num_elements;
+        i = i + 1;
+    };
+    table_with_length::destroy_empty(buckets);
+}
+
+ + + +
+ + + +## Function `borrow` + +Acquire an immutable reference to the ith element of the vector self. +Aborts if i is out of bounds. + + +
public fun borrow<T>(self: &big_vector::BigVector<T>, i: u64): &T
+
+ + + +
+Implementation + + +
public fun borrow<T>(self: &BigVector<T>, i: u64): &T {
+    assert!(i < length(self), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
+    vector::borrow(table_with_length::borrow(&self.buckets, i / self.bucket_size), i % self.bucket_size)
+}
+
+ + + +
+ + + +## Function `borrow_mut` + +Return a mutable reference to the ith element in the vector self. +Aborts if i is out of bounds. + + +
public fun borrow_mut<T>(self: &mut big_vector::BigVector<T>, i: u64): &mut T
+
+ + + +
+Implementation + + +
public fun borrow_mut<T>(self: &mut BigVector<T>, i: u64): &mut T {
+    assert!(i < length(self), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
+    vector::borrow_mut(table_with_length::borrow_mut(&mut self.buckets, i / self.bucket_size), i % self.bucket_size)
+}
+
+ + + +
+ + + +## Function `append` + +Empty and destroy the other vector, and push each of the elements in the other vector onto the self vector in the +same order as they occurred in other. +Disclaimer: This function is costly. Use it at your own discretion. + + +
public fun append<T: store>(self: &mut big_vector::BigVector<T>, other: big_vector::BigVector<T>)
+
+ + + +
+Implementation + + +
public fun append<T: store>(self: &mut BigVector<T>, other: BigVector<T>) {
+    let other_len = length(&other);
+    let half_other_len = other_len / 2;
+    let i = 0;
+    while (i < half_other_len) {
+        push_back(self, swap_remove(&mut other, i));
+        i = i + 1;
+    };
+    while (i < other_len) {
+        push_back(self, pop_back(&mut other));
+        i = i + 1;
+    };
+    destroy_empty(other);
+}
+
+ + + +
+ + + +## Function `push_back` + +Add element val to the end of the vector self. It grows the buckets when the current buckets are full. +This operation will cost more gas when it adds new bucket. + + +
public fun push_back<T: store>(self: &mut big_vector::BigVector<T>, val: T)
+
+ + + +
+Implementation + + +
public fun push_back<T: store>(self: &mut BigVector<T>, val: T) {
+    let num_buckets = table_with_length::length(&self.buckets);
+    if (self.end_index == num_buckets * self.bucket_size) {
+        table_with_length::add(&mut self.buckets, num_buckets, vector::empty());
+        vector::push_back(table_with_length::borrow_mut(&mut self.buckets, num_buckets), val);
+    } else {
+        vector::push_back(table_with_length::borrow_mut(&mut self.buckets, num_buckets - 1), val);
+    };
+    self.end_index = self.end_index + 1;
+}
+
+ + + +
+ + + +## Function `pop_back` + +Pop an element from the end of vector self. It doesn't shrink the buckets even if they're empty. +Call shrink_to_fit explicitly to deallocate empty buckets. +Aborts if self is empty. + + 
public fun pop_back<T>(self: &mut big_vector::BigVector<T>): T
+
+ + + +
+Implementation + + +
public fun pop_back<T>(self: &mut BigVector<T>): T {
+    assert!(!is_empty(self), error::invalid_state(EVECTOR_EMPTY));
+    let num_buckets = table_with_length::length(&self.buckets);
+    let last_bucket = table_with_length::borrow_mut(&mut self.buckets, num_buckets - 1);
+    let val = vector::pop_back(last_bucket);
+    // Shrink the table if the last vector is empty.
+    if (vector::is_empty(last_bucket)) {
+        move last_bucket;
+        vector::destroy_empty(table_with_length::remove(&mut self.buckets, num_buckets - 1));
+    };
+    self.end_index = self.end_index - 1;
+    val
+}
+
+ + + +
+ + + +## Function `remove` + +Remove the element at index i in the vector v and return the owned value that was previously stored at i in self. +All elements occurring at indices greater than i will be shifted down by 1. Will abort if i is out of bounds. +Disclaimer: This function is costly. Use it at your own discretion. + + +
public fun remove<T>(self: &mut big_vector::BigVector<T>, i: u64): T
+
+ + + +
+Implementation + + +
public fun remove<T>(self: &mut BigVector<T>, i: u64): T {
+    let len = length(self);
+    assert!(i < len, error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
+    let num_buckets = table_with_length::length(&self.buckets);
+    let cur_bucket_index = i / self.bucket_size + 1;
+    let cur_bucket = table_with_length::borrow_mut(&mut self.buckets, cur_bucket_index - 1);
+    let res = vector::remove(cur_bucket, i % self.bucket_size);
+    self.end_index = self.end_index - 1;
+    move cur_bucket;
+    while ({
+        spec {
+            invariant cur_bucket_index <= num_buckets;
+            invariant table_with_length::spec_len(self.buckets) == num_buckets;
+        };
+        (cur_bucket_index < num_buckets)
+    }) {
+        // remove one element from the start of current vector
+        let cur_bucket = table_with_length::borrow_mut(&mut self.buckets, cur_bucket_index);
+        let t = vector::remove(cur_bucket, 0);
+        move cur_bucket;
+        // and put it at the end of the last one
+        let prev_bucket = table_with_length::borrow_mut(&mut self.buckets, cur_bucket_index - 1);
+        vector::push_back(prev_bucket, t);
+        cur_bucket_index = cur_bucket_index + 1;
+    };
+    spec {
+        assert cur_bucket_index == num_buckets;
+    };
+
+    // Shrink the table if the last vector is empty.
+    let last_bucket = table_with_length::borrow_mut(&mut self.buckets, num_buckets - 1);
+    if (vector::is_empty(last_bucket)) {
+        move last_bucket;
+        vector::destroy_empty(table_with_length::remove(&mut self.buckets, num_buckets - 1));
+    };
+
+    res
+}
+
+ + + +
+ + + +## Function `swap_remove` + +Swap the ith element of the vector self with the last element and then pop the vector. +This is O(1), but does not preserve ordering of elements in the vector. +Aborts if i is out of bounds. + + +
public fun swap_remove<T>(self: &mut big_vector::BigVector<T>, i: u64): T
+
+ + + +
+Implementation + + +
public fun swap_remove<T>(self: &mut BigVector<T>, i: u64): T {
+    assert!(i < length(self), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
+    let last_val = pop_back(self);
+    // if the requested value is the last one, return it
+    if (self.end_index == i) {
+        return last_val
+    };
+    // because the lack of mem::swap, here we swap remove the requested value from the bucket
+    // and append the last_val to the bucket then swap the last bucket val back
+    let bucket = table_with_length::borrow_mut(&mut self.buckets, i / self.bucket_size);
+    let bucket_len = vector::length(bucket);
+    let val = vector::swap_remove(bucket, i % self.bucket_size);
+    vector::push_back(bucket, last_val);
+    vector::swap(bucket, i % self.bucket_size, bucket_len - 1);
+    val
+}
+
+ + + +
+ + + +## Function `swap` + +Swap the elements at the i'th and j'th indices in the vector self. Will abort if either of i or j are out of bounds +for self. + + +
public fun swap<T>(self: &mut big_vector::BigVector<T>, i: u64, j: u64)
+
+ + + +
+Implementation + + +
public fun swap<T>(self: &mut BigVector<T>, i: u64, j: u64) {
+    assert!(i < length(self) && j < length(self), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
+    let i_bucket_index = i / self.bucket_size;
+    let j_bucket_index = j / self.bucket_size;
+    let i_vector_index = i % self.bucket_size;
+    let j_vector_index = j % self.bucket_size;
+    if (i_bucket_index == j_bucket_index) {
+        vector::swap(table_with_length::borrow_mut(&mut self.buckets, i_bucket_index), i_vector_index, j_vector_index);
+        return
+    };
+    // If i and j are in different buckets, take the buckets out first for easy mutation.
+    let bucket_i = table_with_length::remove(&mut self.buckets, i_bucket_index);
+    let bucket_j = table_with_length::remove(&mut self.buckets, j_bucket_index);
+    // Get the elements from buckets by calling `swap_remove`.
+    let element_i = vector::swap_remove(&mut bucket_i, i_vector_index);
+    let element_j = vector::swap_remove(&mut bucket_j, j_vector_index);
+    // Swap the elements and push back to the other bucket.
+    vector::push_back(&mut bucket_i, element_j);
+    vector::push_back(&mut bucket_j, element_i);
+    let last_index_in_bucket_i = vector::length(&bucket_i) - 1;
+    let last_index_in_bucket_j = vector::length(&bucket_j) - 1;
+    // Re-position the swapped elements to the right index.
+    vector::swap(&mut bucket_i, i_vector_index, last_index_in_bucket_i);
+    vector::swap(&mut bucket_j, j_vector_index, last_index_in_bucket_j);
+    // Add back the buckets.
+    table_with_length::add(&mut self.buckets, i_bucket_index, bucket_i);
+    table_with_length::add(&mut self.buckets, j_bucket_index, bucket_j);
+}
+
+ + + +
+ + + +## Function `reverse` + +Reverse the order of the elements in the vector self in-place. +Disclaimer: This function is costly. Use it at your own discretion. + + +
public fun reverse<T>(self: &mut big_vector::BigVector<T>)
+
+ + + +
+Implementation + + +
public fun reverse<T>(self: &mut BigVector<T>) {
+    let new_buckets = vector[];
+    let push_bucket = vector[];
+    let num_buckets = table_with_length::length(&self.buckets);
+    let num_buckets_left = num_buckets;
+
+    while (num_buckets_left > 0) {
+        let pop_bucket = table_with_length::remove(&mut self.buckets, num_buckets_left - 1);
+        vector::for_each_reverse(pop_bucket, |val| {
+            vector::push_back(&mut push_bucket, val);
+            if (vector::length(&push_bucket) == self.bucket_size) {
+                vector::push_back(&mut new_buckets, push_bucket);
+                push_bucket = vector[];
+            };
+        });
+        num_buckets_left = num_buckets_left - 1;
+    };
+
+    if (vector::length(&push_bucket) > 0) {
+        vector::push_back(&mut new_buckets, push_bucket);
+    } else {
+        vector::destroy_empty(push_bucket);
+    };
+
+    vector::reverse(&mut new_buckets);
+    let i = 0;
+    assert!(table_with_length::length(&self.buckets) == 0, 0);
+    while (i < num_buckets) {
+        table_with_length::add(&mut self.buckets, i, vector::pop_back(&mut new_buckets));
+        i = i + 1;
+    };
+    vector::destroy_empty(new_buckets);
+}
+
+ + + +
+ + + +## Function `index_of` + +Return the index of the first occurrence of an element in self that is equal to e. Returns (true, index) if such an +element was found, and (false, 0) otherwise. +Disclaimer: This function is costly. Use it at your own discretion. + + +
public fun index_of<T>(self: &big_vector::BigVector<T>, val: &T): (bool, u64)
+
+ + + +
+Implementation + + +
public fun index_of<T>(self: &BigVector<T>, val: &T): (bool, u64) {
+    let num_buckets = table_with_length::length(&self.buckets);
+    let bucket_index = 0;
+    while (bucket_index < num_buckets) {
+        let cur = table_with_length::borrow(&self.buckets, bucket_index);
+        let (found, i) = vector::index_of(cur, val);
+        if (found) {
+            return (true, bucket_index * self.bucket_size + i)
+        };
+        bucket_index = bucket_index + 1;
+    };
+    (false, 0)
+}
+
+ + + +
+ + + +## Function `contains` + +Return if an element equal to e exists in the vector self. +Disclaimer: This function is costly. Use it at your own discretion. + + +
public fun contains<T>(self: &big_vector::BigVector<T>, val: &T): bool
+
+ + + +
+Implementation + + +
public fun contains<T>(self: &BigVector<T>, val: &T): bool {
+    if (is_empty(self)) return false;
+    let (exist, _) = index_of(self, val);
+    exist
+}
+
+ + + +
+ + + +## Function `to_vector` + +Convert a big vector to a native vector, which is supposed to be called mostly by view functions to get an +atomic view of the whole vector. +Disclaimer: This function may be costly as the big vector may be huge in size. Use it at your own discretion. + + +
public fun to_vector<T: copy>(self: &big_vector::BigVector<T>): vector<T>
+
+ + + +
+Implementation + + +
public fun to_vector<T: copy>(self: &BigVector<T>): vector<T> {
+    let res = vector[];
+    let num_buckets = table_with_length::length(&self.buckets);
+    let i = 0;
+    while (i < num_buckets) {
+        vector::append(&mut res, *table_with_length::borrow(&self.buckets, i));
+        i = i + 1;
+    };
+    res
+}
+
+ + + +
+ + + +## Function `length` + +Return the length of the vector. + + +
public fun length<T>(self: &big_vector::BigVector<T>): u64
+
+ + + +
+Implementation + + +
public fun length<T>(self: &BigVector<T>): u64 {
+    self.end_index
+}
+
+ + + +
+ + + +## Function `is_empty` + +Return true if the vector v has no elements and false otherwise. + + +
public fun is_empty<T>(self: &big_vector::BigVector<T>): bool
+
+ + + +
+Implementation + + +
public fun is_empty<T>(self: &BigVector<T>): bool {
+    length(self) == 0
+}
+
+ + + +
+ + + +## Specification + + + + +### Struct `BigVector` + + +
struct BigVector<T> has store
+
+ + + +
+
+buckets: table_with_length::TableWithLength<u64, vector<T>> +
+
+ +
+
+end_index: u64 +
+
+ +
+
+bucket_size: u64 +
+
+ +
+
+ + + +
invariant bucket_size != 0;
+invariant spec_table_len(buckets) == 0 ==> end_index == 0;
+invariant end_index == 0 ==> spec_table_len(buckets) == 0;
+invariant end_index <= spec_table_len(buckets) * bucket_size;
+invariant spec_table_len(buckets) == 0
+    || (forall i in 0..spec_table_len(buckets)-1: len(table_with_length::spec_get(buckets, i)) == bucket_size);
+invariant spec_table_len(buckets) == 0
+    || len(table_with_length::spec_get(buckets, spec_table_len(buckets) -1 )) <= bucket_size;
+invariant forall i in 0..spec_table_len(buckets): spec_table_contains(buckets, i);
+invariant spec_table_len(buckets) == (end_index + bucket_size - 1) / bucket_size;
+invariant (spec_table_len(buckets) == 0 && end_index == 0)
+    || (spec_table_len(buckets) != 0 && ((spec_table_len(buckets) - 1) * bucket_size) + (len(table_with_length::spec_get(buckets, spec_table_len(buckets) - 1))) == end_index);
+invariant forall i: u64 where i >= spec_table_len(buckets):  {
+    !spec_table_contains(buckets, i)
+};
+invariant forall i: u64 where i < spec_table_len(buckets):  {
+    spec_table_contains(buckets, i)
+};
+invariant spec_table_len(buckets) == 0
+    || (len(table_with_length::spec_get(buckets, spec_table_len(buckets) - 1)) > 0);
+
+ + + + + +### Function `empty` + + +
public(friend) fun empty<T: store>(bucket_size: u64): big_vector::BigVector<T>
+
+ + + + +
aborts_if bucket_size == 0;
+ensures length(result) == 0;
+ensures result.bucket_size == bucket_size;
+
+ + + + + +### Function `singleton` + + +
public(friend) fun singleton<T: store>(element: T, bucket_size: u64): big_vector::BigVector<T>
+
+ + + + +
aborts_if bucket_size == 0;
+ensures length(result) == 1;
+ensures result.bucket_size == bucket_size;
+
+ + + + + +### Function `destroy_empty` + + +
public fun destroy_empty<T>(self: big_vector::BigVector<T>)
+
+ + + + +
aborts_if !is_empty(self);
+
+ + + + + +### Function `borrow` + + +
public fun borrow<T>(self: &big_vector::BigVector<T>, i: u64): &T
+
+ + + + +
aborts_if i >= length(self);
+ensures result == spec_at(self, i);
+
+ + + + + +### Function `borrow_mut` + + +
public fun borrow_mut<T>(self: &mut big_vector::BigVector<T>, i: u64): &mut T
+
+ + + + +
aborts_if i >= length(self);
+ensures result == spec_at(self, i);
+
+ + + + + +### Function `append` + + +
public fun append<T: store>(self: &mut big_vector::BigVector<T>, other: big_vector::BigVector<T>)
+
+ + + + +
pragma verify=false;
+
+ + + + + +### Function `push_back` + + +
public fun push_back<T: store>(self: &mut big_vector::BigVector<T>, val: T)
+
+ + + + +
let num_buckets = spec_table_len(self.buckets);
+include PushbackAbortsIf<T>;
+ensures length(self) == length(old(self)) + 1;
+ensures self.end_index == old(self.end_index) + 1;
+ensures spec_at(self, self.end_index-1) == val;
+ensures forall i in 0..self.end_index-1: spec_at(self, i) == spec_at(old(self), i);
+ensures self.bucket_size == old(self).bucket_size;
+
+ + + + + + + +
schema PushbackAbortsIf<T> {
+    self: BigVector<T>;
+    let num_buckets = spec_table_len(self.buckets);
+    aborts_if num_buckets * self.bucket_size > MAX_U64;
+    aborts_if self.end_index + 1 > MAX_U64;
+}
+
+ + + + + +### Function `pop_back` + + +
public fun pop_back<T>(self: &mut big_vector::BigVector<T>): T
+
+ + + + +
aborts_if is_empty(self);
+ensures length(self) == length(old(self)) - 1;
+ensures result == old(spec_at(self, self.end_index-1));
+ensures forall i in 0..self.end_index: spec_at(self, i) == spec_at(old(self), i);
+
+ + + + + +### Function `remove` + + +
public fun remove<T>(self: &mut big_vector::BigVector<T>, i: u64): T
+
+ + + + +
pragma verify=false;
+
+ + + + + +### Function `swap_remove` + + +
public fun swap_remove<T>(self: &mut big_vector::BigVector<T>, i: u64): T
+
+ + + + +
pragma verify_duration_estimate = 120;
+aborts_if i >= length(self);
+ensures length(self) == length(old(self)) - 1;
+ensures result == spec_at(old(self), i);
+
+ + + + + +### Function `swap` + + +
public fun swap<T>(self: &mut big_vector::BigVector<T>, i: u64, j: u64)
+
+ + + + +
pragma verify_duration_estimate = 1000;
+aborts_if i >= length(self) || j >= length(self);
+ensures length(self) == length(old(self));
+ensures spec_at(self, i) == spec_at(old(self), j);
+ensures spec_at(self, j) == spec_at(old(self), i);
+ensures forall idx in 0..length(self)
+    where idx != i && idx != j:
+    spec_at(self, idx) == spec_at(old(self), idx);
+
+ + + + + +### Function `reverse` + + +
public fun reverse<T>(self: &mut big_vector::BigVector<T>)
+
+ + + + +
pragma verify=false;
+
+ + + + + +### Function `index_of` + + +
public fun index_of<T>(self: &big_vector::BigVector<T>, val: &T): (bool, u64)
+
+ + + + +
pragma verify=false;
+
+ + + + + + + +
fun spec_table_len<K, V>(t: TableWithLength<K, V>): u64 {
+   table_with_length::spec_len(t)
+}
+
+ + + + + + + +
fun spec_table_contains<K, V>(t: TableWithLength<K, V>, k: K): bool {
+   table_with_length::spec_contains(t, k)
+}
+
+ + + + + + + +
fun spec_at<T>(v: BigVector<T>, i: u64): T {
+   let bucket = i / v.bucket_size;
+   let idx = i % v.bucket_size;
+   let v = table_with_length::spec_get(v.buckets, bucket);
+   v[idx]
+}
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/bls12381.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/bls12381.md new file mode 100644 index 0000000000000..edab19cccb525 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/bls12381.md @@ -0,0 +1,1661 @@ + + + +# Module `0x1::bls12381` + +Contains functions for: + +The minimum-pubkey-size variant of [Boneh-Lynn-Shacham (BLS) signatures](https://en.wikipedia.org/wiki/BLS_digital_signature), +where public keys are BLS12-381 elliptic-curve points in $\mathbb{G}_1$ and signatures are in $\mathbb{G}_2$, +as per the [IETF BLS draft standard](https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature#section-2.1). + + +- [Struct `PublicKey`](#0x1_bls12381_PublicKey) +- [Struct `ProofOfPossession`](#0x1_bls12381_ProofOfPossession) +- [Struct `PublicKeyWithPoP`](#0x1_bls12381_PublicKeyWithPoP) +- [Struct `AggrPublicKeysWithPoP`](#0x1_bls12381_AggrPublicKeysWithPoP) +- [Struct `Signature`](#0x1_bls12381_Signature) +- [Struct `AggrOrMultiSignature`](#0x1_bls12381_AggrOrMultiSignature) +- [Constants](#@Constants_0) +- [Function `public_key_from_bytes`](#0x1_bls12381_public_key_from_bytes) +- [Function `public_key_to_bytes`](#0x1_bls12381_public_key_to_bytes) +- [Function `proof_of_possession_from_bytes`](#0x1_bls12381_proof_of_possession_from_bytes) +- [Function `proof_of_possession_to_bytes`](#0x1_bls12381_proof_of_possession_to_bytes) +- [Function `public_key_from_bytes_with_pop`](#0x1_bls12381_public_key_from_bytes_with_pop) +- [Function `public_key_with_pop_to_normal`](#0x1_bls12381_public_key_with_pop_to_normal) +- [Function `public_key_with_pop_to_bytes`](#0x1_bls12381_public_key_with_pop_to_bytes) +- [Function `signature_from_bytes`](#0x1_bls12381_signature_from_bytes) +- [Function `signature_to_bytes`](#0x1_bls12381_signature_to_bytes) +- [Function 
`signature_subgroup_check`](#0x1_bls12381_signature_subgroup_check) +- [Function `aggregate_pubkeys`](#0x1_bls12381_aggregate_pubkeys) +- [Function `aggregate_pubkey_to_bytes`](#0x1_bls12381_aggregate_pubkey_to_bytes) +- [Function `aggregate_signatures`](#0x1_bls12381_aggregate_signatures) +- [Function `aggr_or_multi_signature_to_bytes`](#0x1_bls12381_aggr_or_multi_signature_to_bytes) +- [Function `aggr_or_multi_signature_from_bytes`](#0x1_bls12381_aggr_or_multi_signature_from_bytes) +- [Function `aggr_or_multi_signature_subgroup_check`](#0x1_bls12381_aggr_or_multi_signature_subgroup_check) +- [Function `verify_aggregate_signature`](#0x1_bls12381_verify_aggregate_signature) +- [Function `verify_multisignature`](#0x1_bls12381_verify_multisignature) +- [Function `verify_normal_signature`](#0x1_bls12381_verify_normal_signature) +- [Function `verify_signature_share`](#0x1_bls12381_verify_signature_share) +- [Function `aggregate_pubkeys_internal`](#0x1_bls12381_aggregate_pubkeys_internal) +- [Function `aggregate_signatures_internal`](#0x1_bls12381_aggregate_signatures_internal) +- [Function `validate_pubkey_internal`](#0x1_bls12381_validate_pubkey_internal) +- [Function `signature_subgroup_check_internal`](#0x1_bls12381_signature_subgroup_check_internal) +- [Function `verify_aggregate_signature_internal`](#0x1_bls12381_verify_aggregate_signature_internal) +- [Function `verify_multisignature_internal`](#0x1_bls12381_verify_multisignature_internal) +- [Function `verify_normal_signature_internal`](#0x1_bls12381_verify_normal_signature_internal) +- [Function `verify_proof_of_possession_internal`](#0x1_bls12381_verify_proof_of_possession_internal) +- [Function `verify_signature_share_internal`](#0x1_bls12381_verify_signature_share_internal) +- [Specification](#@Specification_1) + - [Function `public_key_from_bytes`](#@Specification_1_public_key_from_bytes) + - [Function `public_key_from_bytes_with_pop`](#@Specification_1_public_key_from_bytes_with_pop) + - [Function 
`aggregate_pubkeys`](#@Specification_1_aggregate_pubkeys) + - [Function `aggregate_signatures`](#@Specification_1_aggregate_signatures) + - [Function `aggr_or_multi_signature_from_bytes`](#@Specification_1_aggr_or_multi_signature_from_bytes) + - [Function `aggr_or_multi_signature_subgroup_check`](#@Specification_1_aggr_or_multi_signature_subgroup_check) + - [Function `verify_aggregate_signature`](#@Specification_1_verify_aggregate_signature) + - [Function `verify_multisignature`](#@Specification_1_verify_multisignature) + - [Function `verify_normal_signature`](#@Specification_1_verify_normal_signature) + - [Function `verify_signature_share`](#@Specification_1_verify_signature_share) + - [Function `aggregate_pubkeys_internal`](#@Specification_1_aggregate_pubkeys_internal) + - [Function `aggregate_signatures_internal`](#@Specification_1_aggregate_signatures_internal) + - [Function `validate_pubkey_internal`](#@Specification_1_validate_pubkey_internal) + - [Function `signature_subgroup_check_internal`](#@Specification_1_signature_subgroup_check_internal) + - [Function `verify_aggregate_signature_internal`](#@Specification_1_verify_aggregate_signature_internal) + - [Function `verify_multisignature_internal`](#@Specification_1_verify_multisignature_internal) + - [Function `verify_normal_signature_internal`](#@Specification_1_verify_normal_signature_internal) + - [Function `verify_proof_of_possession_internal`](#@Specification_1_verify_proof_of_possession_internal) + - [Function `verify_signature_share_internal`](#@Specification_1_verify_signature_share_internal) + - [Helper functions](#@Helper_functions_2) + + +
use 0x1::error;
+use 0x1::option;
+
+ + + + + +## Struct `PublicKey` + +A *validated* public key that: +(1) is a point in the prime-order subgroup of the BLS12-381 elliptic curve, and +(2) is not the identity point + +This struct can be used to verify a normal (non-aggregated) signature. + +This struct can be combined with a ProofOfPossession struct in order to create a PublicKeyWithPop struct, which +can be used to verify a multisignature. + + +
struct PublicKey has copy, drop, store
+
+ + + +
+Fields + + +
+
+bytes: vector<u8> +
+
+ +
+
+ + +
+ + + +## Struct `ProofOfPossession` + +A proof-of-possession (PoP). +Given such a struct and a PublicKey struct, one can construct a PublicKeyWithPoP (see below). + + +
struct ProofOfPossession has copy, drop, store
+
+ + + +
+Fields + + +
+
+bytes: vector<u8> +
+
+ +
+
+ + +
+ + + +## Struct `PublicKeyWithPoP` + +A *validated* public key that had a successfully-verified proof-of-possession (PoP). + +A vector of these structs can be either: +(1) used to verify an aggregate signature +(2) aggregated with other PublicKeyWithPoP structs into an AggrPublicKeysWithPoP, which in turn can be used +to verify a multisignature + + +
struct PublicKeyWithPoP has copy, drop, store
+
+ + + +
+Fields + + +
+
+bytes: vector<u8> +
+
+ +
+
+ + +
+ + + +## Struct `AggrPublicKeysWithPoP` + +An aggregation of public keys with verified PoPs, which can be used to verify multisignatures. + + +
struct AggrPublicKeysWithPoP has copy, drop, store
+
+ + + +
+Fields + + +
+
+bytes: vector<u8> +
+
+ +
+
+ + +
+ + + +## Struct `Signature` + +A BLS signature. This can be either a: +(1) normal (non-aggregated) signature +(2) signature share (for a multisignature or aggregate signature) + + +
struct Signature has copy, drop, store
+
+ + + +
+Fields + + +
+
+bytes: vector<u8> +
+
+ +
+
+ + +
+ + + +## Struct `AggrOrMultiSignature` + +An aggregation of BLS signatures. This can be either a: +(1) aggregated signature (i.e., an aggregation of signatures s_i, each on a message m_i) +(2) multisignature (i.e., an aggregation of signatures s_i, each on the same message m) + +We distinguish between a Signature type and an AggrOrMultiSignature type to prevent developers from interchangeably +calling verify_multisignature and verify_signature_share to verify both multisignatures and signature shares, +which could create problems down the line. + + 
struct AggrOrMultiSignature has copy, drop, store
+
+ + + +
+Fields + + +
+
+bytes: vector<u8> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +One of the given inputs has the wrong size.s + + +
const EWRONG_SIZE: u64 = 2;
+
+ + + + + +The caller was supposed to input one or more public keys. + + +
const EZERO_PUBKEYS: u64 = 1;
+
+ + + + + +The number of signers does not match the number of messages to be signed. + + +
const E_NUM_SIGNERS_MUST_EQ_NUM_MESSAGES: u64 = 3;
+
+ + + + + +The public key size, in bytes + + +
const PUBLIC_KEY_NUM_BYTES: u64 = 48;
+
+ + + + + +Random signature generated by running cargo test -- bls12381_sample_signature --nocapture --include-ignored in crates/aptos-crypto. +The associated SK is 07416693b6b32c84abe45578728e2379f525729e5b94762435a31e65ecc728da. + + +
const RANDOM_PK: vector<u8> = [138, 83, 231, 174, 82, 112, 227, 231, 101, 205, 138, 64, 50, 194, 231, 124, 111, 126, 135, 164, 78, 187, 133, 191, 40, 164, 215, 134, 85, 101, 105, 143, 151, 83, 70, 113, 66, 98, 249, 228, 124, 111, 62, 13, 93, 149, 22, 96];
+
+ + + + + +Random signature generated by running cargo test -- bls12381_sample_signature --nocapture --include-ignored in crates/aptos-crypto. +The message signed is "Hello Aptos!" and the associated SK is 07416693b6b32c84abe45578728e2379f525729e5b94762435a31e65ecc728da. + + +
const RANDOM_SIGNATURE: vector<u8> = [160, 26, 101, 133, 79, 152, 125, 52, 52, 20, 155, 127, 8, 247, 7, 48, 227, 11, 36, 25, 132, 232, 113, 43, 194, 172, 168, 133, 214, 50, 170, 252, 237, 76, 63, 102, 18, 9, 222, 187, 107, 28, 134, 1, 50, 102, 35, 204, 22, 202, 47, 108, 158, 220, 83, 183, 184, 139, 116, 53, 251, 107, 5, 221, 236, 228, 24, 210, 195, 77, 198, 172, 162, 245, 161, 26, 121, 230, 119, 116, 88, 44, 20, 8, 74, 1, 220, 183, 130, 14, 76, 180, 186, 208, 234, 141];
+
+ + + + + +The signature size, in bytes + + +
const SIGNATURE_SIZE: u64 = 96;
+
+ + + + + +## Function `public_key_from_bytes` + +Creates a new public key from a sequence of bytes. + + +
public fun public_key_from_bytes(bytes: vector<u8>): option::Option<bls12381::PublicKey>
+
+ + + +
+Implementation + + +
public fun public_key_from_bytes(bytes: vector<u8>): Option<PublicKey> {
+    if (validate_pubkey_internal(bytes)) {
+        option::some(PublicKey {
+            bytes
+        })
+    } else {
+        option::none<PublicKey>()
+    }
+}
+
+ + + +
+ + + +## Function `public_key_to_bytes` + +Serializes a public key into 48 bytes. + + +
public fun public_key_to_bytes(pk: &bls12381::PublicKey): vector<u8>
+
+ + + +
+Implementation + + +
public fun public_key_to_bytes(pk: &PublicKey): vector<u8> {
+    pk.bytes
+}
+
+ + + +
+ + + +## Function `proof_of_possession_from_bytes` + +Creates a new proof-of-possession (PoP) which can be later used to create a PublicKeyWithPoP struct, + + +
public fun proof_of_possession_from_bytes(bytes: vector<u8>): bls12381::ProofOfPossession
+
+ + + +
+Implementation + + +
public fun proof_of_possession_from_bytes(bytes: vector<u8>): ProofOfPossession {
+    ProofOfPossession {
+        bytes
+    }
+}
+
+ + + +
+ + + +## Function `proof_of_possession_to_bytes` + +Serializes the signature into 96 bytes. + + +
public fun proof_of_possession_to_bytes(pop: &bls12381::ProofOfPossession): vector<u8>
+
+ + + +
+Implementation + + +
public fun proof_of_possession_to_bytes(pop: &ProofOfPossession): vector<u8> {
+    pop.bytes
+}
+
+ + + +
+ + + +## Function `public_key_from_bytes_with_pop` + +Creates a PoP'd public key from a normal public key and a corresponding proof-of-possession. + + +
public fun public_key_from_bytes_with_pop(pk_bytes: vector<u8>, pop: &bls12381::ProofOfPossession): option::Option<bls12381::PublicKeyWithPoP>
+
+ + + +
+Implementation + + +
public fun public_key_from_bytes_with_pop(pk_bytes: vector<u8>, pop: &ProofOfPossession): Option<PublicKeyWithPoP> {
+    if (verify_proof_of_possession_internal(pk_bytes, pop.bytes)) {
+        option::some(PublicKeyWithPoP {
+            bytes: pk_bytes
+        })
+    } else {
+        option::none<PublicKeyWithPoP>()
+    }
+}
+
+ + + +
+ + + +## Function `public_key_with_pop_to_normal` + +Creates a normal public key from a PoP'd public key. + + +
public fun public_key_with_pop_to_normal(pkpop: &bls12381::PublicKeyWithPoP): bls12381::PublicKey
+
+ + + +
+Implementation + + +
public fun public_key_with_pop_to_normal(pkpop: &PublicKeyWithPoP): PublicKey {
+    PublicKey {
+        bytes: pkpop.bytes
+    }
+}
+
+ + + +
+ + + +## Function `public_key_with_pop_to_bytes` + +Serializes a PoP'd public key into 48 bytes. + + +
public fun public_key_with_pop_to_bytes(pk: &bls12381::PublicKeyWithPoP): vector<u8>
+
+ + + +
+Implementation + + +
public fun public_key_with_pop_to_bytes(pk: &PublicKeyWithPoP): vector<u8> {
+    pk.bytes
+}
+
+ + + +
+ + + +## Function `signature_from_bytes` + +Creates a new signature from a sequence of bytes. Does not check the signature for prime-order subgroup +membership since that is done implicitly during verification. + + +
public fun signature_from_bytes(bytes: vector<u8>): bls12381::Signature
+
+ + + +
+Implementation + + +
public fun signature_from_bytes(bytes: vector<u8>): Signature {
+    Signature {
+        bytes
+    }
+}
+
+ + + +
+ + + +## Function `signature_to_bytes` + +Serializes the signature into 96 bytes. + + +
public fun signature_to_bytes(sig: &bls12381::Signature): vector<u8>
+
+ + + +
+Implementation + + +
public fun signature_to_bytes(sig: &Signature): vector<u8> {
+    sig.bytes
+}
+
+ + + +
+ + + +## Function `signature_subgroup_check` + +Checks that the group element that defines a signature is in the prime-order subgroup. +This check is implicitly performed when verifying any signature via this module, but we expose this functionality +in case it might be useful for applications to easily dismiss invalid signatures early on. + + +
public fun signature_subgroup_check(signature: &bls12381::Signature): bool
+
+ + + +
+Implementation + + +
public fun signature_subgroup_check(signature: &Signature): bool {
+    signature_subgroup_check_internal(signature.bytes)
+}
+
+ + + +
+ + + +## Function `aggregate_pubkeys` + +Given a vector of public keys with verified PoPs, combines them into an *aggregated* public key which can be used +to verify multisignatures using verify_multisignature and aggregate signatures using verify_aggregate_signature. +Aborts if no public keys are given as input. + + +
public fun aggregate_pubkeys(public_keys: vector<bls12381::PublicKeyWithPoP>): bls12381::AggrPublicKeysWithPoP
+
+ + + +
+Implementation + + +
public fun aggregate_pubkeys(public_keys: vector<PublicKeyWithPoP>): AggrPublicKeysWithPoP {
+    let (bytes, success) = aggregate_pubkeys_internal(public_keys);
+    assert!(success, std::error::invalid_argument(EZERO_PUBKEYS));
+
+    AggrPublicKeysWithPoP {
+        bytes
+    }
+}
+
+ + + +
+ + + +## Function `aggregate_pubkey_to_bytes` + +Serializes an aggregate public key into 48 bytes. + + +
public fun aggregate_pubkey_to_bytes(apk: &bls12381::AggrPublicKeysWithPoP): vector<u8>
+
+ + + +
+Implementation + + +
public fun aggregate_pubkey_to_bytes(apk: &AggrPublicKeysWithPoP): vector<u8> {
+    apk.bytes
+}
+
+ + + +
+ + + +## Function `aggregate_signatures` + +Aggregates the input signatures into an aggregate-or-multi-signature structure, which can be later verified via +verify_aggregate_signature or verify_multisignature. Returns None if zero signatures are given as input +or if some of the signatures are not valid group elements. + + +
public fun aggregate_signatures(signatures: vector<bls12381::Signature>): option::Option<bls12381::AggrOrMultiSignature>
+
+ + + +
+Implementation + + +
public fun aggregate_signatures(signatures: vector<Signature>): Option<AggrOrMultiSignature> {
+    let (bytes, success) = aggregate_signatures_internal(signatures);
+    if (success) {
+        option::some(
+            AggrOrMultiSignature {
+                bytes
+            }
+        )
+    } else {
+        option::none<AggrOrMultiSignature>()
+    }
+}
+
+ + + +
+ + + +## Function `aggr_or_multi_signature_to_bytes` + +Serializes an aggregate-or-multi-signature into 96 bytes. + + +
public fun aggr_or_multi_signature_to_bytes(sig: &bls12381::AggrOrMultiSignature): vector<u8>
+
+ + + +
+Implementation + + +
public fun aggr_or_multi_signature_to_bytes(sig: &AggrOrMultiSignature): vector<u8> {
+    sig.bytes
+}
+
+ + + +
+ + + +## Function `aggr_or_multi_signature_from_bytes` + +Deserializes an aggregate-or-multi-signature from 96 bytes. + + +
public fun aggr_or_multi_signature_from_bytes(bytes: vector<u8>): bls12381::AggrOrMultiSignature
+
+ + + +
+Implementation + + +
public fun aggr_or_multi_signature_from_bytes(bytes: vector<u8>): AggrOrMultiSignature {
+    assert!(std::vector::length(&bytes) == SIGNATURE_SIZE, std::error::invalid_argument(EWRONG_SIZE));
+
+    AggrOrMultiSignature {
+        bytes
+    }
+}
+
+ + + +
+ + + +## Function `aggr_or_multi_signature_subgroup_check` + +Checks that the group element that defines an aggregate-or-multi-signature is in the prime-order subgroup. + + +
public fun aggr_or_multi_signature_subgroup_check(signature: &bls12381::AggrOrMultiSignature): bool
+
+ + + +
+Implementation + + +
public fun aggr_or_multi_signature_subgroup_check(signature: &AggrOrMultiSignature): bool {
+    signature_subgroup_check_internal(signature.bytes)
+}
+
+ + + +
+ + + +## Function `verify_aggregate_signature` + +Verifies an aggregate signature, an aggregation of many signatures s_i, each on a different message m_i. + + +
public fun verify_aggregate_signature(aggr_sig: &bls12381::AggrOrMultiSignature, public_keys: vector<bls12381::PublicKeyWithPoP>, messages: vector<vector<u8>>): bool
+
+ + + +
+Implementation + + +
public fun verify_aggregate_signature(
+    aggr_sig: &AggrOrMultiSignature,
+    public_keys: vector<PublicKeyWithPoP>,
+    messages: vector<vector<u8>>,
+): bool {
+    verify_aggregate_signature_internal(aggr_sig.bytes, public_keys, messages)
+}
+
+ + + +
+ + + +## Function `verify_multisignature` + +Verifies a multisignature: an aggregation of many signatures, each on the same message m. + + +
public fun verify_multisignature(multisig: &bls12381::AggrOrMultiSignature, aggr_public_key: &bls12381::AggrPublicKeysWithPoP, message: vector<u8>): bool
+
+ + + +
+Implementation + + +
public fun verify_multisignature(
+    multisig: &AggrOrMultiSignature,
+    aggr_public_key: &AggrPublicKeysWithPoP,
+    message: vector<u8>
+): bool {
+    verify_multisignature_internal(multisig.bytes, aggr_public_key.bytes, message)
+}
+
+ + + +
+ + + +## Function `verify_normal_signature` + +Verifies a normal, non-aggregated signature. + + +
public fun verify_normal_signature(signature: &bls12381::Signature, public_key: &bls12381::PublicKey, message: vector<u8>): bool
+
+ + + +
+Implementation + + +
public fun verify_normal_signature(
+    signature: &Signature,
+    public_key: &PublicKey,
+    message: vector<u8>
+): bool {
+    verify_normal_signature_internal(signature.bytes, public_key.bytes, message)
+}
+
+ + + +
+ + + +## Function `verify_signature_share` + +Verifies a signature share in the multisignature share or an aggregate signature share. + + +
public fun verify_signature_share(signature_share: &bls12381::Signature, public_key: &bls12381::PublicKeyWithPoP, message: vector<u8>): bool
+
+ + + +
+Implementation + + +
public fun verify_signature_share(
+    signature_share: &Signature,
+    public_key: &PublicKeyWithPoP,
+    message: vector<u8>
+): bool {
+    verify_signature_share_internal(signature_share.bytes, public_key.bytes, message)
+}
+
+ + + +
+ + + +## Function `aggregate_pubkeys_internal` + +CRYPTOGRAPHY WARNING: This function assumes that the caller verified all public keys have a valid +proof-of-possesion (PoP) using verify_proof_of_possession. + +Given a vector of serialized public keys, combines them into an aggregated public key, returning (bytes, true), +where bytes store the serialized public key. +Aborts if no public keys are given as input. + + +
fun aggregate_pubkeys_internal(public_keys: vector<bls12381::PublicKeyWithPoP>): (vector<u8>, bool)
+
+ + + +
+Implementation + + +
native fun aggregate_pubkeys_internal(public_keys: vector<PublicKeyWithPoP>): (vector<u8>, bool);
+
+ + + +
+ + + +## Function `aggregate_signatures_internal` + +CRYPTOGRAPHY WARNING: This function can be safely called without verifying that the input signatures are elements +of the prime-order subgroup of the BLS12-381 curve. + +Given a vector of serialized signatures, combines them into an aggregate signature, returning (bytes, true), +where bytes store the serialized signature. +Does not check the input signatures nor the final aggregated signatures for prime-order subgroup membership. +Returns (_, false) if no signatures are given as input. +Does not abort. + + +
fun aggregate_signatures_internal(signatures: vector<bls12381::Signature>): (vector<u8>, bool)
+
+ + + +
+Implementation + + +
native fun aggregate_signatures_internal(signatures: vector<Signature>): (vector<u8>, bool);
+
+ + + +
+ + + +## Function `validate_pubkey_internal` + +Return true if the bytes in public_key are a valid BLS12-381 public key: +(1) it is NOT the identity point, and +(2) it is a BLS12-381 elliptic curve point, and +(3) it is a prime-order point +Return false otherwise. +Does not abort. + + +
fun validate_pubkey_internal(public_key: vector<u8>): bool
+
+ + + +
+Implementation + + +
native fun validate_pubkey_internal(public_key: vector<u8>): bool;
+
+ + + +
+ + + +## Function `signature_subgroup_check_internal` + +Return true if the elliptic curve point serialized in signature: +(1) is NOT the identity point, and +(2) is a BLS12-381 elliptic curve point, and +(3) is a prime-order point +Return false otherwise. +Does not abort. + + +
fun signature_subgroup_check_internal(signature: vector<u8>): bool
+
+ + + +
+Implementation + + +
native fun signature_subgroup_check_internal(signature: vector<u8>): bool;
+
+ + + +
+ + + +## Function `verify_aggregate_signature_internal` + +CRYPTOGRAPHY WARNING: First, this function assumes all public keys have a valid proof-of-possesion (PoP). +This prevents both small-subgroup attacks and rogue-key attacks. Second, this function can be safely called +without verifying that the aggregate signature is in the prime-order subgroup of the BLS12-381 curve. + +Returns true if the aggregate signature aggsig on messages under public_keys verifies (where messages[i] +should be signed by public_keys[i]). + +Returns false if either: +- no public keys or messages are given as input, +- number of messages does not equal number of public keys +- aggsig (1) is the identity point, or (2) is NOT a BLS12-381 elliptic curve point, or (3) is NOT a +prime-order point +Does not abort. + + +
fun verify_aggregate_signature_internal(aggsig: vector<u8>, public_keys: vector<bls12381::PublicKeyWithPoP>, messages: vector<vector<u8>>): bool
+
+ + + +
+Implementation + + +
native fun verify_aggregate_signature_internal(
+    aggsig: vector<u8>,
+    public_keys: vector<PublicKeyWithPoP>,
+    messages: vector<vector<u8>>,
+): bool;
+
+ + + +
+ + + +## Function `verify_multisignature_internal` + +CRYPTOGRAPHY WARNING: This function assumes verified proofs-of-possesion (PoP) for the public keys used in +computing the aggregate public key. This prevents small-subgroup attacks and rogue-key attacks. + +Return true if the BLS multisignature on message verifies against the BLS aggregate public key agg_public_key. +Returns false otherwise. +Does not abort. + + +
fun verify_multisignature_internal(multisignature: vector<u8>, agg_public_key: vector<u8>, message: vector<u8>): bool
+
+ + + +
+Implementation + + +
native fun verify_multisignature_internal(
+    multisignature: vector<u8>,
+    agg_public_key: vector<u8>,
+    message: vector<u8>
+): bool;
+
+ + + +
+ + + +## Function `verify_normal_signature_internal` + +CRYPTOGRAPHY WARNING: This function WILL check that the public key is a prime-order point, in order to prevent +library users from misusing the library by forgetting to validate public keys before giving them as arguments to +this function. + +Returns true if the signature on message verifies under public key. +Returns false otherwise. +Does not abort. + + +
fun verify_normal_signature_internal(signature: vector<u8>, public_key: vector<u8>, message: vector<u8>): bool
+
+ + + +
+Implementation + + +
native fun verify_normal_signature_internal(
+    signature: vector<u8>,
+    public_key: vector<u8>,
+    message: vector<u8>
+): bool;
+
+ + + +
+ + + +## Function `verify_proof_of_possession_internal` + +Return true if the bytes in public_key are a valid bls12381 public key (as per validate_pubkey) +*and* this public key has a valid proof-of-possesion (PoP). +Return false otherwise. +Does not abort. + + +
fun verify_proof_of_possession_internal(public_key: vector<u8>, proof_of_possesion: vector<u8>): bool
+
+ + + +
+Implementation + + +
native fun verify_proof_of_possession_internal(
+    public_key: vector<u8>,
+    proof_of_possesion: vector<u8>
+): bool;
+
+ + + +
+ + + +## Function `verify_signature_share_internal` + +CRYPTOGRAPHY WARNING: Assumes the public key has a valid proof-of-possesion (PoP). This prevents rogue-key +attacks later on during signature aggregation. + +Returns true if the signature_share on message verifies under public key. +Returns false otherwise, similar to verify_multisignature. +Does not abort. + + +
fun verify_signature_share_internal(signature_share: vector<u8>, public_key: vector<u8>, message: vector<u8>): bool
+
+ + + +
+Implementation + + +
native fun verify_signature_share_internal(
+    signature_share: vector<u8>,
+    public_key: vector<u8>,
+    message: vector<u8>
+): bool;
+
+ + + +
+ + + +## Specification + + + + +### Function `public_key_from_bytes` + + +
public fun public_key_from_bytes(bytes: vector<u8>): option::Option<bls12381::PublicKey>
+
+ + + + +
aborts_if false;
+ensures spec_validate_pubkey_internal(bytes) ==> (std::option::spec_is_some(result) && std::option::spec_borrow(result).bytes == bytes);
+ensures !spec_validate_pubkey_internal(bytes) ==> std::option::spec_is_none(result);
+
+ + + + + +### Function `public_key_from_bytes_with_pop` + + +
public fun public_key_from_bytes_with_pop(pk_bytes: vector<u8>, pop: &bls12381::ProofOfPossession): option::Option<bls12381::PublicKeyWithPoP>
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures spec_verify_proof_of_possession_internal(pk_bytes, pop.bytes) ==> (std::option::spec_is_some(result) && std::option::spec_borrow(result).bytes == pk_bytes);
+ensures !spec_verify_proof_of_possession_internal(pk_bytes, pop.bytes) ==> std::option::spec_is_none(result);
+ensures [abstract] result == spec_public_key_from_bytes_with_pop(pk_bytes, pop);
+
+ + + + + +### Function `aggregate_pubkeys` + + +
public fun aggregate_pubkeys(public_keys: vector<bls12381::PublicKeyWithPoP>): bls12381::AggrPublicKeysWithPoP
+
+ + + + +
let bytes = spec_aggregate_pubkeys_internal_1(public_keys);
+let success = spec_aggregate_pubkeys_internal_2(public_keys);
+aborts_if !success;
+ensures result.bytes == bytes;
+
+ + + + + +### Function `aggregate_signatures` + + +
public fun aggregate_signatures(signatures: vector<bls12381::Signature>): option::Option<bls12381::AggrOrMultiSignature>
+
+ + + + +
aborts_if false;
+let bytes = spec_aggregate_signatures_internal_1(signatures);
+let success = spec_aggregate_signatures_internal_2(signatures);
+ensures success ==> (std::option::spec_is_some(result) && std::option::spec_borrow(result).bytes == bytes);
+ensures !success ==> std::option::spec_is_none(result);
+
+ + + + + +### Function `aggr_or_multi_signature_from_bytes` + + +
public fun aggr_or_multi_signature_from_bytes(bytes: vector<u8>): bls12381::AggrOrMultiSignature
+
+ + + + +
aborts_if len(bytes) != SIGNATURE_SIZE;
+ensures result.bytes == bytes;
+
+ + + + + +### Function `aggr_or_multi_signature_subgroup_check` + + +
public fun aggr_or_multi_signature_subgroup_check(signature: &bls12381::AggrOrMultiSignature): bool
+
+ + + + +
aborts_if false;
+ensures result == spec_signature_subgroup_check_internal(signature.bytes);
+
+ + + + + +### Function `verify_aggregate_signature` + + +
public fun verify_aggregate_signature(aggr_sig: &bls12381::AggrOrMultiSignature, public_keys: vector<bls12381::PublicKeyWithPoP>, messages: vector<vector<u8>>): bool
+
+ + + + +
aborts_if false;
+ensures result == spec_verify_aggregate_signature_internal(aggr_sig.bytes, public_keys, messages);
+
+ + + + + +### Function `verify_multisignature` + + +
public fun verify_multisignature(multisig: &bls12381::AggrOrMultiSignature, aggr_public_key: &bls12381::AggrPublicKeysWithPoP, message: vector<u8>): bool
+
+ + + + +
aborts_if false;
+ensures result == spec_verify_multisignature_internal(multisig.bytes, aggr_public_key.bytes, message);
+
+ + + + + +### Function `verify_normal_signature` + + +
public fun verify_normal_signature(signature: &bls12381::Signature, public_key: &bls12381::PublicKey, message: vector<u8>): bool
+
+ + + + +
aborts_if false;
+ensures result == spec_verify_normal_signature_internal(signature.bytes, public_key.bytes, message);
+
+ + + + + +### Function `verify_signature_share` + + +
public fun verify_signature_share(signature_share: &bls12381::Signature, public_key: &bls12381::PublicKeyWithPoP, message: vector<u8>): bool
+
+ + + + +
aborts_if false;
+ensures result == spec_verify_signature_share_internal(signature_share.bytes, public_key.bytes, message);
+
+ + + + + +### Function `aggregate_pubkeys_internal` + + +
fun aggregate_pubkeys_internal(public_keys: vector<bls12381::PublicKeyWithPoP>): (vector<u8>, bool)
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures result_1 == spec_aggregate_pubkeys_internal_1(public_keys);
+ensures result_2 == spec_aggregate_pubkeys_internal_2(public_keys);
+
+ + + + + +### Function `aggregate_signatures_internal` + + +
fun aggregate_signatures_internal(signatures: vector<bls12381::Signature>): (vector<u8>, bool)
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures result_1 == spec_aggregate_signatures_internal_1(signatures);
+ensures result_2 == spec_aggregate_signatures_internal_2(signatures);
+
+ + + + + +### Function `validate_pubkey_internal` + + +
fun validate_pubkey_internal(public_key: vector<u8>): bool
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures result == spec_validate_pubkey_internal(public_key);
+
+ + + + + +### Function `signature_subgroup_check_internal` + + +
fun signature_subgroup_check_internal(signature: vector<u8>): bool
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures result == spec_signature_subgroup_check_internal(signature);
+
+ + + + + +### Function `verify_aggregate_signature_internal` + + +
fun verify_aggregate_signature_internal(aggsig: vector<u8>, public_keys: vector<bls12381::PublicKeyWithPoP>, messages: vector<vector<u8>>): bool
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures result == spec_verify_aggregate_signature_internal(aggsig, public_keys, messages);
+
+ + + + + +### Function `verify_multisignature_internal` + + +
fun verify_multisignature_internal(multisignature: vector<u8>, agg_public_key: vector<u8>, message: vector<u8>): bool
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures result == spec_verify_multisignature_internal(multisignature, agg_public_key, message);
+
+ + + + + +### Function `verify_normal_signature_internal` + + +
fun verify_normal_signature_internal(signature: vector<u8>, public_key: vector<u8>, message: vector<u8>): bool
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures result == spec_verify_normal_signature_internal(signature, public_key, message);
+
+ + + + + +### Function `verify_proof_of_possession_internal` + + +
fun verify_proof_of_possession_internal(public_key: vector<u8>, proof_of_possesion: vector<u8>): bool
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures result == spec_verify_proof_of_possession_internal(public_key, proof_of_possesion);
+
+ + + + + +### Function `verify_signature_share_internal` + + +
fun verify_signature_share_internal(signature_share: vector<u8>, public_key: vector<u8>, message: vector<u8>): bool
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures result == spec_verify_signature_share_internal(signature_share, public_key, message);
+
+ + + + + +### Helper functions + + + + + + +
fun spec_aggregate_pubkeys_internal_1(public_keys: vector<PublicKeyWithPoP>): vector<u8>;
+
+ + + + + + + +
fun spec_public_key_from_bytes_with_pop(pk_bytes: vector<u8>, pop: ProofOfPossession): Option<PublicKeyWithPoP>;
+
+ + + + + + + +
fun spec_aggregate_pubkeys_internal_2(public_keys: vector<PublicKeyWithPoP>): bool;
+
+ + + + + + + +
fun spec_aggregate_signatures_internal_1(signatures: vector<Signature>): vector<u8>;
+
+ + + + + + + +
fun spec_aggregate_signatures_internal_2(signatures: vector<Signature>): bool;
+
+ + + + + + + +
fun spec_validate_pubkey_internal(public_key: vector<u8>): bool;
+
+ + + + + + + +
fun spec_signature_subgroup_check_internal(signature: vector<u8>): bool;
+
+ + + + + + + +
fun spec_verify_aggregate_signature_internal(
+   aggsig: vector<u8>,
+   public_keys: vector<PublicKeyWithPoP>,
+   messages: vector<vector<u8>>,
+): bool;
+
+ + + + + + + +
fun spec_verify_multisignature_internal(
+   multisignature: vector<u8>,
+   agg_public_key: vector<u8>,
+   message: vector<u8>
+): bool;
+
+ + + + + + + +
fun spec_verify_normal_signature_internal(
+   signature: vector<u8>,
+   public_key: vector<u8>,
+   message: vector<u8>
+): bool;
+
+ + + + + + + +
fun spec_verify_proof_of_possession_internal(
+   public_key: vector<u8>,
+   proof_of_possesion: vector<u8>
+): bool;
+
+ + + + + + + +
fun spec_verify_signature_share_internal(
+   signature_share: vector<u8>,
+   public_key: vector<u8>,
+   message: vector<u8>
+): bool;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/bls12381_algebra.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/bls12381_algebra.md new file mode 100644 index 0000000000000..9a6a39faec328 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/bls12381_algebra.md @@ -0,0 +1,631 @@ + + + +# Module `0x1::bls12381_algebra` + +This module defines marker types, constants and test cases for working with BLS12-381 curves +using the generic API defined in algebra.move. +See https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-pairing-friendly-curves-11#name-bls-curves-for-the-128-bit- +for the full specification of BLS12-381 curves. + +Currently-supported BLS12-381 structures include Fq12, Fr, G1, G2 and Gt, +along with their widely-used serialization formats, +the pairing between G1, G2 and Gt, +and the hash-to-curve operations for G1 and G2 defined in https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-16. + +Other unimplemented BLS12-381 structures and serialization formats are also listed here, +as they help define some of the currently supported structures. +Their implementation may also be added in the future. + +Fq: the finite field $F_q$ used in BLS12-381 curves with a prime order $q$ equal to +0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab. + +FormatFqLsb: a serialization format for Fq elements, +where an element is represented by a byte array b[] of size 48 with the least significant byte (LSB) coming first. + +FormatFqMsb: a serialization format for Fq elements, +where an element is represented by a byte array b[] of size 48 with the most significant byte (MSB) coming first. + +Fq2: the finite field $F_{q^2}$ used in BLS12-381 curves, +which is an extension field of Fq, constructed as $F_{q^2}=F_q[u]/(u^2+1)$. 
+ +FormatFq2LscLsb: a serialization format for Fq2 elements, +where an element in the form $(c_0+c_1\cdot u)$ is represented by a byte array b[] of size 96, +which is a concatenation of its coefficients serialized, with the least significant coefficient (LSC) coming first: +- b[0..48] is $c_0$ serialized using FormatFqLsb. +- b[48..96] is $c_1$ serialized using FormatFqLsb. + +FormatFq2MscMsb: a serialization format for Fq2 elements, +where an element in the form $(c_0+c_1\cdot u)$ is represented by a byte array b[] of size 96, +which is a concatenation of its coefficients serialized, with the most significant coefficient (MSC) coming first: +- b[0..48] is $c_1$ serialized using FormatFqLsb. +- b[48..96] is $c_0$ serialized using FormatFqLsb. + +Fq6: the finite field $F_{q^6}$ used in BLS12-381 curves, +which is an extension field of Fq2, constructed as $F_{q^6}=F_{q^2}[v]/(v^3-u-1)$. + +FormatFq6LscLsb: a serialization scheme for Fq6 elements, +where an element in the form $(c_0+c_1\cdot v+c_2\cdot v^2)$ is represented by a byte array b[] of size 288, +which is a concatenation of its coefficients serialized, with the least significant coefficient (LSC) coming first: +- b[0..96] is $c_0$ serialized using FormatFq2LscLsb. +- b[96..192] is $c_1$ serialized using FormatFq2LscLsb. +- b[192..288] is $c_2$ serialized using FormatFq2LscLsb. + +G1Full: a group constructed by the points on the BLS12-381 curve $E(F_q): y^2=x^3+4$ and the point at infinity, +under the elliptic curve point addition. +It contains the prime-order subgroup $G_1$ used in pairing. + +G2Full: a group constructed by the points on a curve $E'(F_{q^2}): y^2=x^3+4(u+1)$ and the point at infinity, +under the elliptic curve point addition. +It contains the prime-order subgroup $G_2$ used in pairing. 
+ + +- [Struct `Fq12`](#0x1_bls12381_algebra_Fq12) +- [Struct `FormatFq12LscLsb`](#0x1_bls12381_algebra_FormatFq12LscLsb) +- [Struct `G1`](#0x1_bls12381_algebra_G1) +- [Struct `FormatG1Uncompr`](#0x1_bls12381_algebra_FormatG1Uncompr) +- [Struct `FormatG1Compr`](#0x1_bls12381_algebra_FormatG1Compr) +- [Struct `G2`](#0x1_bls12381_algebra_G2) +- [Struct `FormatG2Uncompr`](#0x1_bls12381_algebra_FormatG2Uncompr) +- [Struct `FormatG2Compr`](#0x1_bls12381_algebra_FormatG2Compr) +- [Struct `Gt`](#0x1_bls12381_algebra_Gt) +- [Struct `FormatGt`](#0x1_bls12381_algebra_FormatGt) +- [Struct `Fr`](#0x1_bls12381_algebra_Fr) +- [Struct `FormatFrLsb`](#0x1_bls12381_algebra_FormatFrLsb) +- [Struct `FormatFrMsb`](#0x1_bls12381_algebra_FormatFrMsb) +- [Struct `HashG1XmdSha256SswuRo`](#0x1_bls12381_algebra_HashG1XmdSha256SswuRo) +- [Struct `HashG2XmdSha256SswuRo`](#0x1_bls12381_algebra_HashG2XmdSha256SswuRo) + + +
+ + + + + +## Struct `Fq12` + +The finite field $F_{q^12}$ used in BLS12-381 curves, +which is an extension field of Fq6 (defined in the module documentation), constructed as $F_{q^12}=F_{q^6}[w]/(w^2-v)$. + + +
struct Fq12
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `FormatFq12LscLsb` + +A serialization scheme for Fq12 elements, +where an element $(c_0+c_1\cdot w)$ is represented by a byte array b[] of size 576, +which is a concatenation of its coefficients serialized, with the least significant coefficient (LSC) coming first. +- b[0..288] is $c_0$ serialized using FormatFq6LscLsb (defined in the module documentation). +- b[288..576] is $c_1$ serialized using FormatFq6LscLsb. + +NOTE: other implementation(s) using this format: ark-bls12-381-0.4.0. + + +
struct FormatFq12LscLsb
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `G1` + +The group $G_1$ in BLS12-381-based pairing $G_1 \times G_2 \rightarrow G_t$. +It is a subgroup of G1Full (defined in the module documentation) with a prime order $r$ +equal to 0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001. +(so Fr is the associated scalar field). + + +
struct G1
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `FormatG1Uncompr` + +A serialization scheme for G1 elements derived from +https://www.ietf.org/archive/id/draft-irtf-cfrg-pairing-friendly-curves-11.html#name-zcash-serialization-format-. + +Below is the serialization procedure that takes a G1 element p and outputs a byte array of size 96. +1. Let (x,y) be the coordinates of p if p is on the curve, or (0,0) otherwise. +1. Serialize x and y into b_x[] and b_y[] respectively using FormatFqMsb (defined in the module documentation). +1. Concatenate b_x[] and b_y[] into b[]. +1. If p is the point at infinity, set the infinity bit: b[0]: = b[0] | 0x40. +1. Return b[]. + +Below is the deserialization procedure that takes a byte array b[] and outputs either a G1 element or none. +1. If the size of b[] is not 96, return none. +1. Compute the compression flag as b[0] & 0x80 != 0. +1. If the compression flag is true, return none. +1. Compute the infinity flag as b[0] & 0x40 != 0. +1. If the infinity flag is set, return the point at infinity. +1. Deserialize [b[0] & 0x1f, b[1], ..., b[47]] to x using FormatFqMsb. If x is none, return none. +1. Deserialize [b[48], ..., b[95]] to y using FormatFqMsb. If y is none, return none. +1. Check if (x,y) is on curve E. If not, return none. +1. Check if (x,y) is in the subgroup of order r. If not, return none. +1. Return (x,y). + +NOTE: other implementation(s) using this format: ark-bls12-381-0.4.0. + + +
struct FormatG1Uncompr
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `FormatG1Compr` + +A serialization scheme for G1 elements derived from +https://www.ietf.org/archive/id/draft-irtf-cfrg-pairing-friendly-curves-11.html#name-zcash-serialization-format-. + +Below is the serialization procedure that takes a G1 element p and outputs a byte array of size 48. +1. Let (x,y) be the coordinates of p if p is on the curve, or (0,0) otherwise. +1. Serialize x into b[] using FormatFqMsb (defined in the module documentation). +1. Set the compression bit: b[0] := b[0] | 0x80. +1. If p is the point at infinity, set the infinity bit: b[0]: = b[0] | 0x40. +1. If y > -y, set the lexicographical flag: b[0] := b[0] | 0x20. +1. Return b[]. + +Below is the deserialization procedure that takes a byte array b[] and outputs either a G1 element or none. +1. If the size of b[] is not 48, return none. +1. Compute the compression flag as b[0] & 0x80 != 0. +1. If the compression flag is false, return none. +1. Compute the infinity flag as b[0] & 0x40 != 0. +1. If the infinity flag is set, return the point at infinity. +1. Compute the lexicographical flag as b[0] & 0x20 != 0. +1. Deserialize [b[0] & 0x1f, b[1], ..., b[47]] to x using FormatFqMsb. If x is none, return none. +1. Solve the curve equation with x for y. If no such y exists, return none. +1. Let y' be max(y,-y) if the lexicographical flag is set, or min(y,-y) otherwise. +1. Check if (x,y') is in the subgroup of order r. If not, return none. +1. Return (x,y'). + +NOTE: other implementation(s) using this format: ark-bls12-381-0.4.0. + + +
struct FormatG1Compr
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `G2` + +The group $G_2$ in BLS12-381-based pairing $G_1 \times G_2 \rightarrow G_t$. +It is a subgroup of G2Full (defined in the module documentation) with a prime order $r$ equal to +0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001. +(so Fr is the scalar field). + + +
struct G2
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `FormatG2Uncompr` + +A serialization scheme for G2 elements derived from +https://www.ietf.org/archive/id/draft-irtf-cfrg-pairing-friendly-curves-11.html#name-zcash-serialization-format-. + +Below is the serialization procedure that takes a G2 element p and outputs a byte array of size 192. +1. Let (x,y) be the coordinates of p if p is on the curve, or (0,0) otherwise. +1. Serialize x and y into b_x[] and b_y[] respectively using FormatFq2MscMsb (defined in the module documentation). +1. Concatenate b_x[] and b_y[] into b[]. +1. If p is the point at infinity, set the infinity bit in b[]: b[0]: = b[0] | 0x40. +1. Return b[]. + +Below is the deserialization procedure that takes a byte array b[] and outputs either a G2 element or none. +1. If the size of b[] is not 192, return none. +1. Compute the compression flag as b[0] & 0x80 != 0. +1. If the compression flag is true, return none. +1. Compute the infinity flag as b[0] & 0x40 != 0. +1. If the infinity flag is set, return the point at infinity. +1. Deserialize [b[0] & 0x1f, ..., b[95]] to x using FormatFq2MscMsb. If x is none, return none. +1. Deserialize [b[96], ..., b[191]] to y using FormatFq2MscMsb. If y is none, return none. +1. Check if (x,y) is on the curve E'. If not, return none. +1. Check if (x,y) is in the subgroup of order r. If not, return none. +1. Return (x,y). + +NOTE: other implementation(s) using this format: ark-bls12-381-0.4.0. + + +
struct FormatG2Uncompr
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `FormatG2Compr` + +A serialization scheme for G2 elements derived from +https://www.ietf.org/archive/id/draft-irtf-cfrg-pairing-friendly-curves-11.html#name-zcash-serialization-format-. + +Below is the serialization procedure that takes a G2 element p and outputs a byte array of size 96. +1. Let (x,y) be the coordinates of p if p is on the curve, or (0,0) otherwise. +1. Serialize x into b[] using FormatFq2MscMsb (defined in the module documentation). +1. Set the compression bit: b[0] := b[0] | 0x80. +1. If p is the point at infinity, set the infinity bit: b[0]: = b[0] | 0x40. +1. If y > -y, set the lexicographical flag: b[0] := b[0] | 0x20. +1. Return b[]. + +Below is the deserialization procedure that takes a byte array b[] and outputs either a G2 element or none. +1. If the size of b[] is not 96, return none. +1. Compute the compression flag as b[0] & 0x80 != 0. +1. If the compression flag is false, return none. +1. Compute the infinity flag as b[0] & 0x40 != 0. +1. If the infinity flag is set, return the point at infinity. +1. Compute the lexicographical flag as b[0] & 0x20 != 0. +1. Deserialize [b[0] & 0x1f, b[1], ..., b[95]] to x using FormatFq2MscMsb. If x is none, return none. +1. Solve the curve equation with x for y. If no such y exists, return none. +1. Let y' be max(y,-y) if the lexicographical flag is set, or min(y,-y) otherwise. +1. Check if (x,y') is in the subgroup of order r. If not, return none. +1. Return (x,y'). + +NOTE: other implementation(s) using this format: ark-bls12-381-0.4.0. + + +
struct FormatG2Compr
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `Gt` + +The group $G_t$ in BLS12-381-based pairing $G_1 \times G_2 \rightarrow G_t$. +It is a multiplicative subgroup of Fq12, +with a prime order $r$ equal to 0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001. +(so Fr is the scalar field). +The identity of Gt is 1. + + +
struct Gt
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `FormatGt` + +A serialization scheme for Gt elements. + +To serialize, it treats a Gt element p as an Fq12 element and serialize it using FormatFq12LscLsb. + +To deserialize, it uses FormatFq12LscLsb to try deserializing to an Fq12 element then test the membership in Gt. + +NOTE: other implementation(s) using this format: ark-bls12-381-0.4.0. + + +
struct FormatGt
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `Fr` + +The finite field $F_r$ that can be used as the scalar fields +associated with the groups $G_1$, $G_2$, $G_t$ in BLS12-381-based pairing. + + +
struct Fr
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `FormatFrLsb` + +A serialization format for Fr elements, +where an element is represented by a byte array b[] of size 32 with the least significant byte (LSB) coming first. + +NOTE: other implementation(s) using this format: ark-bls12-381-0.4.0, blst-0.3.7. + + +
struct FormatFrLsb
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `FormatFrMsb` + +A serialization scheme for Fr elements, +where an element is represented by a byte array b[] of size 32 with the most significant byte (MSB) coming first. + +NOTE: other implementation(s) using this format: ark-bls12-381-0.4.0, blst-0.3.7. + + +
struct FormatFrMsb
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `HashG1XmdSha256SswuRo` + +The hash-to-curve suite BLS12381G1_XMD:SHA-256_SSWU_RO_ that hashes a byte array into G1 elements. + +Full specification is defined in https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-16#name-bls12-381-g1. + + +
struct HashG1XmdSha256SswuRo
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `HashG2XmdSha256SswuRo` + +The hash-to-curve suite BLS12381G2_XMD:SHA-256_SSWU_RO_ that hashes a byte array into G2 elements. + +Full specification is defined in https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-16#name-bls12-381-g2. + + +
struct HashG2XmdSha256SswuRo
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/bn254_algebra.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/bn254_algebra.md new file mode 100644 index 0000000000000..e535bcd84fed6 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/bn254_algebra.md @@ -0,0 +1,665 @@ + + + +# Module `0x1::bn254_algebra` + +This module defines marker types, constants and test cases for working with BN254 curves using the generic API defined in algebra.move. +BN254 was sampled as part of the [\[BCTV14\]](https://eprint.iacr.org/2013/879.pdf) paper . +The name denotes that it is a Barreto-Naehrig curve of embedding degree 12, defined over a 254-bit (prime) field. +The scalar field is highly 2-adic which supports subgroups of roots of unity of size <= 2^28. +(as (21888242871839275222246405745257275088548364400416034343698204186575808495617 - 1) mod 2^28 = 0) + +This curve is also implemented in [libff](https://github.com/scipr-lab/libff/tree/master/libff/algebra/curves/alt_bn128) under the name bn128. +It is the same as the bn254 curve used in Ethereum (eg: [go-ethereum](https://github.com/ethereum/go-ethereum/tree/master/crypto/bn254/cloudflare)). 
+ + + +## CAUTION + +**This curve does not satisfy the 128-bit security level anymore.** + +Its current security is estimated at 100 bits (see "Updating Key Size Estimations for Pairings"; by Barbulescu, Razvan and Duquesne, Sylvain; in Journal of Cryptology; 2019; https://doi.org/10.1007/s00145-018-9280-5) + + +Curve information: +* Base field: q = +21888242871839275222246405745257275088696311157297823662689037894645226208583 +* Scalar field: r = +21888242871839275222246405745257275088548364400416034343698204186575808495617 +* valuation(q - 1, 2) = 1 +* valuation(r - 1, 2) = 28 +* G1 curve equation: y^2 = x^3 + 3 +* G2 curve equation: y^2 = x^3 + B, where +* B = 3/(u+9) where Fq2 is represented as Fq\[u\]/(u^2+1) = +Fq2(19485874751759354771024239261021720505790618469301721065564631296452457478373, +266929791119991161246907387137283842545076965332900288569378510910307636690) + + +Currently-supported BN254 structures include Fq12, Fr, Fq, Fq2, G1, G2 and Gt, +along with their widely-used serialization formats, +the pairing between G1, G2 and Gt. + +Other unimplemented BN254 structures and serialization formats are also listed here, +as they help define some of the currently supported structures. +Their implementation may also be added in the future. + +Fq2: The finite field $F_{q^2}$ that can be used as the base field of $G_2$ +which is an extension field of Fq, constructed as $F_{q^2}=F_{q}[u]/(u^2+1)$. + +FormatFq2LscLsb: A serialization scheme for Fq2 elements, +where an element $(c_0+c_1\cdot u)$ is represented by a byte array b[] of size N=64, +which is a concatenation of its coefficients serialized, with the least significant coefficient (LSC) coming first. +- b[0..32] is $c_0$ serialized using FormatFqLsb. +- b[32..64] is $c_1$ serialized using FormatFqLsb. + +Fq6: the finite field $F_{q^6}$ used in BN254 curves, +which is an extension field of Fq2, constructed as $F_{q^6}=F_{q^2}[v]/(v^3-u-9)$. 
+ +FormatFq6LscLsb: a serialization scheme for Fq6 elements, +where an element in the form $(c_0+c_1\cdot v+c_2\cdot v^2)$ is represented by a byte array b[] of size 192, +which is a concatenation of its coefficients serialized, with the least significant coefficient (LSC) coming first: +- b[0..64] is $c_0$ serialized using FormatFq2LscLsb. +- b[64..128] is $c_1$ serialized using FormatFq2LscLsb. +- b[128..192] is $c_2$ serialized using FormatFq2LscLsb. + +G1Full: a group constructed by the points on the BN254 curve $E(F_q): y^2=x^3+3$ and the point at infinity, +under the elliptic curve point addition. +It contains the prime-order subgroup $G_1$ used in pairing. + +G2Full: a group constructed by the points on a curve $E'(F_{q^2}): y^2=x^3+3/(u+9)$ and the point at infinity, +under the elliptic curve point addition. +It contains the prime-order subgroup $G_2$ used in pairing. + + +- [CAUTION](#@CAUTION_0) +- [Struct `Fr`](#0x1_bn254_algebra_Fr) +- [Struct `FormatFrLsb`](#0x1_bn254_algebra_FormatFrLsb) +- [Struct `FormatFrMsb`](#0x1_bn254_algebra_FormatFrMsb) +- [Struct `Fq`](#0x1_bn254_algebra_Fq) +- [Struct `FormatFqLsb`](#0x1_bn254_algebra_FormatFqLsb) +- [Struct `FormatFqMsb`](#0x1_bn254_algebra_FormatFqMsb) +- [Struct `Fq12`](#0x1_bn254_algebra_Fq12) +- [Struct `FormatFq12LscLsb`](#0x1_bn254_algebra_FormatFq12LscLsb) +- [Struct `G1`](#0x1_bn254_algebra_G1) +- [Struct `FormatG1Uncompr`](#0x1_bn254_algebra_FormatG1Uncompr) +- [Struct `FormatG1Compr`](#0x1_bn254_algebra_FormatG1Compr) +- [Struct `G2`](#0x1_bn254_algebra_G2) +- [Struct `FormatG2Uncompr`](#0x1_bn254_algebra_FormatG2Uncompr) +- [Struct `FormatG2Compr`](#0x1_bn254_algebra_FormatG2Compr) +- [Struct `Gt`](#0x1_bn254_algebra_Gt) +- [Struct `FormatGt`](#0x1_bn254_algebra_FormatGt) + + +
+ + + + + +## Struct `Fr` + +The finite field $F_r$ that can be used as the scalar fields +associated with the groups $G_1$, $G_2$, $G_t$ in BN254-based pairing. + + +
struct Fr
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `FormatFrLsb` + +A serialization format for Fr elements, +where an element is represented by a byte array b[] of size 32 with the least significant byte (LSB) coming first. + +NOTE: other implementation(s) using this format: ark-bn254-0.4.0. + + +
struct FormatFrLsb
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `FormatFrMsb` + +A serialization scheme for Fr elements, +where an element is represented by a byte array b[] of size 32 with the most significant byte (MSB) coming first. + +NOTE: other implementation(s) using this format: ark-bn254-0.4.0. + + +
struct FormatFrMsb
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `Fq` + +The finite field $F_q$ that can be used as the base field of $G_1$ + + +
struct Fq
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `FormatFqLsb` + +A serialization format for Fq elements, +where an element is represented by a byte array b[] of size 32 with the least significant byte (LSB) coming first. + +NOTE: other implementation(s) using this format: ark-bn254-0.4.0. + + +
struct FormatFqLsb
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `FormatFqMsb` + +A serialization scheme for Fq elements, +where an element is represented by a byte array b[] of size 32 with the most significant byte (MSB) coming first. + +NOTE: other implementation(s) using this format: ark-bn254-0.4.0. + + +
struct FormatFqMsb
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `Fq12` + +The finite field $F_{q^12}$ used in BN254 curves, +which is an extension field of Fq6 (defined in the module documentation), constructed as $F_{q^12}=F_{q^6}[w]/(w^2-v)$. +The field can downcast to Gt if it's an element of the multiplicative subgroup Gt of Fq12 +with a prime order $r$ = 0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001. + + +
struct Fq12
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `FormatFq12LscLsb` + +A serialization scheme for Fq12 elements, +where an element $(c_0+c_1\cdot w)$ is represented by a byte array b[] of size 384, +which is a concatenation of its coefficients serialized, with the least significant coefficient (LSC) coming first. +- b[0..192] is $c_0$ serialized using FormatFq6LscLsb (defined in the module documentation). +- b[192..384] is $c_1$ serialized using FormatFq6LscLsb. + +NOTE: other implementation(s) using this format: ark-bn254-0.4.0. + + +
struct FormatFq12LscLsb
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `G1` + +The group $G_1$ in BN254-based pairing $G_1 \times G_2 \rightarrow G_t$. +It is a subgroup of G1Full (defined in the module documentation) with a prime order $r$ +equal to 0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001. +(so Fr is the associated scalar field). + + +
struct G1
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `FormatG1Uncompr` + +A serialization scheme for G1 elements derived from arkworks.rs. + +Below is the serialization procedure that takes a G1 element p and outputs a byte array of size N=64. +1. Let (x,y) be the coordinates of p if p is on the curve, or (0,0) otherwise. +1. Serialize x and y into b_x[] and b_y[] respectively using FormatFqLsb (defined in the module documentation). +1. Concatenate b_x[] and b_y[] into b[]. +1. If p is the point at infinity, set the infinity bit: b[N-1]: = b[N-1] | 0b0100_0000. +1. If y > -y, set the lexicographical bit: b[N-1]: = b[N-1] | 0b1000_0000. +1. Return b[]. + +Below is the deserialization procedure that takes a byte array b[] and outputs either a G1 element or none. +1. If the size of b[] is not N, return none. +1. Compute the infinity flag as b[N-1] & 0b0100_0000 != 0. +1. If the infinity flag is set, return the point at infinity. +1. Deserialize [b[0], b[1], ..., b[N/2-1]] to x using FormatFqLsb. If x is none, return none. +1. Deserialize [b[N/2], ..., b[N-1] & 0b0011_1111] to y using FormatFqLsb. If y is none, return none. +1. Check if (x,y) is on curve E. If not, return none. +1. Check if (x,y) is in the subgroup of order r. If not, return none. +1. Return (x,y). + +NOTE: other implementation(s) using this format: ark-bn254-0.4.0. + + +
struct FormatG1Uncompr
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `FormatG1Compr` + +A serialization scheme for G1 elements derived from arkworks.rs + +Below is the serialization procedure that takes a G1 element p and outputs a byte array of size N=32. +1. Let (x,y) be the coordinates of p if p is on the curve, or (0,0) otherwise. +1. Serialize x into b[] using FormatFqLsb (defined in the module documentation). +1. If p is the point at infinity, set the infinity bit: b[N-1]: = b[N-1] | 0b0100_0000. +1. If y > -y, set the lexicographical flag: b[N-1] := b[N-1] | 0b1000_0000. +1. Return b[]. + +Below is the deserialization procedure that takes a byte array b[] and outputs either a G1 element or none. +1. If the size of b[] is not N, return none. +1. Compute the infinity flag as b[N-1] & 0b0100_0000 != 0. +1. If the infinity flag is set, return the point at infinity. +1. Compute the lexicographical flag as b[N-1] & 0b1000_0000 != 0. +1. Deserialize [b[0], b[1], ..., b[N-1] & 0b0011_1111] to x using FormatFqLsb. If x is none, return none. +1. Solve the curve equation with x for y. If no such y exists, return none. +1. Let y' be max(y,-y) if the lexicographical flag is set, or min(y,-y) otherwise. +1. Check if (x,y') is in the subgroup of order r. If not, return none. +1. Return (x,y'). + +NOTE: other implementation(s) using this format: ark-bn254-0.4.0. + + +
struct FormatG1Compr
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `G2` + +The group $G_2$ in BN254-based pairing $G_1 \times G_2 \rightarrow G_t$. +It is a subgroup of G2Full (defined in the module documentation) with a prime order $r$ equal to +0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001. +(so Fr is the scalar field). + + +
struct G2
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `FormatG2Uncompr` + +A serialization scheme for G2 elements derived from arkworks.rs. + +Below is the serialization procedure that takes a G2 element p and outputs a byte array of size N=128. +1. Let (x,y) be the coordinates of p if p is on the curve, or (0,0) otherwise. +1. Serialize x and y into b_x[] and b_y[] respectively using FormatFq2LscLsb (defined in the module documentation). +1. Concatenate b_x[] and b_y[] into b[]. +1. If p is the point at infinity, set the infinity bit: b[N-1]: = b[N-1] | 0b0100_0000. +1. If y > -y, set the lexicographical bit: b[N-1]: = b[N-1] | 0b1000_0000. +1. Return b[]. + +Below is the deserialization procedure that takes a byte array b[] and outputs either a G2 element or none. +1. If the size of b[] is not N, return none. +1. Compute the infinity flag as b[N-1] & 0b0100_0000 != 0. +1. If the infinity flag is set, return the point at infinity. +1. Deserialize [b[0], b[1], ..., b[N/2-1]] to x using FormatFq2LscLsb. If x is none, return none. +1. Deserialize [b[N/2], ..., b[N-1] & 0b0011_1111] to y using FormatFq2LscLsb. If y is none, return none. +1. Check if (x,y) is on curve E'. If not, return none. +1. Check if (x,y) is in the subgroup of order r. If not, return none. +1. Return (x,y). + +NOTE: other implementation(s) using this format: ark-bn254-0.4.0. + + +
struct FormatG2Uncompr
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `FormatG2Compr` + +A serialization scheme for G2 elements derived from arkworks.rs + +Below is the serialization procedure that takes a G2 element p and outputs a byte array of size N=64. +1. Let (x,y) be the coordinates of p if p is on the curve, or (0,0) otherwise. +1. Serialize x into b[] using FormatFq2LscLsb (defined in the module documentation). +1. If p is the point at infinity, set the infinity bit: b[N-1]: = b[N-1] | 0b0100_0000. +1. If y > -y, set the lexicographical flag: b[N-1] := b[N-1] | 0b1000_0000. +1. Return b[]. + +Below is the deserialization procedure that takes a byte array b[] and outputs either a G2 element or none. +1. If the size of b[] is not N, return none. +1. Compute the infinity flag as b[N-1] & 0b0100_0000 != 0. +1. If the infinity flag is set, return the point at infinity. +1. Compute the lexicographical flag as b[N-1] & 0b1000_0000 != 0. +1. Deserialize [b[0], b[1], ..., b[N-1] & 0b0011_1111] to x using FormatFq2LscLsb. If x is none, return none. +1. Solve the curve equation with x for y. If no such y exists, return none. +1. Let y' be max(y,-y) if the lexicographical flag is set, or min(y,-y) otherwise. +1. Check if (x,y') is in the subgroup of order r. If not, return none. +1. Return (x,y'). + +NOTE: other implementation(s) using this format: ark-bn254-0.4.0. + + +
struct FormatG2Compr
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `Gt` + +The group $G_t$ in BN254-based pairing $G_1 \times G_2 \rightarrow G_t$. +It is a multiplicative subgroup of Fq12, so it can upcast to Fq12, +with a prime order $r$ equal to 0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001. +(so Fr is the scalar field). +The identity of Gt is 1. + + +
struct Gt
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `FormatGt` + +A serialization scheme for Gt elements. + +To serialize, it treats a Gt element p as an Fq12 element and serialize it using FormatFq12LscLsb. + +To deserialize, it uses FormatFq12LscLsb to try deserializing to an Fq12 element then test the membership in Gt. + +NOTE: other implementation(s) using this format: ark-bn254-0.4.0. + + +
struct FormatGt
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/capability.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/capability.md new file mode 100644 index 0000000000000..b9a8aa8399f69 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/capability.md @@ -0,0 +1,735 @@ + + + +# Module `0x1::capability` + +A module which defines the basic concept of +[*capabilities*](https://en.wikipedia.org/wiki/Capability-based_security) for managing access control. + +EXPERIMENTAL + + + + +## Overview + + +A capability is an unforgeable token which testifies that a signer has authorized a certain operation. +The token is valid during the transaction where it is obtained. Since the type capability::Cap has +no ability to be stored in global memory, capabilities cannot leak out of a transaction. For every function +called within a transaction which has a capability as a parameter, it is guaranteed that the capability +has been obtained via a proper signer-based authorization step previously in the transaction's execution. + + + + +### Usage + + +Initializing and acquiring capabilities is usually encapsulated in a module with a type +tag which can only be constructed by this module. + +``` +module Pkg::Feature { +use std::capability::Cap; + +/// A type tag used in Cap. Only this module can create an instance, +/// and there is no public function other than Self::acquire which returns a value of this type. +/// This way, this module has full control how Cap is given out. +struct Feature has drop {} + +/// Initializes this module. +public fun initialize(s: &signer) { +// Create capability. This happens once at module initialization time. +// One needs to provide a witness for being the owner of Feature +// in the 2nd parameter. +<> +capability::create(s, &Feature{}); +} + +/// Acquires the capability to work with this feature. 
+public fun acquire(s: &signer): Cap { +<> +capability::acquire(s, &Feature{}); +} + +/// Does something related to the feature. The caller must pass a Cap. +public fun do_something(_cap: Cap) { ... } +} +``` + + + + +### Delegation + + +Capabilities come with the optional feature of *delegation*. Via Self::delegate, an owner of a capability +can designate another signer to be also capable of acquiring the capability. Like the original creator, +the delegate needs to present his signer to obtain the capability in his transactions. Delegation can +be revoked via Self::revoke, removing this access right from the delegate. + +While the basic authorization mechanism for delegates is the same as with core capabilities, the +target of delegation might be subject of restrictions which need to be specified and verified. This can +be done via global invariants in the specification language. For example, in order to prevent delegation +all together for a capability, one can use the following invariant: + +``` +invariant forall a: address where capability::spec_has_cap(a): +len(capability::spec_delegates(a)) == 0; +``` + +Similarly, the following invariant would enforce that delegates, if existent, must satisfy a certain +predicate: + +``` +invariant forall a: address where capability::spec_has_cap(a): +forall d in capability::spec_delegates(a): +is_valid_delegate_for_feature(d); +``` + + +- [Overview](#@Overview_0) + - [Usage](#@Usage_1) + - [Delegation](#@Delegation_2) +- [Struct `Cap`](#0x1_capability_Cap) +- [Struct `LinearCap`](#0x1_capability_LinearCap) +- [Resource `CapState`](#0x1_capability_CapState) +- [Resource `CapDelegateState`](#0x1_capability_CapDelegateState) +- [Constants](#@Constants_3) +- [Function `create`](#0x1_capability_create) +- [Function `acquire`](#0x1_capability_acquire) +- [Function `acquire_linear`](#0x1_capability_acquire_linear) +- [Function `validate_acquire`](#0x1_capability_validate_acquire) +- [Function 
`root_addr`](#0x1_capability_root_addr) +- [Function `linear_root_addr`](#0x1_capability_linear_root_addr) +- [Function `delegate`](#0x1_capability_delegate) +- [Function `revoke`](#0x1_capability_revoke) +- [Function `remove_element`](#0x1_capability_remove_element) +- [Function `add_element`](#0x1_capability_add_element) +- [Specification](#@Specification_4) + - [Function `create`](#@Specification_4_create) + - [Function `acquire`](#@Specification_4_acquire) + - [Function `acquire_linear`](#@Specification_4_acquire_linear) + - [Function `delegate`](#@Specification_4_delegate) + - [Function `revoke`](#@Specification_4_revoke) + - [Function `remove_element`](#@Specification_4_remove_element) + - [Function `add_element`](#@Specification_4_add_element) + + +
use 0x1::error;
+use 0x1::signer;
+use 0x1::vector;
+
+ + + + + +## Struct `Cap` + +The token representing an acquired capability. Cannot be stored in memory, but copied and dropped freely. + + +
struct Cap<Feature> has copy, drop
+
+ + + +
+Fields + + +
+
+root: address +
+
+ +
+
+ + +
+ + + +## Struct `LinearCap` + +A linear version of a capability token. This can be used if an acquired capability should be enforced +to be used only once for an authorization. + + +
struct LinearCap<Feature> has drop
+
+ + + +
+Fields + + +
+
+root: address +
+
+ +
+
+ + +
+ + + +## Resource `CapState` + +An internal data structure for representing a configured capability. + + +
struct CapState<Feature> has key
+
+ + + +
+Fields + + +
+
+delegates: vector<address> +
+
+ +
+
+ + +
+ + + +## Resource `CapDelegateState` + +An internal data structure for representing a configured delegated capability. + + +
struct CapDelegateState<Feature> has key
+
+ + + +
+Fields + + +
+
+root: address +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Capability resource already exists on the specified account + + +
const ECAPABILITY_ALREADY_EXISTS: u64 = 1;
+
+ + + + + +Capability resource not found + + +
const ECAPABILITY_NOT_FOUND: u64 = 2;
+
+ + + + + +Account does not have delegated permissions + + +
const EDELEGATE: u64 = 3;
+
+ + + + + +## Function `create` + +Creates a new capability class, owned by the passed signer. A caller must pass a witness that +they own the Feature type parameter. + + +
public fun create<Feature>(owner: &signer, _feature_witness: &Feature)
+
+ + + +
+Implementation + + +
public fun create<Feature>(owner: &signer, _feature_witness: &Feature) {
+    let addr = signer::address_of(owner);
+    assert!(!exists<CapState<Feature>>(addr), error::already_exists(ECAPABILITY_ALREADY_EXISTS));
+    move_to<CapState<Feature>>(owner, CapState { delegates: vector::empty() });
+}
+
+ + + +
+ + + +## Function `acquire` + +Acquires a capability token. Only the owner of the capability class, or an authorized delegate, +can succeed with this operation. A caller must pass a witness that they own the Feature type +parameter. + + +
public fun acquire<Feature>(requester: &signer, _feature_witness: &Feature): capability::Cap<Feature>
+
+ + + +
+Implementation + + +
public fun acquire<Feature>(requester: &signer, _feature_witness: &Feature): Cap<Feature>
+acquires CapState, CapDelegateState {
+    Cap<Feature> { root: validate_acquire<Feature>(requester) }
+}
+
+ + + +
+ + + +## Function `acquire_linear` + +Acquires a linear capability token. It is up to the module which owns Feature to decide +whether to expose a linear or non-linear capability. + + +
public fun acquire_linear<Feature>(requester: &signer, _feature_witness: &Feature): capability::LinearCap<Feature>
+
+ + + +
+Implementation + + +
public fun acquire_linear<Feature>(requester: &signer, _feature_witness: &Feature): LinearCap<Feature>
+acquires CapState, CapDelegateState {
+    LinearCap<Feature> { root: validate_acquire<Feature>(requester) }
+}
+
+ + + +
+ + + +## Function `validate_acquire` + +Helper to validate an acquire. Returns the root address of the capability. + + +
fun validate_acquire<Feature>(requester: &signer): address
+
+ + + +
+Implementation + + +
fun validate_acquire<Feature>(requester: &signer): address
+acquires CapState, CapDelegateState {
+    let addr = signer::address_of(requester);
+    if (exists<CapDelegateState<Feature>>(addr)) {
+        let root_addr = borrow_global<CapDelegateState<Feature>>(addr).root;
+        // double check that requester is actually registered as a delegate
+        assert!(exists<CapState<Feature>>(root_addr), error::invalid_state(EDELEGATE));
+        assert!(vector::contains(&borrow_global<CapState<Feature>>(root_addr).delegates, &addr),
+            error::invalid_state(EDELEGATE));
+        root_addr
+    } else {
+        assert!(exists<CapState<Feature>>(addr), error::not_found(ECAPABILITY_NOT_FOUND));
+        addr
+    }
+}
+
+ + + +
+ + + +## Function `root_addr` + +Returns the root address associated with the given capability token. Only the owner +of the feature can do this. + + +
public fun root_addr<Feature>(self: capability::Cap<Feature>, _feature_witness: &Feature): address
+
+ + + +
+Implementation + + +
public fun root_addr<Feature>(self: Cap<Feature>, _feature_witness: &Feature): address {
+    self.root
+}
+
+ + + +
+ + + +## Function `linear_root_addr` + +Returns the root address associated with the given linear capability token. + + +
public fun linear_root_addr<Feature>(self: capability::LinearCap<Feature>, _feature_witness: &Feature): address
+
+ + + +
+Implementation + + +
public fun linear_root_addr<Feature>(self: LinearCap<Feature>, _feature_witness: &Feature): address {
+    self.root
+}
+
+ + + +
+ + + +## Function `delegate` + +Registers a delegation relation. If the relation already exists, this function does +nothing. + + +
public fun delegate<Feature>(self: capability::Cap<Feature>, _feature_witness: &Feature, to: &signer)
+
+ + + +
+Implementation + + +
public fun delegate<Feature>(self: Cap<Feature>, _feature_witness: &Feature, to: &signer)
+acquires CapState {
+    let addr = signer::address_of(to);
+    if (exists<CapDelegateState<Feature>>(addr)) return;
+    move_to(to, CapDelegateState<Feature> { root: self.root });
+    add_element(&mut borrow_global_mut<CapState<Feature>>(self.root).delegates, addr);
+}
+
+ + + +
+ + + +## Function `revoke` + +Revokes a delegation relation. If no relation exists, this function does nothing. + + +
public fun revoke<Feature>(self: capability::Cap<Feature>, _feature_witness: &Feature, from: address)
+
+ + + +
+Implementation + + +
public fun revoke<Feature>(self: Cap<Feature>, _feature_witness: &Feature, from: address)
+acquires CapState, CapDelegateState
+{
+    if (!exists<CapDelegateState<Feature>>(from)) return;
+    let CapDelegateState { root: _root } = move_from<CapDelegateState<Feature>>(from);
+    remove_element(&mut borrow_global_mut<CapState<Feature>>(self.root).delegates, &from);
+}
+
+ + + +
+ + + +## Function `remove_element` + +Helper to remove an element from a vector. + + +
fun remove_element<E: drop>(v: &mut vector<E>, x: &E)
+
+ + + +
+Implementation + + +
fun remove_element<E: drop>(v: &mut vector<E>, x: &E) {
+    let (found, index) = vector::index_of(v, x);
+    if (found) {
+        vector::remove(v, index);
+    }
+}
+
+ + + +
+ + + +## Function `add_element` + +Helper to add an element to a vector. + + +
fun add_element<E: drop>(v: &mut vector<E>, x: E)
+
+ + + +
+Implementation + + +
fun add_element<E: drop>(v: &mut vector<E>, x: E) {
+    if (!vector::contains(v, &x)) {
+        vector::push_back(v, x)
+    }
+}
+
+ + + +
+ + + +## Specification + +Helper specification function to check whether a capability exists at address. + + + + + +
fun spec_has_cap<Feature>(addr: address): bool {
+   exists<CapState<Feature>>(addr)
+}
+
+ + +Helper specification function to obtain the delegates of a capability. + + + + + +
fun spec_delegates<Feature>(addr: address): vector<address> {
+   global<CapState<Feature>>(addr).delegates
+}
+
+ + +Helper specification function to check whether a delegated capability exists at address. + + + + + +
fun spec_has_delegate_cap<Feature>(addr: address): bool {
+   exists<CapDelegateState<Feature>>(addr)
+}
+
+ + + + + +### Function `create` + + +
public fun create<Feature>(owner: &signer, _feature_witness: &Feature)
+
+ + + + +
let addr = signer::address_of(owner);
+aborts_if spec_has_cap<Feature>(addr);
+ensures spec_has_cap<Feature>(addr);
+
+ + + + + +### Function `acquire` + + +
public fun acquire<Feature>(requester: &signer, _feature_witness: &Feature): capability::Cap<Feature>
+
+ + + + +
let addr = signer::address_of(requester);
+let root_addr = global<CapDelegateState<Feature>>(addr).root;
+include AcquireSchema<Feature>;
+ensures spec_has_delegate_cap<Feature>(addr) ==> result.root == root_addr;
+ensures !spec_has_delegate_cap<Feature>(addr) ==> result.root == addr;
+
+ + + + + +### Function `acquire_linear` + + +
public fun acquire_linear<Feature>(requester: &signer, _feature_witness: &Feature): capability::LinearCap<Feature>
+
+ + + + +
let addr = signer::address_of(requester);
+let root_addr = global<CapDelegateState<Feature>>(addr).root;
+include AcquireSchema<Feature>;
+ensures spec_has_delegate_cap<Feature>(addr) ==> result.root == root_addr;
+ensures !spec_has_delegate_cap<Feature>(addr) ==> result.root == addr;
+
+ + + + + + + +
schema AcquireSchema<Feature> {
+    addr: address;
+    root_addr: address;
+    aborts_if spec_has_delegate_cap<Feature>(addr) && !spec_has_cap<Feature>(root_addr);
+    aborts_if spec_has_delegate_cap<Feature>(addr) && !vector::spec_contains(spec_delegates<Feature>(root_addr), addr);
+    aborts_if !spec_has_delegate_cap<Feature>(addr) && !spec_has_cap<Feature>(addr);
+}
+
+ + + + + +### Function `delegate` + + +
public fun delegate<Feature>(self: capability::Cap<Feature>, _feature_witness: &Feature, to: &signer)
+
+ + + + +
let addr = signer::address_of(to);
+ensures spec_has_delegate_cap<Feature>(addr);
+ensures !old(spec_has_delegate_cap<Feature>(addr)) ==> global<CapDelegateState<Feature>>(addr).root == self.root;
+ensures !old(spec_has_delegate_cap<Feature>(addr)) ==> vector::spec_contains(spec_delegates<Feature>(self.root), addr);
+
+ + + + + +### Function `revoke` + + +
public fun revoke<Feature>(self: capability::Cap<Feature>, _feature_witness: &Feature, from: address)
+
+ + + + +
ensures !spec_has_delegate_cap<Feature>(from);
+
+ + + + + +### Function `remove_element` + + +
fun remove_element<E: drop>(v: &mut vector<E>, x: &E)
+
+ + + + + + +### Function `add_element` + + +
fun add_element<E: drop>(v: &mut vector<E>, x: E)
+
+ + + + +
ensures vector::spec_contains(v, x);
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/comparator.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/comparator.md new file mode 100644 index 0000000000000..0b927d30ac44b --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/comparator.md @@ -0,0 +1,387 @@ + + + +# Module `0x1::comparator` + +Provides a framework for comparing two elements + + +- [Struct `Result`](#0x1_comparator_Result) +- [Constants](#@Constants_0) +- [Function `is_equal`](#0x1_comparator_is_equal) +- [Function `is_smaller_than`](#0x1_comparator_is_smaller_than) +- [Function `is_greater_than`](#0x1_comparator_is_greater_than) +- [Function `compare`](#0x1_comparator_compare) +- [Function `compare_u8_vector`](#0x1_comparator_compare_u8_vector) +- [Specification](#@Specification_1) + - [Struct `Result`](#@Specification_1_Result) + - [Function `is_equal`](#@Specification_1_is_equal) + - [Function `is_smaller_than`](#@Specification_1_is_smaller_than) + - [Function `is_greater_than`](#@Specification_1_is_greater_than) + - [Function `compare`](#@Specification_1_compare) + - [Function `compare_u8_vector`](#@Specification_1_compare_u8_vector) + + +
use 0x1::bcs;
+
+ + + + + +## Struct `Result` + + + +
struct Result has drop
+
+ + + +
+Fields + + +
+
+inner: u8 +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const EQUAL: u8 = 0;
+
+ + + + + + + +
const GREATER: u8 = 2;
+
+ + + + + + + +
const SMALLER: u8 = 1;
+
+ + + + + +## Function `is_equal` + + + +
public fun is_equal(self: &comparator::Result): bool
+
+ + + +
+Implementation + + +
public fun is_equal(self: &Result): bool {
+    self.inner == EQUAL
+}
+
+ + + +
+ + + +## Function `is_smaller_than` + + + +
public fun is_smaller_than(self: &comparator::Result): bool
+
+ + + +
+Implementation + + +
public fun is_smaller_than(self: &Result): bool {
+    self.inner == SMALLER
+}
+
+ + + +
+ + + +## Function `is_greater_than` + + + +
public fun is_greater_than(self: &comparator::Result): bool
+
+ + + +
+Implementation + + +
public fun is_greater_than(self: &Result): bool {
+    self.inner == GREATER
+}
+
+ + + +
+ + + +## Function `compare` + + + +
public fun compare<T>(left: &T, right: &T): comparator::Result
+
+ + + +
+Implementation + + +
public fun compare<T>(left: &T, right: &T): Result {
+    let left_bytes = bcs::to_bytes(left);
+    let right_bytes = bcs::to_bytes(right);
+
+    compare_u8_vector(left_bytes, right_bytes)
+}
+
+ + + +
+ + + +## Function `compare_u8_vector` + + + +
public fun compare_u8_vector(left: vector<u8>, right: vector<u8>): comparator::Result
+
+ + + +
+Implementation + + +
public fun compare_u8_vector(left: vector<u8>, right: vector<u8>): Result {
+    let left_length = vector::length(&left);
+    let right_length = vector::length(&right);
+
+    let idx = 0;
+
+    while (idx < left_length && idx < right_length) {
+        let left_byte = *vector::borrow(&left, idx);
+        let right_byte = *vector::borrow(&right, idx);
+
+        if (left_byte < right_byte) {
+            return Result { inner: SMALLER }
+        } else if (left_byte > right_byte) {
+            return Result { inner: GREATER }
+        };
+        idx = idx + 1;
+    };
+
+    if (left_length < right_length) {
+        Result { inner: SMALLER }
+    } else if (left_length > right_length) {
+        Result { inner: GREATER }
+    } else {
+        Result { inner: EQUAL }
+    }
+}
+
+ + + +
+ + + +## Specification + + + + +### Struct `Result` + + +
struct Result has drop
+
+ + + +
+
+inner: u8 +
+
+ +
+
+ + + +
invariant inner == EQUAL || inner == SMALLER || inner == GREATER;
+
+ + + + + +### Function `is_equal` + + +
public fun is_equal(self: &comparator::Result): bool
+
+ + + + +
aborts_if false;
+let res = self;
+ensures result == (res.inner == EQUAL);
+
+ + + + + +### Function `is_smaller_than` + + +
public fun is_smaller_than(self: &comparator::Result): bool
+
+ + + + +
aborts_if false;
+let res = self;
+ensures result == (res.inner == SMALLER);
+
+ + + + + +### Function `is_greater_than` + + +
public fun is_greater_than(self: &comparator::Result): bool
+
+ + + + +
aborts_if false;
+let res = self;
+ensures result == (res.inner == GREATER);
+
+ + + + + +### Function `compare` + + +
public fun compare<T>(left: &T, right: &T): comparator::Result
+
+ + + + +
let left_bytes = bcs::to_bytes(left);
+let right_bytes = bcs::to_bytes(right);
+ensures result == spec_compare_u8_vector(left_bytes, right_bytes);
+
+ + + + + + + +
fun spec_compare_u8_vector(left: vector<u8>, right: vector<u8>): Result;
+
+ + + + + +### Function `compare_u8_vector` + + +
public fun compare_u8_vector(left: vector<u8>, right: vector<u8>): comparator::Result
+
+ + + + +
pragma unroll = 5;
+pragma opaque;
+aborts_if false;
+let left_length = len(left);
+let right_length = len(right);
+ensures (result.inner == EQUAL) ==> (
+    (left_length == right_length) &&
+        (forall i: u64 where i < left_length: left[i] == right[i])
+);
+ensures (result.inner == SMALLER) ==> (
+    (exists i: u64 where i < left_length:
+        (i < right_length) &&
+            (left[i] < right[i]) &&
+            (forall j: u64 where j < i: left[j] == right[j])
+    ) ||
+        (left_length < right_length)
+);
+ensures (result.inner == GREATER) ==> (
+    (exists i: u64 where i < left_length:
+        (i < right_length) &&
+            (left[i] > right[i]) &&
+            (forall j: u64 where j < i: left[j] == right[j])
+    ) ||
+        (left_length > right_length)
+);
+ensures [abstract] result == spec_compare_u8_vector(left, right);
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/copyable_any.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/copyable_any.md new file mode 100644 index 0000000000000..a56a9a2621b2c --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/copyable_any.md @@ -0,0 +1,230 @@ + + + +# Module `0x1::copyable_any` + + + +- [Struct `Any`](#0x1_copyable_any_Any) +- [Constants](#@Constants_0) +- [Function `pack`](#0x1_copyable_any_pack) +- [Function `unpack`](#0x1_copyable_any_unpack) +- [Function `type_name`](#0x1_copyable_any_type_name) +- [Specification](#@Specification_1) + - [Function `pack`](#@Specification_1_pack) + - [Function `unpack`](#@Specification_1_unpack) + - [Function `type_name`](#@Specification_1_type_name) + + +
use 0x1::bcs;
+use 0x1::error;
+use 0x1::from_bcs;
+use 0x1::string;
+use 0x1::type_info;
+
+ + + + + +## Struct `Any` + +The same as any::Any but with the copy ability. + + +
struct Any has copy, drop, store
+
+ + + +
+Fields + + +
+
+type_name: string::String +
+
+ +
+
+data: vector<u8> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +The type provided for unpack is not the same as was given for pack. + + +
const ETYPE_MISMATCH: u64 = 0;
+
+ + + + + +## Function `pack` + +Pack a value into the Any representation. Because Any can be stored, dropped, and copied, this is +also required of T. + + +
public fun pack<T: copy, drop, store>(x: T): copyable_any::Any
+
+ + + +
+Implementation + + +
public fun pack<T: drop + store + copy>(x: T): Any {
+    Any {
+        type_name: type_info::type_name<T>(),
+        data: bcs::to_bytes(&x)
+    }
+}
+
+ + + +
+ + + +## Function `unpack` + +Unpack a value from the Any representation. This aborts if the value does not have the expected type T. + + +
public fun unpack<T>(self: copyable_any::Any): T
+
+ + + +
+Implementation + + +
public fun unpack<T>(self: Any): T {
+    assert!(type_info::type_name<T>() == self.type_name, error::invalid_argument(ETYPE_MISMATCH));
+    from_bytes<T>(self.data)
+}
+
+ + + +
+ + + +## Function `type_name` + +Returns the type name of this Any + + +
public fun type_name(self: &copyable_any::Any): &string::String
+
+ + + +
+Implementation + + +
public fun type_name(self: &Any): &String {
+    &self.type_name
+}
+
+ + + +
+ + + +## Specification + + + + +### Function `pack` + + +
public fun pack<T: copy, drop, store>(x: T): copyable_any::Any
+
+ + + + +
aborts_if false;
+pragma opaque;
+ensures result == Any {
+    type_name: type_info::type_name<T>(),
+    data: bcs::serialize<T>(x)
+};
+ensures [abstract] from_bcs::deserializable<T>(result.data);
+
+ + + + + +### Function `unpack` + + +
public fun unpack<T>(self: copyable_any::Any): T
+
+ + + + +
include UnpackAbortsIf<T>;
+ensures result == from_bcs::deserialize<T>(self.data);
+
+ + + + + + + +
schema UnpackAbortsIf<T> {
+    self: Any;
+    aborts_if type_info::type_name<T>() != self.type_name;
+    aborts_if !from_bcs::deserializable<T>(self.data);
+}
+
+ + + + + +### Function `type_name` + + +
public fun type_name(self: &copyable_any::Any): &string::String
+
+ + + + +
aborts_if false;
+ensures result == self.type_name;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/crypto_algebra.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/crypto_algebra.md new file mode 100644 index 0000000000000..4aa0fab2c7833 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/crypto_algebra.md @@ -0,0 +1,1755 @@ + + + +# Module `0x1::crypto_algebra` + +This module provides generic structs/functions for operations of algebraic structures (e.g. fields and groups), +which can be used to build generic cryptographic schemes atop. +E.g., a Groth16 ZK proof verifier can be built to work over any pairing supported in this module. + +In general, every structure implements basic operations like (de)serialization, equality check, random sampling. + +A group may also implement the following operations. (Additive group notation is assumed.) +- order() for getting the group order. +- zero() for getting the group identity. +- one() for getting the group generator (if exists). +- neg() for group element inversion. +- add() for group operation (i.e., a group addition). +- sub() for group element subtraction. +- double() for efficient doubling. +- scalar_mul() for group scalar multiplication. +- multi_scalar_mul() for efficient group multi-scalar multiplication. +- hash_to() for hash-to-group. + +A field may also implement the following operations. +- zero() for getting the field additive identity. +- one() for getting the field multiplicative identity. +- add() for field addition. +- sub() for field subtraction. +- mul() for field multiplication. +- div() for field division. +- neg() for field negation. +- inv() for field inversion. +- sqr() for efficient field element squaring. +- from_u64() for quick conversion from u64 to field element. + +For 3 groups that admit a bilinear map, pairing() and multi_pairing() may be implemented. 
+ +For a subset/superset relationship between 2 structures, upcast() and downcast() may be implemented. +E.g., in BLS12-381 pairing, since Gt is a subset of Fq12, +upcast<Gt, Fq12>() and downcast<Fq12, Gt>() will be supported. + +See *_algebra.move for currently implemented algebraic structures. + + +- [Struct `Element`](#0x1_crypto_algebra_Element) +- [Constants](#@Constants_0) +- [Function `eq`](#0x1_crypto_algebra_eq) +- [Function `from_u64`](#0x1_crypto_algebra_from_u64) +- [Function `zero`](#0x1_crypto_algebra_zero) +- [Function `one`](#0x1_crypto_algebra_one) +- [Function `neg`](#0x1_crypto_algebra_neg) +- [Function `add`](#0x1_crypto_algebra_add) +- [Function `sub`](#0x1_crypto_algebra_sub) +- [Function `mul`](#0x1_crypto_algebra_mul) +- [Function `div`](#0x1_crypto_algebra_div) +- [Function `sqr`](#0x1_crypto_algebra_sqr) +- [Function `inv`](#0x1_crypto_algebra_inv) +- [Function `double`](#0x1_crypto_algebra_double) +- [Function `multi_scalar_mul`](#0x1_crypto_algebra_multi_scalar_mul) +- [Function `scalar_mul`](#0x1_crypto_algebra_scalar_mul) +- [Function `multi_pairing`](#0x1_crypto_algebra_multi_pairing) +- [Function `pairing`](#0x1_crypto_algebra_pairing) +- [Function `deserialize`](#0x1_crypto_algebra_deserialize) +- [Function `serialize`](#0x1_crypto_algebra_serialize) +- [Function `order`](#0x1_crypto_algebra_order) +- [Function `upcast`](#0x1_crypto_algebra_upcast) +- [Function `downcast`](#0x1_crypto_algebra_downcast) +- [Function `hash_to`](#0x1_crypto_algebra_hash_to) +- [Function `abort_unless_cryptography_algebra_natives_enabled`](#0x1_crypto_algebra_abort_unless_cryptography_algebra_natives_enabled) +- [Function `handles_from_elements`](#0x1_crypto_algebra_handles_from_elements) +- [Function `add_internal`](#0x1_crypto_algebra_add_internal) +- [Function `deserialize_internal`](#0x1_crypto_algebra_deserialize_internal) +- [Function `div_internal`](#0x1_crypto_algebra_div_internal) +- [Function 
`double_internal`](#0x1_crypto_algebra_double_internal) +- [Function `downcast_internal`](#0x1_crypto_algebra_downcast_internal) +- [Function `from_u64_internal`](#0x1_crypto_algebra_from_u64_internal) +- [Function `eq_internal`](#0x1_crypto_algebra_eq_internal) +- [Function `hash_to_internal`](#0x1_crypto_algebra_hash_to_internal) +- [Function `inv_internal`](#0x1_crypto_algebra_inv_internal) +- [Function `mul_internal`](#0x1_crypto_algebra_mul_internal) +- [Function `multi_pairing_internal`](#0x1_crypto_algebra_multi_pairing_internal) +- [Function `multi_scalar_mul_internal`](#0x1_crypto_algebra_multi_scalar_mul_internal) +- [Function `neg_internal`](#0x1_crypto_algebra_neg_internal) +- [Function `one_internal`](#0x1_crypto_algebra_one_internal) +- [Function `order_internal`](#0x1_crypto_algebra_order_internal) +- [Function `pairing_internal`](#0x1_crypto_algebra_pairing_internal) +- [Function `scalar_mul_internal`](#0x1_crypto_algebra_scalar_mul_internal) +- [Function `serialize_internal`](#0x1_crypto_algebra_serialize_internal) +- [Function `sqr_internal`](#0x1_crypto_algebra_sqr_internal) +- [Function `sub_internal`](#0x1_crypto_algebra_sub_internal) +- [Function `upcast_internal`](#0x1_crypto_algebra_upcast_internal) +- [Function `zero_internal`](#0x1_crypto_algebra_zero_internal) +- [Specification](#@Specification_1) + - [Function `handles_from_elements`](#@Specification_1_handles_from_elements) + - [Function `add_internal`](#@Specification_1_add_internal) + - [Function `deserialize_internal`](#@Specification_1_deserialize_internal) + - [Function `div_internal`](#@Specification_1_div_internal) + - [Function `double_internal`](#@Specification_1_double_internal) + - [Function `downcast_internal`](#@Specification_1_downcast_internal) + - [Function `from_u64_internal`](#@Specification_1_from_u64_internal) + - [Function `eq_internal`](#@Specification_1_eq_internal) + - [Function `hash_to_internal`](#@Specification_1_hash_to_internal) + - [Function 
`inv_internal`](#@Specification_1_inv_internal) + - [Function `mul_internal`](#@Specification_1_mul_internal) + - [Function `multi_pairing_internal`](#@Specification_1_multi_pairing_internal) + - [Function `multi_scalar_mul_internal`](#@Specification_1_multi_scalar_mul_internal) + - [Function `neg_internal`](#@Specification_1_neg_internal) + - [Function `one_internal`](#@Specification_1_one_internal) + - [Function `order_internal`](#@Specification_1_order_internal) + - [Function `pairing_internal`](#@Specification_1_pairing_internal) + - [Function `scalar_mul_internal`](#@Specification_1_scalar_mul_internal) + - [Function `serialize_internal`](#@Specification_1_serialize_internal) + - [Function `sqr_internal`](#@Specification_1_sqr_internal) + - [Function `sub_internal`](#@Specification_1_sub_internal) + - [Function `upcast_internal`](#@Specification_1_upcast_internal) + - [Function `zero_internal`](#@Specification_1_zero_internal) + + +
use 0x1::error;
+use 0x1::features;
+use 0x1::option;
+
+ + + + + +## Struct `Element` + +This struct represents an element of a structure S. + + +
struct Element<S> has copy, drop
+
+ + + +
+Fields + + +
+
+handle: u64 +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const E_NON_EQUAL_LENGTHS: u64 = 2;
+
+ + + + + + + +
const E_NOT_IMPLEMENTED: u64 = 1;
+
+ + + + + + + +
const E_TOO_MUCH_MEMORY_USED: u64 = 3;
+
+ + + + + +## Function `eq` + +Check if x == y for elements x and y of a structure S. + + +
public fun eq<S>(x: &crypto_algebra::Element<S>, y: &crypto_algebra::Element<S>): bool
+
+ + + +
+Implementation + + +
public fun eq<S>(x: &Element<S>, y: &Element<S>): bool {
+    abort_unless_cryptography_algebra_natives_enabled();
+    eq_internal<S>(x.handle, y.handle)
+}
+
+ + + +
+ + + +## Function `from_u64` + +Convert a u64 to an element of a structure S. + + +
public fun from_u64<S>(value: u64): crypto_algebra::Element<S>
+
+ + + +
+Implementation + + +
public fun from_u64<S>(value: u64): Element<S> {
+    abort_unless_cryptography_algebra_natives_enabled();
+    Element<S> {
+        handle: from_u64_internal<S>(value)
+    }
+}
+
+ + + +
+ + + +## Function `zero` + +Return the additive identity of field S, or the identity of group S. + + +
public fun zero<S>(): crypto_algebra::Element<S>
+
+ + + +
+Implementation + + +
public fun zero<S>(): Element<S> {
+    abort_unless_cryptography_algebra_natives_enabled();
+    Element<S> {
+        handle: zero_internal<S>()
+    }
+}
+
+ + + +
+ + + +## Function `one` + +Return the multiplicative identity of field S, or a fixed generator of group S. + + +
public fun one<S>(): crypto_algebra::Element<S>
+
+ + + +
+Implementation + + +
public fun one<S>(): Element<S> {
+    abort_unless_cryptography_algebra_natives_enabled();
+    Element<S> {
+        handle: one_internal<S>()
+    }
+}
+
+ + + +
+ + + +## Function `neg` + +Compute -x for an element x of a structure S. + + +
public fun neg<S>(x: &crypto_algebra::Element<S>): crypto_algebra::Element<S>
+
+ + + +
+Implementation + + +
public fun neg<S>(x: &Element<S>): Element<S> {
+    abort_unless_cryptography_algebra_natives_enabled();
+    Element<S> {
+        handle: neg_internal<S>(x.handle)
+    }
+}
+
+ + + +
+ + + +## Function `add` + +Compute x + y for elements x and y of structure S. + + +
public fun add<S>(x: &crypto_algebra::Element<S>, y: &crypto_algebra::Element<S>): crypto_algebra::Element<S>
+
+ + + +
+Implementation + + +
public fun add<S>(x: &Element<S>, y: &Element<S>): Element<S> {
+    abort_unless_cryptography_algebra_natives_enabled();
+    Element<S> {
+        handle: add_internal<S>(x.handle, y.handle)
+    }
+}
+
+ + + +
+ + + +## Function `sub` + +Compute x - y for elements x and y of a structure S. + + +
public fun sub<S>(x: &crypto_algebra::Element<S>, y: &crypto_algebra::Element<S>): crypto_algebra::Element<S>
+
+ + + +
+Implementation + + +
public fun sub<S>(x: &Element<S>, y: &Element<S>): Element<S> {
+    abort_unless_cryptography_algebra_natives_enabled();
+    Element<S> {
+        handle: sub_internal<S>(x.handle, y.handle)
+    }
+}
+
+ + + +
+ + + +## Function `mul` + +Compute x * y for elements x and y of a structure S. + + +
public fun mul<S>(x: &crypto_algebra::Element<S>, y: &crypto_algebra::Element<S>): crypto_algebra::Element<S>
+
+ + + +
+Implementation + + +
public fun mul<S>(x: &Element<S>, y: &Element<S>): Element<S> {
+    abort_unless_cryptography_algebra_natives_enabled();
+    Element<S> {
+        handle: mul_internal<S>(x.handle, y.handle)
+    }
+}
+
+ + + +
+ + + +## Function `div` + +Try computing x / y for elements x and y of a structure S. +Return none if y does not have a multiplicative inverse in the structure S +(e.g., when S is a field, and y is zero). + + +
public fun div<S>(x: &crypto_algebra::Element<S>, y: &crypto_algebra::Element<S>): option::Option<crypto_algebra::Element<S>>
+
+ + + +
+Implementation + + +
public fun div<S>(x: &Element<S>, y: &Element<S>): Option<Element<S>> {
+    abort_unless_cryptography_algebra_natives_enabled();
+    let (succ, handle) = div_internal<S>(x.handle, y.handle);
+    if (succ) {
+        some(Element<S> { handle })
+    } else {
+        none()
+    }
+}
+
+ + + +
+ + + +## Function `sqr` + +Compute x^2 for an element x of a structure S. Faster and cheaper than mul(x, x). + + +
public fun sqr<S>(x: &crypto_algebra::Element<S>): crypto_algebra::Element<S>
+
+ + + +
+Implementation + + +
public fun sqr<S>(x: &Element<S>): Element<S> {
+    abort_unless_cryptography_algebra_natives_enabled();
+    Element<S> {
+        handle: sqr_internal<S>(x.handle)
+    }
+}
+
+ + + +
+ + + +## Function `inv` + +Try computing x^(-1) for an element x of a structure S. +Return none if x does not have a multiplicative inverse in the structure S +(e.g., when S is a field, and x is zero). + + +
public fun inv<S>(x: &crypto_algebra::Element<S>): option::Option<crypto_algebra::Element<S>>
+
+ + + +
+Implementation + + +
public fun inv<S>(x: &Element<S>): Option<Element<S>> {
+    abort_unless_cryptography_algebra_natives_enabled();
+    let (succeeded, handle) = inv_internal<S>(x.handle);
+    if (succeeded) {
+        let scalar = Element<S> { handle };
+        some(scalar)
+    } else {
+        none()
+    }
+}
+
+ + + +
+ + + +## Function `double` + +Compute 2*P for an element P of a structure S. Faster and cheaper than add(P, P). + + +
public fun double<S>(element_p: &crypto_algebra::Element<S>): crypto_algebra::Element<S>
+
+ + + +
+Implementation + + +
public fun double<S>(element_p: &Element<S>): Element<S> {
+    abort_unless_cryptography_algebra_natives_enabled();
+    Element<S> {
+        handle: double_internal<S>(element_p.handle)
+    }
+}
+
+ + + +
+ + + +## Function `multi_scalar_mul` + +Compute k[0]*P[0]+...+k[n-1]*P[n-1], where +P[] are n elements of group G represented by parameter elements, and +k[] are n elements of the scalar field S of group G represented by parameter scalars. + +Abort with code std::error::invalid_argument(E_NON_EQUAL_LENGTHS) if the sizes of elements and scalars do not match. + + +
public fun multi_scalar_mul<G, S>(elements: &vector<crypto_algebra::Element<G>>, scalars: &vector<crypto_algebra::Element<S>>): crypto_algebra::Element<G>
+
+ + + +
+Implementation + + +
public fun multi_scalar_mul<G, S>(elements: &vector<Element<G>>, scalars: &vector<Element<S>>): Element<G> {
+    let element_handles = handles_from_elements(elements);
+    let scalar_handles = handles_from_elements(scalars);
+    Element<G> {
+        handle: multi_scalar_mul_internal<G, S>(element_handles, scalar_handles)
+    }
+}
+
+ + + +
+ + + +## Function `scalar_mul` + +Compute k*P, where P is an element of a group G and k is an element of the scalar field S associated to the group G. + + +
public fun scalar_mul<G, S>(element_p: &crypto_algebra::Element<G>, scalar_k: &crypto_algebra::Element<S>): crypto_algebra::Element<G>
+
+ + + +
+Implementation + + +
public fun scalar_mul<G, S>(element_p: &Element<G>, scalar_k: &Element<S>): Element<G> {
+    abort_unless_cryptography_algebra_natives_enabled();
+    Element<G> {
+        handle: scalar_mul_internal<G, S>(element_p.handle, scalar_k.handle)
+    }
+}
+
+ + + +
+ + + +## Function `multi_pairing` + +Efficiently compute e(P[0],Q[0])+...+e(P[n-1],Q[n-1]), +where e: (G1,G2) -> (Gt) is the pairing function from groups (G1,G2) to group Gt, +P[] are n elements of group G1 represented by parameter g1_elements, and +Q[] are n elements of group G2 represented by parameter g2_elements. + +Abort with code std::error::invalid_argument(E_NON_EQUAL_LENGTHS) if the sizes of g1_elements and g2_elements do not match. + +NOTE: we are viewing the target group Gt of the pairing as an additive group, +rather than a multiplicative one (which is typically the case). + + +
public fun multi_pairing<G1, G2, Gt>(g1_elements: &vector<crypto_algebra::Element<G1>>, g2_elements: &vector<crypto_algebra::Element<G2>>): crypto_algebra::Element<Gt>
+
+ + + +
+Implementation + + +
public fun multi_pairing<G1,G2,Gt>(g1_elements: &vector<Element<G1>>, g2_elements: &vector<Element<G2>>): Element<Gt> {
+    abort_unless_cryptography_algebra_natives_enabled();
+    let g1_handles = handles_from_elements(g1_elements);
+    let g2_handles = handles_from_elements(g2_elements);
+    Element<Gt> {
+        handle: multi_pairing_internal<G1,G2,Gt>(g1_handles, g2_handles)
+    }
+}
+
+ + + +
+ + + +## Function `pairing` + +Compute the pairing function (a.k.a., bilinear map) on a G1 element and a G2 element. +Return an element in the target group Gt. + + +
public fun pairing<G1, G2, Gt>(element_1: &crypto_algebra::Element<G1>, element_2: &crypto_algebra::Element<G2>): crypto_algebra::Element<Gt>
+
+ + + +
+Implementation + + +
public fun pairing<G1,G2,Gt>(element_1: &Element<G1>, element_2: &Element<G2>): Element<Gt> {
+    abort_unless_cryptography_algebra_natives_enabled();
+    Element<Gt> {
+        handle: pairing_internal<G1,G2,Gt>(element_1.handle, element_2.handle)
+    }
+}
+
+ + + +
+ + + +## Function `deserialize` + +Try deserializing a byte array to an element of an algebraic structure S using a given serialization format F. +Return none if the deserialization failed. + + +
public fun deserialize<S, F>(bytes: &vector<u8>): option::Option<crypto_algebra::Element<S>>
+
+ + + +
+Implementation + + +
public fun deserialize<S, F>(bytes: &vector<u8>): Option<Element<S>> {
+    abort_unless_cryptography_algebra_natives_enabled();
+    let (succeeded, handle) = deserialize_internal<S, F>(bytes);
+    if (succeeded) {
+        some(Element<S> { handle })
+    } else {
+        none()
+    }
+}
+
+ + + +
+ + + +## Function `serialize` + +Serialize an element of an algebraic structure S to a byte array using a given serialization format F. + + +
public fun serialize<S, F>(element: &crypto_algebra::Element<S>): vector<u8>
+
+ + + +
+Implementation + + +
public fun serialize<S, F>(element: &Element<S>): vector<u8> {
+    abort_unless_cryptography_algebra_natives_enabled();
+    serialize_internal<S, F>(element.handle)
+}
+
+ + + +
+ + + +## Function `order` + +Get the order of structure S, a big integer little-endian encoded as a byte array. + + +
public fun order<S>(): vector<u8>
+
+ + + +
+Implementation + + +
public fun order<S>(): vector<u8> {
+    abort_unless_cryptography_algebra_natives_enabled();
+    order_internal<S>()
+}
+
+ + + +
+ + + +## Function `upcast` + +Cast an element of a structure S to a parent structure L. + + +
public fun upcast<S, L>(element: &crypto_algebra::Element<S>): crypto_algebra::Element<L>
+
+ + + +
+Implementation + + +
public fun upcast<S,L>(element: &Element<S>): Element<L> {
+    abort_unless_cryptography_algebra_natives_enabled();
+    Element<L> {
+        handle: upcast_internal<S,L>(element.handle)
+    }
+}
+
+ + + +
+ + + +## Function `downcast` + +Try casting an element x of a structure L to a sub-structure S. +Return none if x is not a member of S. + +NOTE: Membership check in S is performed inside, which can be expensive, depending on the structures L and S. + + +
public fun downcast<L, S>(element_x: &crypto_algebra::Element<L>): option::Option<crypto_algebra::Element<S>>
+
+ + + +
+Implementation + + +
public fun downcast<L,S>(element_x: &Element<L>): Option<Element<S>> {
+    abort_unless_cryptography_algebra_natives_enabled();
+    let (succ, new_handle) = downcast_internal<L,S>(element_x.handle);
+    if (succ) {
+        some(Element<S> { handle: new_handle })
+    } else {
+        none()
+    }
+}
+
+ + + +
+ + + +## Function `hash_to` + +Hash an arbitrary-length byte array msg into structure S with a domain separation tag dst +using the given hash-to-structure suite H. + +NOTE: some hashing methods do not accept a dst and will abort if a non-empty one is provided. + + +
public fun hash_to<S, H>(dst: &vector<u8>, msg: &vector<u8>): crypto_algebra::Element<S>
+
+ + + +
+Implementation + + +
public fun hash_to<S, H>(dst: &vector<u8>, msg: &vector<u8>): Element<S> {
+    abort_unless_cryptography_algebra_natives_enabled();
+    Element {
+        handle: hash_to_internal<S, H>(dst, msg)
+    }
+}
+
+ + + +
+ + + +## Function `abort_unless_cryptography_algebra_natives_enabled` + + + +
fun abort_unless_cryptography_algebra_natives_enabled()
+
+ + + +
+Implementation + + +
fun abort_unless_cryptography_algebra_natives_enabled() {
+    if (features::cryptography_algebra_enabled()) return;
+    abort(std::error::not_implemented(0))
+}
+
+ + + +
+ + + +## Function `handles_from_elements` + + + +
fun handles_from_elements<S>(elements: &vector<crypto_algebra::Element<S>>): vector<u64>
+
+ + + +
+Implementation + + +
fun handles_from_elements<S>(elements: &vector<Element<S>>): vector<u64> {
+    let num_elements = std::vector::length(elements);
+    let element_handles = std::vector::empty();
+    let i = 0;
+    while ({
+        spec {
+            invariant len(element_handles) == i;
+            invariant forall k in 0..i: element_handles[k] == elements[k].handle;
+        };
+        i < num_elements
+    }) {
+        std::vector::push_back(&mut element_handles, std::vector::borrow(elements, i).handle);
+        i = i + 1;
+    };
+    element_handles
+}
+
+ + + +
+ + + +## Function `add_internal` + + + +
fun add_internal<S>(handle_1: u64, handle_2: u64): u64
+
+ + + +
+Implementation + + +
native fun add_internal<S>(handle_1: u64, handle_2: u64): u64;
+
+ + + +
+ + + +## Function `deserialize_internal` + + + +
fun deserialize_internal<S, F>(bytes: &vector<u8>): (bool, u64)
+
+ + + +
+Implementation + + +
native fun deserialize_internal<S, F>(bytes: &vector<u8>): (bool, u64);
+
+ + + +
+ + + +## Function `div_internal` + + + +
fun div_internal<F>(handle_1: u64, handle_2: u64): (bool, u64)
+
+ + + +
+Implementation + + +
native fun div_internal<F>(handle_1: u64, handle_2: u64): (bool, u64);
+
+ + + +
+ + + +## Function `double_internal` + + + +
fun double_internal<G>(element_handle: u64): u64
+
+ + + +
+Implementation + + +
native fun double_internal<G>(element_handle: u64): u64;
+
+ + + +
+ + + +## Function `downcast_internal` + + + +
fun downcast_internal<L, S>(handle: u64): (bool, u64)
+
+ + + +
+Implementation + + +
native fun downcast_internal<L,S>(handle: u64): (bool, u64);
+
+ + + +
+ + + +## Function `from_u64_internal` + + + +
fun from_u64_internal<S>(value: u64): u64
+
+ + + +
+Implementation + + +
native fun from_u64_internal<S>(value: u64): u64;
+
+ + + +
+ + + +## Function `eq_internal` + + + +
fun eq_internal<S>(handle_1: u64, handle_2: u64): bool
+
+ + + +
+Implementation + + +
native fun eq_internal<S>(handle_1: u64, handle_2: u64): bool;
+
+ + + +
+ + + +## Function `hash_to_internal` + + + +
fun hash_to_internal<S, H>(dst: &vector<u8>, bytes: &vector<u8>): u64
+
+ + + +
+Implementation + + +
native fun hash_to_internal<S, H>(dst: &vector<u8>, bytes: &vector<u8>): u64;
+
+ + + +
+ + + +## Function `inv_internal` + + + +
fun inv_internal<F>(handle: u64): (bool, u64)
+
+ + + +
+Implementation + + +
native fun inv_internal<F>(handle: u64): (bool, u64);
+
+ + + +
+ + + +## Function `mul_internal` + + + +
fun mul_internal<F>(handle_1: u64, handle_2: u64): u64
+
+ + + +
+Implementation + + +
native fun mul_internal<F>(handle_1: u64, handle_2: u64): u64;
+
+ + + +
+ + + +## Function `multi_pairing_internal` + + + +
fun multi_pairing_internal<G1, G2, Gt>(g1_handles: vector<u64>, g2_handles: vector<u64>): u64
+
+ + + +
+Implementation + + +
native fun multi_pairing_internal<G1,G2,Gt>(g1_handles: vector<u64>, g2_handles: vector<u64>): u64;
+
+ + + +
+ + + +## Function `multi_scalar_mul_internal` + + + +
fun multi_scalar_mul_internal<G, S>(element_handles: vector<u64>, scalar_handles: vector<u64>): u64
+
+ + + +
+Implementation + + +
native fun multi_scalar_mul_internal<G, S>(element_handles: vector<u64>, scalar_handles: vector<u64>): u64;
+
+ + + +
+ + + +## Function `neg_internal` + + + +
fun neg_internal<F>(handle: u64): u64
+
+ + + +
+Implementation + + +
native fun neg_internal<F>(handle: u64): u64;
+
+ + + +
+ + + +## Function `one_internal` + + + +
fun one_internal<S>(): u64
+
+ + + +
+Implementation + + +
native fun one_internal<S>(): u64;
+
+ + + +
+ + + +## Function `order_internal` + + + +
fun order_internal<G>(): vector<u8>
+
+ + + +
+Implementation + + +
native fun order_internal<G>(): vector<u8>;
+
+ + + +
+ + + +## Function `pairing_internal` + + + +
fun pairing_internal<G1, G2, Gt>(g1_handle: u64, g2_handle: u64): u64
+
+ + + +
+Implementation + + +
native fun pairing_internal<G1,G2,Gt>(g1_handle: u64, g2_handle: u64): u64;
+
+ + + +
+ + + +## Function `scalar_mul_internal` + + + +
fun scalar_mul_internal<G, S>(element_handle: u64, scalar_handle: u64): u64
+
+ + + +
+Implementation + + +
native fun scalar_mul_internal<G, S>(element_handle: u64, scalar_handle: u64): u64;
+
+ + + +
+ + + +## Function `serialize_internal` + + + +
fun serialize_internal<S, F>(handle: u64): vector<u8>
+
+ + + +
+Implementation + + +
native fun serialize_internal<S, F>(handle: u64): vector<u8>;
+
+ + + +
+ + + +## Function `sqr_internal` + + + +
fun sqr_internal<G>(handle: u64): u64
+
+ + + +
+Implementation + + +
native fun sqr_internal<G>(handle: u64): u64;
+
+ + + +
+ + + +## Function `sub_internal` + + + +
fun sub_internal<G>(handle_1: u64, handle_2: u64): u64
+
+ + + +
+Implementation + + +
native fun sub_internal<G>(handle_1: u64, handle_2: u64): u64;
+
+ + + +
+ + + +## Function `upcast_internal` + + + +
fun upcast_internal<S, L>(handle: u64): u64
+
+ + + +
+Implementation + + +
native fun upcast_internal<S,L>(handle: u64): u64;
+
+ + + +
+ + + +## Function `zero_internal` + + + +
fun zero_internal<S>(): u64
+
+ + + +
+Implementation + + +
native fun zero_internal<S>(): u64;
+
+ + + +
+ + + +## Specification + + + + +### Function `handles_from_elements` + + +
fun handles_from_elements<S>(elements: &vector<crypto_algebra::Element<S>>): vector<u64>
+
+ + + + +
aborts_if false;
+ensures forall i in 0..len(elements): result[i] == elements[i].handle;
+
+ + + + + +### Function `add_internal` + + +
fun add_internal<S>(handle_1: u64, handle_2: u64): u64
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `deserialize_internal` + + +
fun deserialize_internal<S, F>(bytes: &vector<u8>): (bool, u64)
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `div_internal` + + +
fun div_internal<F>(handle_1: u64, handle_2: u64): (bool, u64)
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `double_internal` + + +
fun double_internal<G>(element_handle: u64): u64
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `downcast_internal` + + +
fun downcast_internal<L, S>(handle: u64): (bool, u64)
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `from_u64_internal` + + +
fun from_u64_internal<S>(value: u64): u64
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `eq_internal` + + +
fun eq_internal<S>(handle_1: u64, handle_2: u64): bool
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `hash_to_internal` + + +
fun hash_to_internal<S, H>(dst: &vector<u8>, bytes: &vector<u8>): u64
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `inv_internal` + + +
fun inv_internal<F>(handle: u64): (bool, u64)
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `mul_internal` + + +
fun mul_internal<F>(handle_1: u64, handle_2: u64): u64
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `multi_pairing_internal` + + +
fun multi_pairing_internal<G1, G2, Gt>(g1_handles: vector<u64>, g2_handles: vector<u64>): u64
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `multi_scalar_mul_internal` + + +
fun multi_scalar_mul_internal<G, S>(element_handles: vector<u64>, scalar_handles: vector<u64>): u64
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `neg_internal` + + +
fun neg_internal<F>(handle: u64): u64
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `one_internal` + + +
fun one_internal<S>(): u64
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `order_internal` + + +
fun order_internal<G>(): vector<u8>
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `pairing_internal` + + +
fun pairing_internal<G1, G2, Gt>(g1_handle: u64, g2_handle: u64): u64
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `scalar_mul_internal` + + +
fun scalar_mul_internal<G, S>(element_handle: u64, scalar_handle: u64): u64
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `serialize_internal` + + +
fun serialize_internal<S, F>(handle: u64): vector<u8>
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `sqr_internal` + + +
fun sqr_internal<G>(handle: u64): u64
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `sub_internal` + + +
fun sub_internal<G>(handle_1: u64, handle_2: u64): u64
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `upcast_internal` + + +
fun upcast_internal<S, L>(handle: u64): u64
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `zero_internal` + + +
fun zero_internal<S>(): u64
+
+ + + + +
pragma opaque;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/debug.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/debug.md new file mode 100644 index 0000000000000..3fe59b4b97ca4 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/debug.md @@ -0,0 +1,237 @@ + + + +# Module `0x1::debug` + +Module providing debug functionality. + + +- [Constants](#@Constants_0) +- [Function `print`](#0x1_debug_print) +- [Function `print_stack_trace`](#0x1_debug_print_stack_trace) +- [Function `format`](#0x1_debug_format) +- [Function `native_print`](#0x1_debug_native_print) +- [Function `native_stack_trace`](#0x1_debug_native_stack_trace) +- [Specification](#@Specification_1) + - [Function `print`](#@Specification_1_print) + - [Function `print_stack_trace`](#@Specification_1_print_stack_trace) + - [Function `native_print`](#@Specification_1_native_print) + - [Function `native_stack_trace`](#@Specification_1_native_stack_trace) + + +
use 0x1::string;
+use 0x1::string_utils;
+
+ + + + + +## Constants + + + + + + +
const MSG_1: vector<u8> = [97, 98, 99, 100, 101, 102];
+
+ + + + + + + +
const MSG_2: vector<u8> = [49, 50, 51, 52, 53, 54];
+
+ + + + + +## Function `print` + + + +
public fun print<T>(x: &T)
+
+ + + +
+Implementation + + +
public fun print<T>(x: &T) {
+    native_print(format(x));
+}
+
+ + + +
+ + + +## Function `print_stack_trace` + + + +
public fun print_stack_trace()
+
+ + + +
+Implementation + + +
public fun print_stack_trace() {
+    native_print(native_stack_trace());
+}
+
+ + + +
+ + + +## Function `format` + + + +
fun format<T>(x: &T): string::String
+
+ + + +
+Implementation + + +
inline fun format<T>(x: &T): String {
+    aptos_std::string_utils::debug_string(x)
+}
+
+ + + +
+ + + +## Function `native_print` + + + +
fun native_print(x: string::String)
+
+ + + +
+Implementation + + +
native fun native_print(x: String);
+
+ + + +
+ + + +## Function `native_stack_trace` + + + +
fun native_stack_trace(): string::String
+
+ + + +
+Implementation + + +
native fun native_stack_trace(): String;
+
+ + + +
+ + + +## Specification + + + + +### Function `print` + + +
public fun print<T>(x: &T)
+
+ + + + +
aborts_if false;
+
+ + + + + +### Function `print_stack_trace` + + +
public fun print_stack_trace()
+
+ + + + +
aborts_if false;
+
+ + + + + +### Function `native_print` + + +
fun native_print(x: string::String)
+
+ + + + +
pragma opaque;
+aborts_if false;
+
+ + + + + +### Function `native_stack_trace` + + +
fun native_stack_trace(): string::String
+
+ + + + +
pragma opaque;
+aborts_if false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/ed25519.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/ed25519.md new file mode 100644 index 0000000000000..5122e741ba6a6 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/ed25519.md @@ -0,0 +1,876 @@ + + + +# Module `0x1::ed25519` + +Contains functions for: + +1. [Ed25519](https://en.wikipedia.org/wiki/EdDSA#Ed25519) digital signatures: i.e., EdDSA signatures over Edwards25519 curves with co-factor 8 + + +- [Struct `SignedMessage`](#0x1_ed25519_SignedMessage) +- [Struct `UnvalidatedPublicKey`](#0x1_ed25519_UnvalidatedPublicKey) +- [Struct `ValidatedPublicKey`](#0x1_ed25519_ValidatedPublicKey) +- [Struct `Signature`](#0x1_ed25519_Signature) +- [Constants](#@Constants_0) +- [Function `new_unvalidated_public_key_from_bytes`](#0x1_ed25519_new_unvalidated_public_key_from_bytes) +- [Function `new_validated_public_key_from_bytes`](#0x1_ed25519_new_validated_public_key_from_bytes) +- [Function `new_signature_from_bytes`](#0x1_ed25519_new_signature_from_bytes) +- [Function `public_key_to_unvalidated`](#0x1_ed25519_public_key_to_unvalidated) +- [Function `public_key_into_unvalidated`](#0x1_ed25519_public_key_into_unvalidated) +- [Function `unvalidated_public_key_to_bytes`](#0x1_ed25519_unvalidated_public_key_to_bytes) +- [Function `validated_public_key_to_bytes`](#0x1_ed25519_validated_public_key_to_bytes) +- [Function `signature_to_bytes`](#0x1_ed25519_signature_to_bytes) +- [Function `public_key_validate`](#0x1_ed25519_public_key_validate) +- [Function `signature_verify_strict`](#0x1_ed25519_signature_verify_strict) +- [Function `signature_verify_strict_t`](#0x1_ed25519_signature_verify_strict_t) +- [Function `new_signed_message`](#0x1_ed25519_new_signed_message) +- [Function `unvalidated_public_key_to_authentication_key`](#0x1_ed25519_unvalidated_public_key_to_authentication_key) +- [Function 
`validated_public_key_to_authentication_key`](#0x1_ed25519_validated_public_key_to_authentication_key) +- [Function `public_key_bytes_to_authentication_key`](#0x1_ed25519_public_key_bytes_to_authentication_key) +- [Function `public_key_validate_internal`](#0x1_ed25519_public_key_validate_internal) +- [Function `signature_verify_strict_internal`](#0x1_ed25519_signature_verify_strict_internal) +- [Specification](#@Specification_1) + - [Function `new_unvalidated_public_key_from_bytes`](#@Specification_1_new_unvalidated_public_key_from_bytes) + - [Function `new_validated_public_key_from_bytes`](#@Specification_1_new_validated_public_key_from_bytes) + - [Function `new_signature_from_bytes`](#@Specification_1_new_signature_from_bytes) + - [Function `public_key_bytes_to_authentication_key`](#@Specification_1_public_key_bytes_to_authentication_key) + - [Function `public_key_validate_internal`](#@Specification_1_public_key_validate_internal) + - [Function `signature_verify_strict_internal`](#@Specification_1_signature_verify_strict_internal) + - [Helper functions](#@Helper_functions_2) + + +
use 0x1::bcs;
+use 0x1::error;
+use 0x1::hash;
+use 0x1::option;
+use 0x1::type_info;
+
+ + + + + +## Struct `SignedMessage` + +A BCS-serializable message, which one can verify signatures on via signature_verify_strict_t + + +
struct SignedMessage<MessageType> has drop
+
+ + + +
+Fields + + +
+
+type_info: type_info::TypeInfo +
+
+ +
+
+inner: MessageType +
+
+ +
+
+ + +
+ + + +## Struct `UnvalidatedPublicKey` + +An *unvalidated* Ed25519 public key: not necessarily an elliptic curve point, just a sequence of 32 bytes + + +
struct UnvalidatedPublicKey has copy, drop, store
+
+ + + +
+Fields + + +
+
+bytes: vector<u8> +
+
+ +
+
+ + +
+ + + +## Struct `ValidatedPublicKey` + +A *validated* Ed25519 public key: not necessarily a prime-order point, could be mixed-order, but will never be +a small-order point. + +For now, this struct is not used in any verification functions, but it might be in the future. + + +
struct ValidatedPublicKey has copy, drop, store
+
+ + + +
+Fields + + +
+
+bytes: vector<u8> +
+
+ +
+
+ + +
+ + + +## Struct `Signature` + +A purported Ed25519 signature that can be verified via signature_verify_strict or signature_verify_strict_t. + + +
struct Signature has copy, drop, store
+
+ + + +
+Fields + + +
+
+bytes: vector<u8> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +The size of a serialized public key, in bytes. + + +
const PUBLIC_KEY_NUM_BYTES: u64 = 32;
+
+ + + + + +Wrong number of bytes were given as input when deserializing an Ed25519 public key. + + +
const E_WRONG_PUBKEY_SIZE: u64 = 1;
+
+ + + + + +Wrong number of bytes were given as input when deserializing an Ed25519 signature. + + +
const E_WRONG_SIGNATURE_SIZE: u64 = 2;
+
+ + + + + +The size of a serialized signature, in bytes. + + +
const SIGNATURE_NUM_BYTES: u64 = 64;
+
+ + + + + +The identifier of the Ed25519 signature scheme, which is used when deriving Aptos authentication keys by hashing +it together with an Ed25519 public key. + + +
const SIGNATURE_SCHEME_ID: u8 = 0;
+
+ + + + + +## Function `new_unvalidated_public_key_from_bytes` + +Parses the input 32 bytes as an *unvalidated* Ed25519 public key. + + +
public fun new_unvalidated_public_key_from_bytes(bytes: vector<u8>): ed25519::UnvalidatedPublicKey
+
+ + + +
+Implementation + + +
public fun new_unvalidated_public_key_from_bytes(bytes: vector<u8>): UnvalidatedPublicKey {
+    assert!(std::vector::length(&bytes) == PUBLIC_KEY_NUM_BYTES, std::error::invalid_argument(E_WRONG_PUBKEY_SIZE));
+    UnvalidatedPublicKey { bytes }
+}
+
+ + + +
+ + + +## Function `new_validated_public_key_from_bytes` + +Parses the input 32 bytes as a *validated* Ed25519 public key. + + +
public fun new_validated_public_key_from_bytes(bytes: vector<u8>): option::Option<ed25519::ValidatedPublicKey>
+
+ + + +
+Implementation + + +
public fun new_validated_public_key_from_bytes(bytes: vector<u8>): Option<ValidatedPublicKey> {
+    if (public_key_validate_internal(bytes)) {
+        option::some(ValidatedPublicKey {
+            bytes
+        })
+    } else {
+        option::none<ValidatedPublicKey>()
+    }
+}
+
+ + + +
+ + + +## Function `new_signature_from_bytes` + +Parses the input 64 bytes as a purported Ed25519 signature. + + +
public fun new_signature_from_bytes(bytes: vector<u8>): ed25519::Signature
+
+ + + +
+Implementation + + +
public fun new_signature_from_bytes(bytes: vector<u8>): Signature {
+    assert!(std::vector::length(&bytes) == SIGNATURE_NUM_BYTES, std::error::invalid_argument(E_WRONG_SIGNATURE_SIZE));
+    Signature { bytes }
+}
+
+ + + +
+ + + +## Function `public_key_to_unvalidated` + +Converts a ValidatedPublicKey to an UnvalidatedPublicKey, which can be used in the strict verification APIs. + + +
public fun public_key_to_unvalidated(pk: &ed25519::ValidatedPublicKey): ed25519::UnvalidatedPublicKey
+
+ + + +
+Implementation + + +
public fun public_key_to_unvalidated(pk: &ValidatedPublicKey): UnvalidatedPublicKey {
+    UnvalidatedPublicKey {
+        bytes: pk.bytes
+    }
+}
+
+ + + +
+ + + +## Function `public_key_into_unvalidated` + +Moves a ValidatedPublicKey into an UnvalidatedPublicKey, which can be used in the strict verification APIs. + + +
public fun public_key_into_unvalidated(pk: ed25519::ValidatedPublicKey): ed25519::UnvalidatedPublicKey
+
+ + + +
+Implementation + + +
public fun public_key_into_unvalidated(pk: ValidatedPublicKey): UnvalidatedPublicKey {
+    UnvalidatedPublicKey {
+        bytes: pk.bytes
+    }
+}
+
+ + + +
+ + + +## Function `unvalidated_public_key_to_bytes` + +Serializes an UnvalidatedPublicKey struct to 32-bytes. + + +
public fun unvalidated_public_key_to_bytes(pk: &ed25519::UnvalidatedPublicKey): vector<u8>
+
+ + + +
+Implementation + + +
public fun unvalidated_public_key_to_bytes(pk: &UnvalidatedPublicKey): vector<u8> {
+    pk.bytes
+}
+
+ + + +
+ + + +## Function `validated_public_key_to_bytes` + +Serializes a ValidatedPublicKey struct to 32-bytes. + + +
public fun validated_public_key_to_bytes(pk: &ed25519::ValidatedPublicKey): vector<u8>
+
+ + + +
+Implementation + + +
public fun validated_public_key_to_bytes(pk: &ValidatedPublicKey): vector<u8> {
+    pk.bytes
+}
+
+ + + +
+ + + +## Function `signature_to_bytes` + +Serializes a Signature struct to 64-bytes. + + +
public fun signature_to_bytes(sig: &ed25519::Signature): vector<u8>
+
+ + + +
+Implementation + + +
public fun signature_to_bytes(sig: &Signature): vector<u8> {
+    sig.bytes
+}
+
+ + + +
+ + + +## Function `public_key_validate` + +Takes in an *unvalidated* public key and attempts to validate it. +Returns Some(ValidatedPublicKey) if successful and None otherwise. + + +
public fun public_key_validate(pk: &ed25519::UnvalidatedPublicKey): option::Option<ed25519::ValidatedPublicKey>
+
+ + + +
+Implementation + + +
public fun public_key_validate(pk: &UnvalidatedPublicKey): Option<ValidatedPublicKey> {
+    new_validated_public_key_from_bytes(pk.bytes)
+}
+
+ + + +
+ + + +## Function `signature_verify_strict` + +Verifies a purported Ed25519 signature under an *unvalidated* public_key on the specified message. +This call will validate the public key by checking it is NOT in the small subgroup. + + +
public fun signature_verify_strict(signature: &ed25519::Signature, public_key: &ed25519::UnvalidatedPublicKey, message: vector<u8>): bool
+
+ + + +
+Implementation + + +
public fun signature_verify_strict(
+    signature: &Signature,
+    public_key: &UnvalidatedPublicKey,
+    message: vector<u8>
+): bool {
+    signature_verify_strict_internal(signature.bytes, public_key.bytes, message)
+}
+
+ + + +
+ + + +## Function `signature_verify_strict_t` + +This function is used to verify a signature on any BCS-serializable type T. For now, it is used to verify the +proof of private key ownership when rotating authentication keys. + + +
public fun signature_verify_strict_t<T: drop>(signature: &ed25519::Signature, public_key: &ed25519::UnvalidatedPublicKey, data: T): bool
+
+ + + +
+Implementation + + +
public fun signature_verify_strict_t<T: drop>(signature: &Signature, public_key: &UnvalidatedPublicKey, data: T): bool {
+    let encoded = SignedMessage {
+        type_info: type_info::type_of<T>(),
+        inner: data,
+    };
+
+    signature_verify_strict_internal(signature.bytes, public_key.bytes, bcs::to_bytes(&encoded))
+}
+
+ + + +
+ + + +## Function `new_signed_message` + +Helper method to construct a SignedMessage struct. + + +
public fun new_signed_message<T: drop>(data: T): ed25519::SignedMessage<T>
+
+ + + +
+Implementation + + +
public fun new_signed_message<T: drop>(data: T): SignedMessage<T> {
+    SignedMessage {
+        type_info: type_info::type_of<T>(),
+        inner: data,
+    }
+}
+
+ + + +
+ + + +## Function `unvalidated_public_key_to_authentication_key` + +Derives the Aptos-specific authentication key of the given Ed25519 public key. + + +
public fun unvalidated_public_key_to_authentication_key(pk: &ed25519::UnvalidatedPublicKey): vector<u8>
+
+ + + +
+Implementation + + +
public fun unvalidated_public_key_to_authentication_key(pk: &UnvalidatedPublicKey): vector<u8> {
+    public_key_bytes_to_authentication_key(pk.bytes)
+}
+
+ + + +
+ + + +## Function `validated_public_key_to_authentication_key` + +Derives the Aptos-specific authentication key of the given Ed25519 public key. + + +
public fun validated_public_key_to_authentication_key(pk: &ed25519::ValidatedPublicKey): vector<u8>
+
+ + + +
+Implementation + + +
public fun validated_public_key_to_authentication_key(pk: &ValidatedPublicKey): vector<u8> {
+    public_key_bytes_to_authentication_key(pk.bytes)
+}
+
+ + + +
+ + + +## Function `public_key_bytes_to_authentication_key` + +Derives the Aptos-specific authentication key of the given Ed25519 public key. + + +
fun public_key_bytes_to_authentication_key(pk_bytes: vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
fun public_key_bytes_to_authentication_key(pk_bytes: vector<u8>): vector<u8> {
+    std::vector::push_back(&mut pk_bytes, SIGNATURE_SCHEME_ID);
+    std::hash::sha3_256(pk_bytes)
+}
+
+ + + +
+ + + +## Function `public_key_validate_internal` + +Return true if the bytes in public_key can be parsed as a valid Ed25519 public key: i.e., it passes +points-on-curve and not-in-small-subgroup checks. +Returns false otherwise. + + +
fun public_key_validate_internal(bytes: vector<u8>): bool
+
+ + + +
+Implementation + + +
native fun public_key_validate_internal(bytes: vector<u8>): bool;
+
+ + + +
+ + + +## Function `signature_verify_strict_internal` + +Return true if the Ed25519 signature on message verifies against the Ed25519 public_key. +Returns false if any of the following holds: +- signature or public key are of wrong sizes +- public_key does not pass points-on-curve or not-in-small-subgroup checks, +- signature does not pass points-on-curve or not-in-small-subgroup checks, +- the signature on message does not verify. + + +
fun signature_verify_strict_internal(signature: vector<u8>, public_key: vector<u8>, message: vector<u8>): bool
+
+ + + +
+Implementation + + +
native fun signature_verify_strict_internal(
+    signature: vector<u8>,
+    public_key: vector<u8>,
+    message: vector<u8>
+): bool;
+
+ + + +
+ + + +## Specification + + + + +### Function `new_unvalidated_public_key_from_bytes` + + +
public fun new_unvalidated_public_key_from_bytes(bytes: vector<u8>): ed25519::UnvalidatedPublicKey
+
+ + + + +
include NewUnvalidatedPublicKeyFromBytesAbortsIf;
+ensures result == UnvalidatedPublicKey { bytes };
+
+ + + + + + + +
schema NewUnvalidatedPublicKeyFromBytesAbortsIf {
+    bytes: vector<u8>;
+    aborts_if len(bytes) != PUBLIC_KEY_NUM_BYTES;
+}
+
+ + + + + +### Function `new_validated_public_key_from_bytes` + + +
public fun new_validated_public_key_from_bytes(bytes: vector<u8>): option::Option<ed25519::ValidatedPublicKey>
+
+ + + + +
aborts_if false;
+let cond = spec_public_key_validate_internal(bytes);
+ensures cond ==> result == option::spec_some(ValidatedPublicKey{bytes});
+ensures !cond ==> result == option::spec_none<ValidatedPublicKey>();
+
+ + + + + +### Function `new_signature_from_bytes` + + +
public fun new_signature_from_bytes(bytes: vector<u8>): ed25519::Signature
+
+ + + + +
include NewSignatureFromBytesAbortsIf;
+ensures result == Signature { bytes };
+
+ + + + + + + +
schema NewSignatureFromBytesAbortsIf {
+    bytes: vector<u8>;
+    aborts_if len(bytes) != SIGNATURE_NUM_BYTES;
+}
+
+ + + + + +### Function `public_key_bytes_to_authentication_key` + + +
fun public_key_bytes_to_authentication_key(pk_bytes: vector<u8>): vector<u8>
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures [abstract] result == spec_public_key_bytes_to_authentication_key(pk_bytes);
+
+ + + + + +### Function `public_key_validate_internal` + + +
fun public_key_validate_internal(bytes: vector<u8>): bool
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == spec_public_key_validate_internal(bytes);
+
+ + + + + +### Function `signature_verify_strict_internal` + + +
fun signature_verify_strict_internal(signature: vector<u8>, public_key: vector<u8>, message: vector<u8>): bool
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == spec_signature_verify_strict_internal(signature, public_key, message);
+
+ + + + + +### Helper functions + + + + + + +
fun spec_signature_verify_strict_internal(
+   signature: vector<u8>,
+   public_key: vector<u8>,
+   message: vector<u8>
+): bool;
+
+ + + + + + + +
fun spec_public_key_validate_internal(bytes: vector<u8>): bool;
+
+ + + + + + + +
fun spec_public_key_bytes_to_authentication_key(pk_bytes: vector<u8>): vector<u8>;
+
+ + + + + + + +
fun spec_signature_verify_strict_t<T>(signature: Signature, public_key: UnvalidatedPublicKey, data: T): bool {
+   let encoded = SignedMessage<T> {
+       type_info: type_info::type_of<T>(),
+       inner: data,
+   };
+   let message = bcs::serialize(encoded);
+   spec_signature_verify_strict_internal(signature.bytes, public_key.bytes, message)
+}
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/fixed_point64.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/fixed_point64.md new file mode 100644 index 0000000000000..ffa6762481ade --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/fixed_point64.md @@ -0,0 +1,1333 @@ + + + +# Module `0x1::fixed_point64` + +Defines a fixed-point numeric type with a 64-bit integer part and +a 64-bit fractional part. + + +- [Struct `FixedPoint64`](#0x1_fixed_point64_FixedPoint64) +- [Constants](#@Constants_0) +- [Function `sub`](#0x1_fixed_point64_sub) +- [Function `add`](#0x1_fixed_point64_add) +- [Function `multiply_u128`](#0x1_fixed_point64_multiply_u128) +- [Function `divide_u128`](#0x1_fixed_point64_divide_u128) +- [Function `create_from_rational`](#0x1_fixed_point64_create_from_rational) +- [Function `create_from_raw_value`](#0x1_fixed_point64_create_from_raw_value) +- [Function `get_raw_value`](#0x1_fixed_point64_get_raw_value) +- [Function `is_zero`](#0x1_fixed_point64_is_zero) +- [Function `min`](#0x1_fixed_point64_min) +- [Function `max`](#0x1_fixed_point64_max) +- [Function `less_or_equal`](#0x1_fixed_point64_less_or_equal) +- [Function `less`](#0x1_fixed_point64_less) +- [Function `greater_or_equal`](#0x1_fixed_point64_greater_or_equal) +- [Function `greater`](#0x1_fixed_point64_greater) +- [Function `equal`](#0x1_fixed_point64_equal) +- [Function `almost_equal`](#0x1_fixed_point64_almost_equal) +- [Function `create_from_u128`](#0x1_fixed_point64_create_from_u128) +- [Function `floor`](#0x1_fixed_point64_floor) +- [Function `ceil`](#0x1_fixed_point64_ceil) +- [Function `round`](#0x1_fixed_point64_round) +- [Specification](#@Specification_1) + - [Function `sub`](#@Specification_1_sub) + - [Function `add`](#@Specification_1_add) + - [Function `multiply_u128`](#@Specification_1_multiply_u128) + - [Function `divide_u128`](#@Specification_1_divide_u128) + - 
[Function `create_from_rational`](#@Specification_1_create_from_rational) + - [Function `create_from_raw_value`](#@Specification_1_create_from_raw_value) + - [Function `min`](#@Specification_1_min) + - [Function `max`](#@Specification_1_max) + - [Function `less_or_equal`](#@Specification_1_less_or_equal) + - [Function `less`](#@Specification_1_less) + - [Function `greater_or_equal`](#@Specification_1_greater_or_equal) + - [Function `greater`](#@Specification_1_greater) + - [Function `equal`](#@Specification_1_equal) + - [Function `almost_equal`](#@Specification_1_almost_equal) + - [Function `create_from_u128`](#@Specification_1_create_from_u128) + - [Function `floor`](#@Specification_1_floor) + - [Function `ceil`](#@Specification_1_ceil) + - [Function `round`](#@Specification_1_round) + + +
+ + + + + +## Struct `FixedPoint64` + +Define a fixed-point numeric type with 64 fractional bits. +This is just a u128 integer but it is wrapped in a struct to +make a unique type. This is a binary representation, so decimal +values may not be exactly representable, but it provides more +than 9 decimal digits of precision both before and after the +decimal point (18 digits total). For comparison, double precision +floating-point has less than 16 decimal digits of precision, so +be careful about using floating-point to convert these values to +decimal. + + +
struct FixedPoint64 has copy, drop, store
+
+ + + +
+Fields + + +
+
+value: u128 +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const MAX_U128: u256 = 340282366920938463463374607431768211455;
+
+ + + + + +The denominator provided was zero + + +
const EDENOMINATOR: u64 = 65537;
+
+ + + + + +The quotient value would be too large to be held in a u128 + + +
const EDIVISION: u64 = 131074;
+
+ + + + + +A division by zero was encountered + + +
const EDIVISION_BY_ZERO: u64 = 65540;
+
+ + + + + +The multiplied value would be too large to be held in a u128 + + +
const EMULTIPLICATION: u64 = 131075;
+
+ + + + + +Abort code on calculation result is negative. + + +
const ENEGATIVE_RESULT: u64 = 65542;
+
+ + + + + +The computed ratio when converting to a FixedPoint64 would be unrepresentable + + +
const ERATIO_OUT_OF_RANGE: u64 = 131077;
+
+ + + + + +## Function `sub` + +Returns self - y. self must be not less than y. + + +
public fun sub(self: fixed_point64::FixedPoint64, y: fixed_point64::FixedPoint64): fixed_point64::FixedPoint64
+
+ + + +
+Implementation + + +
public fun sub(self: FixedPoint64, y: FixedPoint64): FixedPoint64 {
+    let x_raw = get_raw_value(self);
+    let y_raw = get_raw_value(y);
+    assert!(x_raw >= y_raw, ENEGATIVE_RESULT);
+    create_from_raw_value(x_raw - y_raw)
+}
+
+ + + +
+ + + +## Function `add` + +Returns self + y. The result cannot be greater than MAX_U128. + + +
public fun add(self: fixed_point64::FixedPoint64, y: fixed_point64::FixedPoint64): fixed_point64::FixedPoint64
+
+ + + +
+Implementation + + +
public fun add(self: FixedPoint64, y: FixedPoint64): FixedPoint64 {
+    let x_raw = get_raw_value(self);
+    let y_raw = get_raw_value(y);
+    let result = (x_raw as u256) + (y_raw as u256);
+    assert!(result <= MAX_U128, ERATIO_OUT_OF_RANGE);
+    create_from_raw_value((result as u128))
+}
+
+ + + +
+ + + +## Function `multiply_u128` + +Multiply a u128 integer by a fixed-point number, truncating any +fractional part of the product. This will abort if the product +overflows. + + +
public fun multiply_u128(val: u128, multiplier: fixed_point64::FixedPoint64): u128
+
+ + + +
+Implementation + + +
public fun multiply_u128(val: u128, multiplier: FixedPoint64): u128 {
+    // The product of two 128 bit values has 256 bits, so perform the
+    // multiplication with u256 types and keep the full 256 bit product
+    // to avoid losing accuracy.
+    let unscaled_product = (val as u256) * (multiplier.value as u256);
+    // The unscaled product has 64 fractional bits (from the multiplier)
+    // so rescale it by shifting away the low bits.
+    let product = unscaled_product >> 64;
+    // Check whether the value is too large.
+    assert!(product <= MAX_U128, EMULTIPLICATION);
+    (product as u128)
+}
+
+ + + +
+ + + +## Function `divide_u128` + +Divide a u128 integer by a fixed-point number, truncating any +fractional part of the quotient. This will abort if the divisor +is zero or if the quotient overflows. + + +
public fun divide_u128(val: u128, divisor: fixed_point64::FixedPoint64): u128
+
+ + + +
+Implementation + + +
public fun divide_u128(val: u128, divisor: FixedPoint64): u128 {
+    // Check for division by zero.
+    assert!(divisor.value != 0, EDIVISION_BY_ZERO);
+    // First convert to 256 bits and then shift left to
+    // add 64 fractional zero bits to the dividend.
+    let scaled_value = (val as u256) << 64;
+    let quotient = scaled_value / (divisor.value as u256);
+    // Check whether the value is too large.
+    assert!(quotient <= MAX_U128, EDIVISION);
+    // the value may be too large, which will cause the cast to fail
+    // with an arithmetic error.
+    (quotient as u128)
+}
+
+ + + +
+ + + +## Function `create_from_rational` + +Create a fixed-point value from a rational number specified by its +numerator and denominator. Calling this function should be preferred +over using Self::create_from_raw_value which is also available. +This will abort if the denominator is zero. It will also +abort if the numerator is nonzero and the ratio is not in the range +2^-64 .. 2^64-1. When specifying decimal fractions, be careful about +rounding errors: if you round to display N digits after the decimal +point, you can use a denominator of 10^N to avoid numbers where the +very small imprecision in the binary representation could change the +rounding, e.g., 0.0125 will round down to 0.012 instead of up to 0.013. + + +
public fun create_from_rational(numerator: u128, denominator: u128): fixed_point64::FixedPoint64
+
+ + + +
+Implementation + + +
public fun create_from_rational(numerator: u128, denominator: u128): FixedPoint64 {
+    // If the denominator is zero, this will abort.
+    // Scale the numerator to have 64 fractional bits, so that the quotient will have 64
+    // fractional bits.
+    let scaled_numerator = (numerator as u256) << 64;
+    assert!(denominator != 0, EDENOMINATOR);
+    let quotient = scaled_numerator / (denominator as u256);
+    assert!(quotient != 0 || numerator == 0, ERATIO_OUT_OF_RANGE);
+    // Return the quotient as a fixed-point number. We first need to check whether the cast
+    // can succeed.
+    assert!(quotient <= MAX_U128, ERATIO_OUT_OF_RANGE);
+    FixedPoint64 { value: (quotient as u128) }
+}
+
+ + + +
+ + + +## Function `create_from_raw_value` + +Create a fixed-point value from a raw value. + + +
public fun create_from_raw_value(value: u128): fixed_point64::FixedPoint64
+
+ + + +
+Implementation + + +
public fun create_from_raw_value(value: u128): FixedPoint64 {
+    FixedPoint64 { value }
+}
+
+ + + +
+ + + +## Function `get_raw_value` + +Accessor for the raw u128 value. Other less common operations, such as +adding or subtracting FixedPoint64 values, can be done using the raw +values directly. + + +
public fun get_raw_value(self: fixed_point64::FixedPoint64): u128
+
+ + + +
+Implementation + + +
public fun get_raw_value(self: FixedPoint64): u128 {
+    self.value
+}
+
+ + + +
+ + + +## Function `is_zero` + +Returns true if the ratio is zero. + + +
public fun is_zero(self: fixed_point64::FixedPoint64): bool
+
+ + + +
+Implementation + + +
public fun is_zero(self: FixedPoint64): bool {
+    self.value == 0
+}
+
+ + + +
+ + + +## Function `min` + +Returns the smaller of the two FixedPoint64 numbers. + + +
public fun min(num1: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): fixed_point64::FixedPoint64
+
+ + + +
+Implementation + + +
public fun min(num1: FixedPoint64, num2: FixedPoint64): FixedPoint64 {
+    if (num1.value < num2.value) {
+        num1
+    } else {
+        num2
+    }
+}
+
+ + + +
+ + + +## Function `max` + +Returns the larger of the two FixedPoint64 numbers. + + +
public fun max(num1: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): fixed_point64::FixedPoint64
+
+ + + +
+Implementation + + +
public fun max(num1: FixedPoint64, num2: FixedPoint64): FixedPoint64 {
+    if (num1.value > num2.value) {
+        num1
+    } else {
+        num2
+    }
+}
+
+ + + +
+ + + +## Function `less_or_equal` + +Returns true if self <= num2 + + +
public fun less_or_equal(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
+ + + +
+Implementation + + +
public fun less_or_equal(self: FixedPoint64, num2: FixedPoint64): bool {
+    self.value <= num2.value
+}
+
+ + + +
+ + + +## Function `less` + +Returns true if self < num2 + + +
public fun less(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
+ + + +
+Implementation + + +
public fun less(self: FixedPoint64, num2: FixedPoint64): bool {
+    self.value < num2.value
+}
+
+ + + +
+ + + +## Function `greater_or_equal` + +Returns true if self >= num2 + + +
public fun greater_or_equal(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
+ + + +
+Implementation + + +
public fun greater_or_equal(self: FixedPoint64, num2: FixedPoint64): bool {
+    self.value >= num2.value
+}
+
+ + + +
+ + + +## Function `greater` + +Returns true if self > num2 + + +
public fun greater(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
+ + + +
+Implementation + + +
public fun greater(self: FixedPoint64, num2: FixedPoint64): bool {
+    self.value > num2.value
+}
+
+ + + +
+ + + +## Function `equal` + +Returns true if self = num2 + + +
public fun equal(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
+ + + +
+Implementation + + +
public fun equal(self: FixedPoint64, num2: FixedPoint64): bool {
+    self.value == num2.value
+}
+
+ + + +
+ + + +## Function `almost_equal` + +Returns true if self almost equals num2, which means abs(self - num2) <= precision + + +
public fun almost_equal(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64, precision: fixed_point64::FixedPoint64): bool
+
+ + + +
+Implementation + + +
public fun almost_equal(self: FixedPoint64, num2: FixedPoint64, precision: FixedPoint64): bool {
+    if (self.value > num2.value) {
+        (self.value - num2.value <= precision.value)
+    } else {
+        (num2.value - self.value <= precision.value)
+    }
+}
+
+ + + +
+ + + +## Function `create_from_u128` + +Create a fixed-point value from a u128 value. + + +
public fun create_from_u128(val: u128): fixed_point64::FixedPoint64
+
+ + + +
+Implementation + + +
public fun create_from_u128(val: u128): FixedPoint64 {
+    let value = (val as u256) << 64;
+    assert!(value <= MAX_U128, ERATIO_OUT_OF_RANGE);
+    FixedPoint64 {value: (value as u128)}
+}
+
+ + + +
+ + + +## Function `floor` + +Returns the largest integer less than or equal to a given number. + + +
public fun floor(self: fixed_point64::FixedPoint64): u128
+
+ + + +
+Implementation + + +
public fun floor(self: FixedPoint64): u128 {
+    self.value >> 64
+}
+
+ + + +
+ + + +## Function `ceil` + +Rounds up the given FixedPoint64 to the next largest integer. + + +
public fun ceil(self: fixed_point64::FixedPoint64): u128
+
+ + + +
+Implementation + + +
public fun ceil(self: FixedPoint64): u128 {
+    let floored_num = floor(self) << 64;
+    if (self.value == floored_num) {
+        return floored_num >> 64
+    };
+    let val = ((floored_num as u256) + (1 << 64));
+    (val >> 64 as u128)
+}
+
+ + + +
+ + + +## Function `round` + +Rounds the value of a FixedPoint64 to the nearest integer. + + +
public fun round(self: fixed_point64::FixedPoint64): u128
+
+ + + +
+Implementation + + +
public fun round(self: FixedPoint64): u128 {
+    let floored_num = floor(self) << 64;
+    let boundary = floored_num + ((1 << 64) / 2);
+    if (self.value < boundary) {
+        floored_num >> 64
+    } else {
+        ceil(self)
+    }
+}
+
+ + + +
+ + + +## Specification + + + + +
pragma aborts_if_is_strict;
+
+ + + + + +### Function `sub` + + +
public fun sub(self: fixed_point64::FixedPoint64, y: fixed_point64::FixedPoint64): fixed_point64::FixedPoint64
+
+ + + + +
pragma opaque;
+aborts_if self.value < y.value with ENEGATIVE_RESULT;
+ensures result.value == self.value - y.value;
+
+ + + + + +### Function `add` + + +
public fun add(self: fixed_point64::FixedPoint64, y: fixed_point64::FixedPoint64): fixed_point64::FixedPoint64
+
+ + + + +
pragma opaque;
+aborts_if (self.value as u256) + (y.value as u256) > MAX_U128 with ERATIO_OUT_OF_RANGE;
+ensures result.value == self.value + y.value;
+
+ + + + + +### Function `multiply_u128` + + +
public fun multiply_u128(val: u128, multiplier: fixed_point64::FixedPoint64): u128
+
+ + + + +
pragma opaque;
+include MultiplyAbortsIf;
+ensures result == spec_multiply_u128(val, multiplier);
+
+ + + + + + + +
schema MultiplyAbortsIf {
+    val: num;
+    multiplier: FixedPoint64;
+    aborts_if spec_multiply_u128(val, multiplier) > MAX_U128 with EMULTIPLICATION;
+}
+
+ + + + + + + +
fun spec_multiply_u128(val: num, multiplier: FixedPoint64): num {
+   (val * multiplier.value) >> 64
+}
+
+ + + + + +### Function `divide_u128` + + +
public fun divide_u128(val: u128, divisor: fixed_point64::FixedPoint64): u128
+
+ + + + +
pragma opaque;
+include DivideAbortsIf;
+ensures result == spec_divide_u128(val, divisor);
+
+ + + + + + + +
schema DivideAbortsIf {
+    val: num;
+    divisor: FixedPoint64;
+    aborts_if divisor.value == 0 with EDIVISION_BY_ZERO;
+    aborts_if spec_divide_u128(val, divisor) > MAX_U128 with EDIVISION;
+}
+
+ + + + + + + +
fun spec_divide_u128(val: num, divisor: FixedPoint64): num {
+   (val << 64) / divisor.value
+}
+
+ + + + + +### Function `create_from_rational` + + +
public fun create_from_rational(numerator: u128, denominator: u128): fixed_point64::FixedPoint64
+
+ + + + +
pragma opaque;
+pragma verify_duration_estimate = 1000;
+include CreateFromRationalAbortsIf;
+ensures result == spec_create_from_rational(numerator, denominator);
+
+ + + + + + + +
schema CreateFromRationalAbortsIf {
+    numerator: u128;
+    denominator: u128;
+    let scaled_numerator = (numerator as u256)<< 64;
+    let scaled_denominator = (denominator as u256);
+    let quotient = scaled_numerator / scaled_denominator;
+    aborts_if scaled_denominator == 0 with EDENOMINATOR;
+    aborts_if quotient == 0 && scaled_numerator != 0 with ERATIO_OUT_OF_RANGE;
+    aborts_if quotient > MAX_U128 with ERATIO_OUT_OF_RANGE;
+}
+
+ + + + + + + +
fun spec_create_from_rational(numerator: num, denominator: num): FixedPoint64 {
+   FixedPoint64{value: (numerator << 128) / (denominator << 64)}
+}
+
+ + + + + +### Function `create_from_raw_value` + + +
public fun create_from_raw_value(value: u128): fixed_point64::FixedPoint64
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result.value == value;
+
+ + + + + +### Function `min` + + +
public fun min(num1: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): fixed_point64::FixedPoint64
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == spec_min(num1, num2);
+
+ + + + + + + +
fun spec_min(num1: FixedPoint64, num2: FixedPoint64): FixedPoint64 {
+   if (num1.value < num2.value) {
+       num1
+   } else {
+       num2
+   }
+}
+
+ + + + + +### Function `max` + + +
public fun max(num1: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): fixed_point64::FixedPoint64
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == spec_max(num1, num2);
+
+ + + + + + + +
fun spec_max(num1: FixedPoint64, num2: FixedPoint64): FixedPoint64 {
+   if (num1.value > num2.value) {
+       num1
+   } else {
+       num2
+   }
+}
+
+ + + + + +### Function `less_or_equal` + + +
public fun less_or_equal(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == spec_less_or_equal(self, num2);
+
+ + + + + + + +
fun spec_less_or_equal(self: FixedPoint64, num2: FixedPoint64): bool {
+   self.value <= num2.value
+}
+
+ + + + + +### Function `less` + + +
public fun less(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == spec_less(self, num2);
+
+ + + + + + + +
fun spec_less(self: FixedPoint64, num2: FixedPoint64): bool {
+   self.value < num2.value
+}
+
+ + + + + +### Function `greater_or_equal` + + +
public fun greater_or_equal(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == spec_greater_or_equal(self, num2);
+
+ + + + + + + +
fun spec_greater_or_equal(self: FixedPoint64, num2: FixedPoint64): bool {
+   self.value >= num2.value
+}
+
+ + + + + +### Function `greater` + + +
public fun greater(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == spec_greater(self, num2);
+
+ + + + + + + +
fun spec_greater(self: FixedPoint64, num2: FixedPoint64): bool {
+   self.value > num2.value
+}
+
+ + + + + +### Function `equal` + + +
public fun equal(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == spec_equal(self, num2);
+
+ + + + + + + +
fun spec_equal(self: FixedPoint64, num2: FixedPoint64): bool {
+   self.value == num2.value
+}
+
+ + + + + +### Function `almost_equal` + + +
public fun almost_equal(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64, precision: fixed_point64::FixedPoint64): bool
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == spec_almost_equal(self, num2, precision);
+
+ + + + + + + +
fun spec_almost_equal(self: FixedPoint64, num2: FixedPoint64, precision: FixedPoint64): bool {
+   if (self.value > num2.value) {
+       (self.value - num2.value <= precision.value)
+   } else {
+       (num2.value - self.value <= precision.value)
+   }
+}
+
+ + + + + +### Function `create_from_u128` + + +
public fun create_from_u128(val: u128): fixed_point64::FixedPoint64
+
+ + + + +
pragma opaque;
+include CreateFromU64AbortsIf;
+ensures result == spec_create_from_u128(val);
+
+ + + + + + + +
schema CreateFromU64AbortsIf {
+    val: num;
+    let scaled_value = (val as u256) << 64;
+    aborts_if scaled_value > MAX_U128;
+}
+
+ + + + + + + +
fun spec_create_from_u128(val: num): FixedPoint64 {
+   FixedPoint64 {value: val << 64}
+}
+
+ + + + + +### Function `floor` + + +
public fun floor(self: fixed_point64::FixedPoint64): u128
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == spec_floor(self);
+
+ + + + + + + +
fun spec_floor(self: FixedPoint64): u128 {
+   let fractional = self.value % (1 << 64);
+   if (fractional == 0) {
+       self.value >> 64
+   } else {
+       (self.value - fractional) >> 64
+   }
+}
+
+ + + + + +### Function `ceil` + + +
public fun ceil(self: fixed_point64::FixedPoint64): u128
+
+ + + + +
pragma verify_duration_estimate = 1000;
+pragma opaque;
+aborts_if false;
+ensures result == spec_ceil(self);
+
+ + + + + + + +
fun spec_ceil(self: FixedPoint64): u128 {
+   let fractional = self.value % (1 << 64);
+   let one = 1 << 64;
+   if (fractional == 0) {
+       self.value >> 64
+   } else {
+       (self.value - fractional + one) >> 64
+   }
+}
+
+ + + + + +### Function `round` + + +
public fun round(self: fixed_point64::FixedPoint64): u128
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == spec_round(self);
+
+ + + + + + + +
fun spec_round(self: FixedPoint64): u128 {
+   let fractional = self.value % (1 << 64);
+   let boundary = (1 << 64) / 2;
+   let one = 1 << 64;
+   if (fractional < boundary) {
+       (self.value - fractional) >> 64
+   } else {
+       (self.value - fractional + one) >> 64
+   }
+}
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/from_bcs.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/from_bcs.md new file mode 100644 index 0000000000000..5838c78fdd758 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/from_bcs.md @@ -0,0 +1,363 @@ + + + +# Module `0x1::from_bcs` + +This module provides a number of functions to convert _primitive_ types from their representation in std::bcs +to values. This is the opposite of bcs::to_bytes. Note that it is not safe to define a generic public from_bytes +function because this can violate implicit struct invariants, therefore only primitive types are offered. If +a general conversion back-and-forth is needed, consider the aptos_std::Any type which preserves invariants. + +Example: +``` +use std::bcs; +use aptos_std::from_bcs; + +assert!(from_bcs::to_address(bcs::to_bytes(&@0xabcdef)) == @0xabcdef, 0); +``` + + +- [Constants](#@Constants_0) +- [Function `to_bool`](#0x1_from_bcs_to_bool) +- [Function `to_u8`](#0x1_from_bcs_to_u8) +- [Function `to_u16`](#0x1_from_bcs_to_u16) +- [Function `to_u32`](#0x1_from_bcs_to_u32) +- [Function `to_u64`](#0x1_from_bcs_to_u64) +- [Function `to_u128`](#0x1_from_bcs_to_u128) +- [Function `to_u256`](#0x1_from_bcs_to_u256) +- [Function `to_address`](#0x1_from_bcs_to_address) +- [Function `to_bytes`](#0x1_from_bcs_to_bytes) +- [Function `to_string`](#0x1_from_bcs_to_string) +- [Function `from_bytes`](#0x1_from_bcs_from_bytes) +- [Specification](#@Specification_1) + - [Function `from_bytes`](#@Specification_1_from_bytes) + +
use 0x1::string;
+
+ + + + + +## Constants + + + + +UTF8 check failed in conversion from bytes to string + + +
const EINVALID_UTF8: u64 = 1;
+
+ + + + + +## Function `to_bool` + + + +
public fun to_bool(v: vector<u8>): bool
+
+ + + +
+Implementation + + +
public fun to_bool(v: vector<u8>): bool {
+    from_bytes<bool>(v)
+}
+
+ + + +
+ + + +## Function `to_u8` + + + +
public fun to_u8(v: vector<u8>): u8
+
+ + + +
+Implementation + + +
public fun to_u8(v: vector<u8>): u8 {
+    from_bytes<u8>(v)
+}
+
+ + + +
+ + + +## Function `to_u16` + + + +
public fun to_u16(v: vector<u8>): u16
+
+ + + +
+Implementation + + +
public fun to_u16(v: vector<u8>): u16 {
+    from_bytes<u16>(v)
+}
+
+ + + +
+ + + +## Function `to_u32` + + + +
public fun to_u32(v: vector<u8>): u32
+
+ + + +
+Implementation + + +
public fun to_u32(v: vector<u8>): u32 {
+    from_bytes<u32>(v)
+}
+
+ + + +
+ + + +## Function `to_u64` + + + +
public fun to_u64(v: vector<u8>): u64
+
+ + + +
+Implementation + + +
public fun to_u64(v: vector<u8>): u64 {
+    from_bytes<u64>(v)
+}
+
+ + + +
+ + + +## Function `to_u128` + + + +
public fun to_u128(v: vector<u8>): u128
+
+ + + +
+Implementation + + +
public fun to_u128(v: vector<u8>): u128 {
+    from_bytes<u128>(v)
+}
+
+ + + +
+ + + +## Function `to_u256` + + + +
public fun to_u256(v: vector<u8>): u256
+
+ + + +
+Implementation + + +
public fun to_u256(v: vector<u8>): u256 {
+    from_bytes<u256>(v)
+}
+
+ + + +
+ + + +## Function `to_address` + + + +
public fun to_address(v: vector<u8>): address
+
+ + + +
+Implementation + + +
public fun to_address(v: vector<u8>): address {
+    from_bytes<address>(v)
+}
+
+ + + +
+ + + +## Function `to_bytes` + + + +
public fun to_bytes(v: vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
public fun to_bytes(v: vector<u8>): vector<u8> {
+    from_bytes<vector<u8>>(v)
+}
+
+ + + +
+ + + +## Function `to_string` + + + +
public fun to_string(v: vector<u8>): string::String
+
+ + + +
+Implementation + + +
public fun to_string(v: vector<u8>): String {
+    // To make this safe, we need to evaluate the utf8 invariant.
+    let s = from_bytes<String>(v);
+    assert!(string::internal_check_utf8(string::bytes(&s)), EINVALID_UTF8);
+    s
+}
+
+ + + +
+ + + +## Function `from_bytes` + +Package private native function to deserialize a type T. + +Note that this function does not put any constraint on T. If code uses this function to +deserialize a linear value, it is their responsibility that the data they deserialize is +owned. + + +
public(friend) fun from_bytes<T>(bytes: vector<u8>): T
+
+ + + +
+Implementation + + +
public(friend) native fun from_bytes<T>(bytes: vector<u8>): T;
+
+ + + +
+ + + +## Specification + + + + + + +
fun deserialize<T>(bytes: vector<u8>): T;
+
+fun deserializable<T>(bytes: vector<u8>): bool;
+axiom<T> forall b1: vector<u8>, b2: vector<u8>:
+    ( b1 == b2 ==> deserializable<T>(b1) == deserializable<T>(b2) );
+axiom<T> forall b1: vector<u8>, b2: vector<u8>:
+    ( b1 == b2 ==> deserialize<T>(b1) == deserialize<T>(b2) );
+
+ + + + + +### Function `from_bytes` + + +
public(friend) fun from_bytes<T>(bytes: vector<u8>): T
+
+ + + + +
pragma opaque;
+aborts_if !deserializable<T>(bytes);
+ensures result == deserialize<T>(bytes);
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/hash.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/hash.md new file mode 100644 index 0000000000000..908970ce7c41d --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/hash.md @@ -0,0 +1,623 @@ + + + +# Module `0x1::aptos_hash` + +Cryptographic hashes: +- Keccak-256: see https://keccak.team/keccak.html + +In addition, SHA2-256 and SHA3-256 are available in std::hash. Note that SHA3-256 is a variant of Keccak: it is +NOT the same as Keccak-256. + +Non-cryptographic hashes: +- SipHash: an add-rotate-xor (ARX) based family of pseudorandom functions created by Jean-Philippe Aumasson and Daniel J. Bernstein in 2012 + + +- [Constants](#@Constants_0) +- [Function `sip_hash`](#0x1_aptos_hash_sip_hash) +- [Function `sip_hash_from_value`](#0x1_aptos_hash_sip_hash_from_value) +- [Function `keccak256`](#0x1_aptos_hash_keccak256) +- [Function `sha2_512`](#0x1_aptos_hash_sha2_512) +- [Function `sha3_512`](#0x1_aptos_hash_sha3_512) +- [Function `ripemd160`](#0x1_aptos_hash_ripemd160) +- [Function `blake2b_256`](#0x1_aptos_hash_blake2b_256) +- [Function `sha2_512_internal`](#0x1_aptos_hash_sha2_512_internal) +- [Function `sha3_512_internal`](#0x1_aptos_hash_sha3_512_internal) +- [Function `ripemd160_internal`](#0x1_aptos_hash_ripemd160_internal) +- [Function `blake2b_256_internal`](#0x1_aptos_hash_blake2b_256_internal) +- [Specification](#@Specification_1) + - [Function `sip_hash`](#@Specification_1_sip_hash) + - [Function `sip_hash_from_value`](#@Specification_1_sip_hash_from_value) + - [Function `keccak256`](#@Specification_1_keccak256) + - [Function `sha2_512`](#@Specification_1_sha2_512) + - [Function `sha3_512`](#@Specification_1_sha3_512) + - [Function `ripemd160`](#@Specification_1_ripemd160) + - [Function `blake2b_256`](#@Specification_1_blake2b_256) + - [Function 
`sha2_512_internal`](#@Specification_1_sha2_512_internal) + - [Function `sha3_512_internal`](#@Specification_1_sha3_512_internal) + - [Function `ripemd160_internal`](#@Specification_1_ripemd160_internal) + - [Function `blake2b_256_internal`](#@Specification_1_blake2b_256_internal) + + +
use 0x1::bcs;
+use 0x1::error;
+use 0x1::features;
+
+ + + + + +## Constants + + + + +A newly-added native function is not yet enabled. + + +
const E_NATIVE_FUN_NOT_AVAILABLE: u64 = 1;
+
+ + + + + +## Function `sip_hash` + +Returns the (non-cryptographic) SipHash of bytes. See https://en.wikipedia.org/wiki/SipHash + + +
public fun sip_hash(bytes: vector<u8>): u64
+
+ + + +
+Implementation + + +
native public fun sip_hash(bytes: vector<u8>): u64;
+
+ + + +
+ + + +## Function `sip_hash_from_value` + +Returns the (non-cryptographic) SipHash of the BCS serialization of v. See https://en.wikipedia.org/wiki/SipHash + + +
public fun sip_hash_from_value<MoveValue>(v: &MoveValue): u64
+
+ + + +
+Implementation + + +
public fun sip_hash_from_value<MoveValue>(v: &MoveValue): u64 {
+    let bytes = bcs::to_bytes(v);
+
+    sip_hash(bytes)
+}
+
+ + + +
+ + + +## Function `keccak256` + +Returns the Keccak-256 hash of bytes. + + +
public fun keccak256(bytes: vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
native public fun keccak256(bytes: vector<u8>): vector<u8>;
+
+ + + +
+ + + +## Function `sha2_512` + +Returns the SHA2-512 hash of bytes. + + +
public fun sha2_512(bytes: vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
public fun sha2_512(bytes: vector<u8>): vector<u8> {
+    if(!features::sha_512_and_ripemd_160_enabled()) {
+        abort(std::error::invalid_state(E_NATIVE_FUN_NOT_AVAILABLE))
+    };
+
+    sha2_512_internal(bytes)
+}
+
+ + + +
+ + + +## Function `sha3_512` + +Returns the SHA3-512 hash of bytes. + + +
public fun sha3_512(bytes: vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
public fun sha3_512(bytes: vector<u8>): vector<u8> {
+    if(!features::sha_512_and_ripemd_160_enabled()) {
+        abort(std::error::invalid_state(E_NATIVE_FUN_NOT_AVAILABLE))
+    };
+
+    sha3_512_internal(bytes)
+}
+
+ + + +
+ + + +## Function `ripemd160` + +Returns the RIPEMD-160 hash of bytes. + +WARNING: Only 80-bit security is provided by this function. This means an adversary who can compute roughly 2^80 +hashes will, with high probability, find a collision x_1 != x_2 such that RIPEMD-160(x_1) = RIPEMD-160(x_2). + + +
public fun ripemd160(bytes: vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
public fun ripemd160(bytes: vector<u8>): vector<u8> {
+    if(!features::sha_512_and_ripemd_160_enabled()) {
+        abort(std::error::invalid_state(E_NATIVE_FUN_NOT_AVAILABLE))
+    };
+
+    ripemd160_internal(bytes)
+}
+
+ + + +
+ + + +## Function `blake2b_256` + +Returns the BLAKE2B-256 hash of bytes. + + +
public fun blake2b_256(bytes: vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
public fun blake2b_256(bytes: vector<u8>): vector<u8> {
+    if(!features::blake2b_256_enabled()) {
+        abort(std::error::invalid_state(E_NATIVE_FUN_NOT_AVAILABLE))
+    };
+
+    blake2b_256_internal(bytes)
+}
+
+ + + +
+ + + +## Function `sha2_512_internal` + +Returns the SHA2-512 hash of bytes. + + +
fun sha2_512_internal(bytes: vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
native fun sha2_512_internal(bytes: vector<u8>): vector<u8>;
+
+ + + +
+ + + +## Function `sha3_512_internal` + +Returns the SHA3-512 hash of bytes. + + +
fun sha3_512_internal(bytes: vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
native fun sha3_512_internal(bytes: vector<u8>): vector<u8>;
+
+ + + +
+ + + +## Function `ripemd160_internal` + +Returns the RIPEMD-160 hash of bytes. + +WARNING: Only 80-bit security is provided by this function. This means an adversary who can compute roughly 2^80 +hashes will, with high probability, find a collision x_1 != x_2 such that RIPEMD-160(x_1) = RIPEMD-160(x_2). + + +
fun ripemd160_internal(bytes: vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
native fun ripemd160_internal(bytes: vector<u8>): vector<u8>;
+
+ + + +
+ + + +## Function `blake2b_256_internal` + +Returns the BLAKE2B-256 hash of bytes. + + +
fun blake2b_256_internal(bytes: vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
native fun blake2b_256_internal(bytes: vector<u8>): vector<u8>;
+
+ + + +
+ + + +## Specification + + +spec_sip_hash is not assumed to be injective. + + + + + +
fun spec_sip_hash(bytes: vector<u8>): u64;
+
+ + +spec_keccak256 is an injective function. + + + + + +
fun spec_keccak256(bytes: vector<u8>): vector<u8>;
+axiom forall b1: vector<u8>, b2: vector<u8>:
+    (spec_keccak256(b1) == spec_keccak256(b2) ==> b1 == b2);
+
+ + +spec_sha2_512_internal is an injective function. + + + + + +
fun spec_sha2_512_internal(bytes: vector<u8>): vector<u8>;
+axiom forall b1: vector<u8>, b2: vector<u8>:
+    (spec_sha2_512_internal(b1) == spec_sha2_512_internal(b2) ==> b1 == b2);
+
+ + +spec_sha3_512_internal is an injective function. + + + + + +
fun spec_sha3_512_internal(bytes: vector<u8>): vector<u8>;
+axiom forall b1: vector<u8>, b2: vector<u8>:
+    (spec_sha3_512_internal(b1) == spec_sha3_512_internal(b2) ==> b1 == b2);
+
+ + +spec_ripemd160_internal is an injective function. + + + + + +
fun spec_ripemd160_internal(bytes: vector<u8>): vector<u8>;
+axiom forall b1: vector<u8>, b2: vector<u8>:
+    (spec_ripemd160_internal(b1) == spec_ripemd160_internal(b2) ==> b1 == b2);
+
+ + +spec_blake2b_256_internal is an injective function. + + + + + +
fun spec_blake2b_256_internal(bytes: vector<u8>): vector<u8>;
+axiom forall b1: vector<u8>, b2: vector<u8>:
+    (spec_blake2b_256_internal(b1) == spec_blake2b_256_internal(b2) ==> b1 == b2);
+
+ + + + + +### Function `sip_hash` + + +
public fun sip_hash(bytes: vector<u8>): u64
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] result == spec_sip_hash(bytes);
+
+ + + + + +### Function `sip_hash_from_value` + + +
public fun sip_hash_from_value<MoveValue>(v: &MoveValue): u64
+
+ + + + +
pragma opaque;
+ensures result == spec_sip_hash(bcs::serialize(v));
+
+ + + + + +### Function `keccak256` + + +
public fun keccak256(bytes: vector<u8>): vector<u8>
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] result == spec_keccak256(bytes);
+
+ + + + + +### Function `sha2_512` + + +
public fun sha2_512(bytes: vector<u8>): vector<u8>
+
+ + + + +
pragma opaque;
+aborts_if !features::spec_is_enabled(features::SHA_512_AND_RIPEMD_160_NATIVES);
+ensures result == spec_sha2_512_internal(bytes);
+
+ + + + + +### Function `sha3_512` + + +
public fun sha3_512(bytes: vector<u8>): vector<u8>
+
+ + + + +
pragma opaque;
+aborts_if !features::spec_is_enabled(features::SHA_512_AND_RIPEMD_160_NATIVES);
+ensures result == spec_sha3_512_internal(bytes);
+
+ + + + + +### Function `ripemd160` + + +
public fun ripemd160(bytes: vector<u8>): vector<u8>
+
+ + + + +
pragma opaque;
+aborts_if !features::spec_is_enabled(features::SHA_512_AND_RIPEMD_160_NATIVES);
+ensures result == spec_ripemd160_internal(bytes);
+
+ + + + + +### Function `blake2b_256` + + +
public fun blake2b_256(bytes: vector<u8>): vector<u8>
+
+ + + + +
pragma opaque;
+aborts_if !features::spec_is_enabled(features::BLAKE2B_256_NATIVE);
+ensures result == spec_blake2b_256_internal(bytes);
+
+ + + + + +### Function `sha2_512_internal` + + +
fun sha2_512_internal(bytes: vector<u8>): vector<u8>
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] result == spec_sha2_512_internal(bytes);
+
+ + + + + +### Function `sha3_512_internal` + + +
fun sha3_512_internal(bytes: vector<u8>): vector<u8>
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] result == spec_sha3_512_internal(bytes);
+
+ + + + + +### Function `ripemd160_internal` + + +
fun ripemd160_internal(bytes: vector<u8>): vector<u8>
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] result == spec_ripemd160_internal(bytes);
+
+ + + + + +### Function `blake2b_256_internal` + + +
fun blake2b_256_internal(bytes: vector<u8>): vector<u8>
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == spec_blake2b_256_internal(bytes);
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/math128.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/math128.md new file mode 100644 index 0000000000000..43dcaa13850cb --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/math128.md @@ -0,0 +1,624 @@ + + + +# Module `0x1::math128` + +Standard math utilities missing in the Move Language. + + +- [Constants](#@Constants_0) +- [Function `max`](#0x1_math128_max) +- [Function `min`](#0x1_math128_min) +- [Function `average`](#0x1_math128_average) +- [Function `gcd`](#0x1_math128_gcd) +- [Function `lcm`](#0x1_math128_lcm) +- [Function `mul_div`](#0x1_math128_mul_div) +- [Function `clamp`](#0x1_math128_clamp) +- [Function `pow`](#0x1_math128_pow) +- [Function `floor_log2`](#0x1_math128_floor_log2) +- [Function `log2`](#0x1_math128_log2) +- [Function `log2_64`](#0x1_math128_log2_64) +- [Function `sqrt`](#0x1_math128_sqrt) +- [Function `ceil_div`](#0x1_math128_ceil_div) +- [Specification](#@Specification_1) + - [Function `max`](#@Specification_1_max) + - [Function `min`](#@Specification_1_min) + - [Function `average`](#@Specification_1_average) + - [Function `clamp`](#@Specification_1_clamp) + - [Function `pow`](#@Specification_1_pow) + - [Function `floor_log2`](#@Specification_1_floor_log2) + - [Function `sqrt`](#@Specification_1_sqrt) + + +
use 0x1::error;
+use 0x1::fixed_point32;
+use 0x1::fixed_point64;
+
+ + + + + +## Constants + + + + +Cannot log2 the value 0 + + +
const EINVALID_ARG_FLOOR_LOG2: u64 = 1;
+
+ + + + + +## Function `max` + +Return the largest of two numbers. + + +
public fun max(a: u128, b: u128): u128
+
+ + + +
+Implementation + + +
public fun max(a: u128, b: u128): u128 {
+    if (a >= b) a else b
+}
+
+ + + +
+ + + +## Function `min` + +Return the smallest of two numbers. + + +
public fun min(a: u128, b: u128): u128
+
+ + + +
+Implementation + + +
public fun min(a: u128, b: u128): u128 {
+    if (a < b) a else b
+}
+
+ + + +
+ + + +## Function `average` + +Return the average of two. + + +
public fun average(a: u128, b: u128): u128
+
+ + + +
+Implementation + + +
public fun average(a: u128, b: u128): u128 {
+    if (a < b) {
+        a + (b - a) / 2
+    } else {
+        b + (a - b) / 2
+    }
+}
+
+ + + +
+ + + +## Function `gcd` + +Return greatest common divisor of a & b, via the Euclidean algorithm. + + +
public fun gcd(a: u128, b: u128): u128
+
+ + + +
+Implementation + + +
public inline fun gcd(a: u128, b: u128): u128 {
+    let (large, small) = if (a > b) (a, b) else (b, a);
+    while (small != 0) {
+        let tmp = small;
+        small = large % small;
+        large = tmp;
+    };
+    large
+}
+
+ + + +
+ + + +## Function `lcm` + +Return least common multiple of a & b + + +
public fun lcm(a: u128, b: u128): u128
+
+ + + +
+Implementation + + +
public inline fun lcm(a: u128, b: u128): u128 {
+    if (a == 0 || b == 0) {
+        0
+    } else {
+        a / gcd(a, b) * b
+    }
+}
+
+ + + +
+ + + +## Function `mul_div` + +Returns a * b / c going through u256 to prevent intermediate overflow + + +
public fun mul_div(a: u128, b: u128, c: u128): u128
+
+ + + +
+Implementation + + +
public inline fun mul_div(a: u128, b: u128, c: u128): u128 {
+    // Inline functions cannot take constants, as then every module using it needs the constant
+    assert!(c != 0, std::error::invalid_argument(4));
+    (((a as u256) * (b as u256) / (c as u256)) as u128)
+}
+
+ + + +
+ + + +## Function `clamp` + +Return x clamped to the interval [lower, upper]. + + +
public fun clamp(x: u128, lower: u128, upper: u128): u128
+
+ + + +
+Implementation + + +
public fun clamp(x: u128, lower: u128, upper: u128): u128 {
+    min(upper, max(lower, x))
+}
+
+ + + +
+ + + +## Function `pow` + +Return the value of n raised to power e + + +
public fun pow(n: u128, e: u128): u128
+
+ + + +
+Implementation + + +
public fun pow(n: u128, e: u128): u128 {
+    if (e == 0) {
+        1
+    } else {
+        let p = 1;
+        while (e > 1) {
+            if (e % 2 == 1) {
+                p = p * n;
+            };
+            e = e / 2;
+            n = n * n;
+        };
+        p * n
+    }
+}
+
+ + + +
+ + + +## Function `floor_log2` + +Returns floor(log2(x)) + + +
public fun floor_log2(x: u128): u8
+
+ + + +
+Implementation + + +
public fun floor_log2(x: u128): u8 {
+    let res = 0;
+    assert!(x != 0, std::error::invalid_argument(EINVALID_ARG_FLOOR_LOG2));
+    // Effectively the position of the most significant set bit
+    let n = 64;
+    while (n > 0) {
+        if (x >= (1 << n)) {
+            x = x >> n;
+            res = res + n;
+        };
+        n = n >> 1;
+    };
+    res
+}
+
+ + + +
+ + + +## Function `log2` + + + +
public fun log2(x: u128): fixed_point32::FixedPoint32
+
+ + + +
+Implementation + + +
public fun log2(x: u128): FixedPoint32 {
+    let integer_part = floor_log2(x);
+    // Normalize x to [1, 2) in fixed point 32.
+    if (x >= 1 << 32) {
+        x = x >> (integer_part - 32);
+    } else {
+        x = x << (32 - integer_part);
+    };
+    let frac = 0;
+    let delta = 1 << 31;
+    while (delta != 0) {
+        // log x = 1/2 log x^2
+        // x in [1, 2)
+        x = (x * x) >> 32;
+        // x is now in [1, 4)
+        // if x in [2, 4) then log x = 1 + log (x / 2)
+        if (x >= (2 << 32)) { frac = frac + delta; x = x >> 1; };
+        delta = delta >> 1;
+    };
+    fixed_point32::create_from_raw_value (((integer_part as u64) << 32) + frac)
+}
+
+ + + +
+ + + +## Function `log2_64` + + + +
public fun log2_64(x: u128): fixed_point64::FixedPoint64
+
+ + + +
+Implementation + + +
public fun log2_64(x: u128): FixedPoint64 {
+    let integer_part = floor_log2(x);
+    // Normalize x to [1, 2) in fixed point 63. To ensure x is smaller than 1<<64
+    if (x >= 1 << 63) {
+        x = x >> (integer_part - 63);
+    } else {
+        x = x << (63 - integer_part);
+    };
+    let frac = 0;
+    let delta = 1 << 63;
+    while (delta != 0) {
+        // log x = 1/2 log x^2
+        // x in [1, 2)
+        x = (x * x) >> 63;
+        // x is now in [1, 4)
+        // if x in [2, 4) then log x = 1 + log (x / 2)
+        if (x >= (2 << 63)) { frac = frac + delta; x = x >> 1; };
+        delta = delta >> 1;
+    };
+    fixed_point64::create_from_raw_value (((integer_part as u128) << 64) + frac)
+}
+
+ + + +
+ + + +## Function `sqrt` + +Returns square root of x, precisely floor(sqrt(x)) + + +
public fun sqrt(x: u128): u128
+
+ + + +
+Implementation + + +
public fun sqrt(x: u128): u128 {
+    if (x == 0) return 0;
+    // Note the plus 1 in the expression. Let n = floor_lg2(x) we have x in [2^n, 2^{n+1}) and thus the answer in
+    // the half-open interval [2^(n/2), 2^{(n+1)/2}). For even n we can write this as [2^(n/2), sqrt(2) 2^{n/2})
+    // for odd n [2^((n+1)/2)/sqrt(2), 2^((n+1)/2). For even n the left end point is integer for odd the right
+    // end point is integer. If we choose as our first approximation the integer end point we have as maximum
+    // relative error either (sqrt(2) - 1) or (1 - 1/sqrt(2)) both are smaller than 1/2.
+    let res = 1 << ((floor_log2(x) + 1) >> 1);
+    // We use standard Newton-Raphson iteration to improve the initial approximation.
+    // The error term evolves as delta_i+1 = delta_i^2 / 2 (quadratic convergence).
+    // It turns out that after 5 iterations the delta is smaller than 2^-64 and thus below the threshold.
+    res = (res + x / res) >> 1;
+    res = (res + x / res) >> 1;
+    res = (res + x / res) >> 1;
+    res = (res + x / res) >> 1;
+    res = (res + x / res) >> 1;
+    min(res, x / res)
+}
+
+ + + +
+ + + +## Function `ceil_div` + + + +
public fun ceil_div(x: u128, y: u128): u128
+
+ + + +
+Implementation + + +
public inline fun ceil_div(x: u128, y: u128): u128 {
+    // ceil_div(x, y) = floor((x + y - 1) / y) = floor((x - 1) / y) + 1
+    // (x + y - 1) could spuriously overflow, so we use the latter version
+    if (x == 0) {
+        // Inline functions cannot take constants, as then every module using it needs the constant
+        assert!(y != 0, std::error::invalid_argument(4));
+        0
+    }
+    else (x - 1) / y + 1
+}
+
+ + + +
+ + + +## Specification + + + + +### Function `max` + + +
public fun max(a: u128, b: u128): u128
+
+ + + + +
aborts_if false;
+ensures a >= b ==> result == a;
+ensures a < b ==> result == b;
+
+ + + + + +### Function `min` + + +
public fun min(a: u128, b: u128): u128
+
+ + + + +
aborts_if false;
+ensures a < b ==> result == a;
+ensures a >= b ==> result == b;
+
+ + + + + +### Function `average` + + +
public fun average(a: u128, b: u128): u128
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == (a + b) / 2;
+
+ + + + + +### Function `clamp` + + +
public fun clamp(x: u128, lower: u128, upper: u128): u128
+
+ + + + +
requires (lower <= upper);
+aborts_if false;
+ensures (lower <=x && x <= upper) ==> result == x;
+ensures (x < lower) ==> result == lower;
+ensures (upper < x) ==> result == upper;
+
+ + + + + +### Function `pow` + + +
public fun pow(n: u128, e: u128): u128
+
+ + + + +
pragma opaque;
+aborts_if [abstract] spec_pow(n, e) > MAX_U128;
+ensures [abstract] result == spec_pow(n, e);
+
+ + + + + +### Function `floor_log2` + + +
public fun floor_log2(x: u128): u8
+
+ + + + +
pragma opaque;
+aborts_if [abstract] x == 0;
+ensures [abstract] spec_pow(2, result) <= x;
+ensures [abstract] x < spec_pow(2, result+1);
+
+ + + + + +### Function `sqrt` + + +
public fun sqrt(x: u128): u128
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] x > 0 ==> result * result <= x;
+ensures [abstract] x > 0 ==> x < (result+1) * (result+1);
+
+ + + + + + + +
fun spec_pow(n: u128, e: u128): u128 {
+   if (e == 0) {
+       1
+   }
+   else {
+       n * spec_pow(n, e-1)
+   }
+}
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/math64.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/math64.md new file mode 100644 index 0000000000000..da9e48cefda55 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/math64.md @@ -0,0 +1,579 @@ + + + +# Module `0x1::math64` + +Standard math utilities missing in the Move Language. + + +- [Constants](#@Constants_0) +- [Function `max`](#0x1_math64_max) +- [Function `min`](#0x1_math64_min) +- [Function `average`](#0x1_math64_average) +- [Function `gcd`](#0x1_math64_gcd) +- [Function `lcm`](#0x1_math64_lcm) +- [Function `mul_div`](#0x1_math64_mul_div) +- [Function `clamp`](#0x1_math64_clamp) +- [Function `pow`](#0x1_math64_pow) +- [Function `floor_log2`](#0x1_math64_floor_log2) +- [Function `log2`](#0x1_math64_log2) +- [Function `sqrt`](#0x1_math64_sqrt) +- [Function `ceil_div`](#0x1_math64_ceil_div) +- [Specification](#@Specification_1) + - [Function `max`](#@Specification_1_max) + - [Function `min`](#@Specification_1_min) + - [Function `average`](#@Specification_1_average) + - [Function `clamp`](#@Specification_1_clamp) + - [Function `pow`](#@Specification_1_pow) + - [Function `floor_log2`](#@Specification_1_floor_log2) + - [Function `sqrt`](#@Specification_1_sqrt) + + +
use 0x1::error;
+use 0x1::fixed_point32;
+
+ + + + + +## Constants + + + + +Cannot log2 the value 0 + + +
const EINVALID_ARG_FLOOR_LOG2: u64 = 1;
+
+ + + + + +## Function `max` + +Return the largest of two numbers. + + +
public fun max(a: u64, b: u64): u64
+
+ + + +
+Implementation + + +
public fun max(a: u64, b: u64): u64 {
+    if (a >= b) a else b
+}
+
+ + + +
+ + + +## Function `min` + +Return the smallest of two numbers. + + +
public fun min(a: u64, b: u64): u64
+
+ + + +
+Implementation + + +
public fun min(a: u64, b: u64): u64 {
+    if (a < b) a else b
+}
+
+ + + +
+ + + +## Function `average` + +Return the average of two. + + +
public fun average(a: u64, b: u64): u64
+
+ + + +
+Implementation + + +
public fun average(a: u64, b: u64): u64 {
+    if (a < b) {
+        a + (b - a) / 2
+    } else {
+        b + (a - b) / 2
+    }
+}
+
+ + + +
+ + + +## Function `gcd` + +Return greatest common divisor of a & b, via the Euclidean algorithm. + + +
public fun gcd(a: u64, b: u64): u64
+
+ + + +
+Implementation + + +
public inline fun gcd(a: u64, b: u64): u64 {
+    let (large, small) = if (a > b) (a, b) else (b, a);
+    while (small != 0) {
+        let tmp = small;
+        small = large % small;
+        large = tmp;
+    };
+    large
+}
+
+ + + +
+ + + +## Function `lcm` + +Returns least common multiple of a & b. + + +
public fun lcm(a: u64, b: u64): u64
+
+ + + +
+Implementation + + +
public inline fun lcm(a: u64, b: u64): u64 {
+    if (a == 0 || b == 0) {
+        0
+    } else {
+        a / gcd(a, b) * b
+    }
+}
+
+ + + +
+ + + +## Function `mul_div` + +Returns a * b / c going through u128 to prevent intermediate overflow + + +
public fun mul_div(a: u64, b: u64, c: u64): u64
+
+ + + +
+Implementation + + +
public inline fun mul_div(a: u64, b: u64, c: u64): u64 {
+    // Inline functions cannot take constants, as then every module using it needs the constant
+    assert!(c != 0, std::error::invalid_argument(4));
+    (((a as u128) * (b as u128) / (c as u128)) as u64)
+}
+
+ + + +
+ + + +## Function `clamp` + +Return x clamped to the interval [lower, upper]. + + +
public fun clamp(x: u64, lower: u64, upper: u64): u64
+
+ + + +
+Implementation + + +
public fun clamp(x: u64, lower: u64, upper: u64): u64 {
+    min(upper, max(lower, x))
+}
+
+ + + +
+ + + +## Function `pow` + +Return the value of n raised to power e + + +
public fun pow(n: u64, e: u64): u64
+
+ + + +
+Implementation + + +
public fun pow(n: u64, e: u64): u64 {
+    if (e == 0) {
+        1
+    } else {
+        let p = 1;
+        while (e > 1) {
+            if (e % 2 == 1) {
+                p = p * n;
+            };
+            e = e / 2;
+            n = n * n;
+        };
+        p * n
+    }
+}
+
+ + + +
+ + + +## Function `floor_log2` + +Returns floor(lg2(x)) + + +
public fun floor_log2(x: u64): u8
+
+ + + +
+Implementation + + +
public fun floor_log2(x: u64): u8 {
+    let res = 0;
+    assert!(x != 0, std::error::invalid_argument(EINVALID_ARG_FLOOR_LOG2));
+    // Effectively the position of the most significant set bit
+    let n = 32;
+    while (n > 0) {
+        if (x >= (1 << n)) {
+            x = x >> n;
+            res = res + n;
+        };
+        n = n >> 1;
+    };
+    res
+}
+
+ + + +
+ + + +## Function `log2` + + + +
public fun log2(x: u64): fixed_point32::FixedPoint32
+
+ + + +
+Implementation + + +
public fun log2(x: u64): FixedPoint32 {
+    let integer_part = floor_log2(x);
+    // Normalize x to [1, 2) in fixed point 32.
+    let y = (if (x >= 1 << 32) {
+        x >> (integer_part - 32)
+    } else {
+        x << (32 - integer_part)
+    } as u128);
+    let frac = 0;
+    let delta = 1 << 31;
+    while (delta != 0) {
+        // log x = 1/2 log x^2
+        // x in [1, 2)
+        y = (y * y) >> 32;
+        // x is now in [1, 4)
+        // if x in [2, 4) then log x = 1 + log (x / 2)
+        if (y >= (2 << 32)) { frac = frac + delta; y = y >> 1; };
+        delta = delta >> 1;
+    };
+    fixed_point32::create_from_raw_value (((integer_part as u64) << 32) + frac)
+}
+
+ + + +
+ + + +## Function `sqrt` + +Returns square root of x, precisely floor(sqrt(x)) + + +
public fun sqrt(x: u64): u64
+
+ + + +
+Implementation + + +
public fun sqrt(x: u64): u64 {
+    if (x == 0) return 0;
+    // Note the plus 1 in the expression. Let n = floor_lg2(x) we have x in [2^n, 2^(n+1)) and thus the answer in
+    // the half-open interval [2^(n/2), 2^((n+1)/2)). For even n we can write this as [2^(n/2), sqrt(2) 2^(n/2))
+    // for odd n [2^((n+1)/2)/sqrt(2), 2^((n+1)/2)). For even n the left end point is integer for odd the right
+    // end point is integer. If we choose as our first approximation the integer end point we have as maximum
+    // relative error either (sqrt(2) - 1) or (1 - 1/sqrt(2)) both are smaller than 1/2.
+    let res = 1 << ((floor_log2(x) + 1) >> 1);
+    // We use standard Newton-Raphson iteration to improve the initial approximation.
+    // The error term evolves as delta_i+1 = delta_i^2 / 2 (quadratic convergence).
+    // It turns out that after 4 iterations the delta is smaller than 2^-32 and thus below the threshold.
+    res = (res + x / res) >> 1;
+    res = (res + x / res) >> 1;
+    res = (res + x / res) >> 1;
+    res = (res + x / res) >> 1;
+    min(res, x / res)
+}
+
+ + + +
+ + + +## Function `ceil_div` + + + +
public fun ceil_div(x: u64, y: u64): u64
+
+ + + +
+Implementation + + +
public inline fun ceil_div(x: u64, y: u64): u64 {
+    // ceil_div(x, y) = floor((x + y - 1) / y) = floor((x - 1) / y) + 1
+    // (x + y - 1) could spuriously overflow, so we use the latter version
+    if (x == 0) {
+        // Inline functions cannot take constants, as then every module using it needs the constant
+        assert!(y != 0, std::error::invalid_argument(4));
+        0
+    }
+    else (x - 1) / y + 1
+}
+
+ + + +
+ + + +## Specification + + + + +### Function `max` + + +
public fun max(a: u64, b: u64): u64
+
+ + + + +
aborts_if false;
+ensures a >= b ==> result == a;
+ensures a < b ==> result == b;
+
+ + + + + +### Function `min` + + +
public fun min(a: u64, b: u64): u64
+
+ + + + +
aborts_if false;
+ensures a < b ==> result == a;
+ensures a >= b ==> result == b;
+
+ + + + + +### Function `average` + + +
public fun average(a: u64, b: u64): u64
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == (a + b) / 2;
+
+ + + + + +### Function `clamp` + + +
public fun clamp(x: u64, lower: u64, upper: u64): u64
+
+ + + + +
requires (lower <= upper);
+aborts_if false;
+ensures (lower <=x && x <= upper) ==> result == x;
+ensures (x < lower) ==> result == lower;
+ensures (upper < x) ==> result == upper;
+
+ + + + + +### Function `pow` + + +
public fun pow(n: u64, e: u64): u64
+
+ + + + +
pragma opaque;
+aborts_if [abstract] spec_pow(n, e) > MAX_U64;
+ensures [abstract] result == spec_pow(n, e);
+
+ + + + + +### Function `floor_log2` + + +
public fun floor_log2(x: u64): u8
+
+ + + + +
pragma opaque;
+aborts_if [abstract] x == 0;
+ensures [abstract] spec_pow(2, result) <= x;
+ensures [abstract] x < spec_pow(2, result+1);
+
+ + + + + +### Function `sqrt` + + +
public fun sqrt(x: u64): u64
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] x > 0 ==> result * result <= x;
+ensures [abstract] x > 0 ==> x < (result+1) * (result+1);
+
+ + + + + + + +
fun spec_pow(n: u64, e: u64): u64 {
+   if (e == 0) {
+       1
+   }
+   else {
+       n * spec_pow(n, e-1)
+   }
+}
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/math_fixed.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/math_fixed.md new file mode 100644 index 0000000000000..b1099005aad39 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/math_fixed.md @@ -0,0 +1,299 @@ + + + +# Module `0x1::math_fixed` + +Standard math utilities missing in the Move Language. + + +- [Constants](#@Constants_0) +- [Function `sqrt`](#0x1_math_fixed_sqrt) +- [Function `exp`](#0x1_math_fixed_exp) +- [Function `log2_plus_32`](#0x1_math_fixed_log2_plus_32) +- [Function `ln_plus_32ln2`](#0x1_math_fixed_ln_plus_32ln2) +- [Function `pow`](#0x1_math_fixed_pow) +- [Function `mul_div`](#0x1_math_fixed_mul_div) +- [Function `exp_raw`](#0x1_math_fixed_exp_raw) +- [Function `pow_raw`](#0x1_math_fixed_pow_raw) + + +
use 0x1::error;
+use 0x1::fixed_point32;
+use 0x1::math128;
+
+ + + + + +## Constants + + + + +Abort code on overflow + + +
const EOVERFLOW_EXP: u64 = 1;
+
+ + + + + +Natural log 2 in 32 bit fixed point + + +
const LN2: u128 = 2977044472;
+
+ + + + + + + +
const LN2_X_32: u64 = 95265423104;
+
+ + + + + +## Function `sqrt` + +Square root of fixed point number + + +
public fun sqrt(x: fixed_point32::FixedPoint32): fixed_point32::FixedPoint32
+
+ + + +
+Implementation + + +
public fun sqrt(x: FixedPoint32): FixedPoint32 {
+    let y = (fixed_point32::get_raw_value(x) as u128);
+    fixed_point32::create_from_raw_value((math128::sqrt(y << 32) as u64))
+}
+
+ + + +
+ + + +## Function `exp` + +Exponent function with a precision of 9 digits. + + +
public fun exp(x: fixed_point32::FixedPoint32): fixed_point32::FixedPoint32
+
+ + + +
+Implementation + + +
public fun exp(x: FixedPoint32): FixedPoint32 {
+    let raw_value = (fixed_point32::get_raw_value(x) as u128);
+    fixed_point32::create_from_raw_value((exp_raw(raw_value) as u64))
+}
+
+ + + +
+ + + +## Function `log2_plus_32` + +Because log2 is negative for values < 1 we instead return log2(x) + 32 which +is positive for all values of x. + + +
public fun log2_plus_32(x: fixed_point32::FixedPoint32): fixed_point32::FixedPoint32
+
+ + + +
+Implementation + + +
public fun log2_plus_32(x: FixedPoint32): FixedPoint32 {
+    let raw_value = (fixed_point32::get_raw_value(x) as u128);
+    math128::log2(raw_value)
+}
+
+ + + +
+ + + +## Function `ln_plus_32ln2` + + + +
public fun ln_plus_32ln2(x: fixed_point32::FixedPoint32): fixed_point32::FixedPoint32
+
+ + + +
+Implementation + + +
public fun ln_plus_32ln2(x: FixedPoint32): FixedPoint32 {
+    let raw_value = (fixed_point32::get_raw_value(x) as u128);
+    let x = (fixed_point32::get_raw_value(math128::log2(raw_value)) as u128);
+    fixed_point32::create_from_raw_value((x * LN2 >> 32 as u64))
+}
+
+ + + +
+ + + +## Function `pow` + +Integer power of a fixed point number + + +
public fun pow(x: fixed_point32::FixedPoint32, n: u64): fixed_point32::FixedPoint32
+
+ + + +
+Implementation + + +
public fun pow(x: FixedPoint32, n: u64): FixedPoint32 {
+    let raw_value = (fixed_point32::get_raw_value(x) as u128);
+    fixed_point32::create_from_raw_value((pow_raw(raw_value, (n as u128)) as u64))
+}
+
+ + + +
+ + + +## Function `mul_div` + +Specialized function for x * y / z that omits intermediate shifting + + +
public fun mul_div(x: fixed_point32::FixedPoint32, y: fixed_point32::FixedPoint32, z: fixed_point32::FixedPoint32): fixed_point32::FixedPoint32
+
+ + + +
+Implementation + + +
public fun mul_div(x: FixedPoint32, y: FixedPoint32, z: FixedPoint32): FixedPoint32 {
+    let a = fixed_point32::get_raw_value(x);
+    let b = fixed_point32::get_raw_value(y);
+    let c = fixed_point32::get_raw_value(z);
+    fixed_point32::create_from_raw_value (math64::mul_div(a, b, c))
+}
+
+ + + +
+ + + +## Function `exp_raw` + + + +
fun exp_raw(x: u128): u128
+
+ + + +
+Implementation + + +
fun exp_raw(x: u128): u128 {
+    // exp(x / 2^32) = 2^(x / (2^32 * ln(2))) = 2^(floor(x / (2^32 * ln(2))) + frac(x / (2^32 * ln(2))))
+    let shift_long = x / LN2;
+    assert!(shift_long <= 31, std::error::invalid_state(EOVERFLOW_EXP));
+    let shift = (shift_long as u8);
+    let remainder = x % LN2;
+    // At this point we want to calculate 2^(remainder / ln2) << shift
+    // ln2 = 595528 * 4999 which means
+    let bigfactor = 595528;
+    let exponent = remainder / bigfactor;
+    let x = remainder % bigfactor;
+    // 2^(remainder / ln2) = (2^(1/4999))^exponent * exp(x / 2^32)
+    let roottwo = 4295562865;  // fixed point representation of 2^(1/4999)
+    // This has an error of 5000 / (4 * 10^9), roughly 6 digits of precision
+    let power = pow_raw(roottwo, exponent);
+    let eps_correction = 1241009291;
+    power = power + ((power * eps_correction * exponent) >> 64);
+    // x is fixed point number smaller than 595528/2^32 < 0.00014 so we need only 2 Taylor steps
+    // to get the 6 digits of precision
+    let taylor1 = (power * x) >> (32 - shift);
+    let taylor2 = (taylor1 * x) >> 32;
+    let taylor3 = (taylor2 * x) >> 32;
+    (power << shift) + taylor1 + taylor2 / 2 + taylor3 / 6
+}
+
+ + + +
+ + + +## Function `pow_raw` + + + +
fun pow_raw(x: u128, n: u128): u128
+
+ + + +
+Implementation + + +
fun pow_raw(x: u128, n: u128): u128 {
+    let res: u256 = 1 << 64;
+    x = x << 32;
+    while (n != 0) {
+        if (n & 1 != 0) {
+            res = (res * (x as u256)) >> 64;
+        };
+        n = n >> 1;
+        x = ((((x as u256) * (x as u256)) >> 64) as u128);
+    };
+    ((res >> 32) as u128)
+}
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/math_fixed64.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/math_fixed64.md new file mode 100644 index 0000000000000..be0bd197ae48a --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/math_fixed64.md @@ -0,0 +1,294 @@ + + + +# Module `0x1::math_fixed64` + +Standard math utilities missing in the Move Language. + + +- [Constants](#@Constants_0) +- [Function `sqrt`](#0x1_math_fixed64_sqrt) +- [Function `exp`](#0x1_math_fixed64_exp) +- [Function `log2_plus_64`](#0x1_math_fixed64_log2_plus_64) +- [Function `ln_plus_32ln2`](#0x1_math_fixed64_ln_plus_32ln2) +- [Function `pow`](#0x1_math_fixed64_pow) +- [Function `mul_div`](#0x1_math_fixed64_mul_div) +- [Function `exp_raw`](#0x1_math_fixed64_exp_raw) +- [Function `pow_raw`](#0x1_math_fixed64_pow_raw) + + +
use 0x1::error;
+use 0x1::fixed_point64;
+use 0x1::math128;
+
+ + + + + +## Constants + + + + +Abort code on overflow + + +
const EOVERFLOW_EXP: u64 = 1;
+
+ + + + + +Natural log 2 in 64 bit fixed point + + +
const LN2: u256 = 12786308645202655660;
+
+ + + + + +## Function `sqrt` + +Square root of fixed point number + + +
public fun sqrt(x: fixed_point64::FixedPoint64): fixed_point64::FixedPoint64
+
+ + + +
+Implementation + + +
public fun sqrt(x: FixedPoint64): FixedPoint64 {
+    let y = fixed_point64::get_raw_value(x);
+    let z = (math128::sqrt(y) << 32 as u256);
+    z = (z + ((y as u256) << 64) / z) >> 1;
+    fixed_point64::create_from_raw_value((z as u128))
+}
+
+ + + +
+ + + +## Function `exp` + +Exponent function with a precision of 9 digits. + + +
public fun exp(x: fixed_point64::FixedPoint64): fixed_point64::FixedPoint64
+
+ + + +
+Implementation + + +
public fun exp(x: FixedPoint64): FixedPoint64 {
+    let raw_value = (fixed_point64::get_raw_value(x) as u256);
+    fixed_point64::create_from_raw_value((exp_raw(raw_value) as u128))
+}
+
+ + + +
+ + + +## Function `log2_plus_64` + +Because log2 is negative for values < 1 we instead return log2(x) + 64 which +is positive for all values of x. + + +
public fun log2_plus_64(x: fixed_point64::FixedPoint64): fixed_point64::FixedPoint64
+
+ + + +
+Implementation + + +
public fun log2_plus_64(x: FixedPoint64): FixedPoint64 {
+    let raw_value = (fixed_point64::get_raw_value(x) as u128);
+    math128::log2_64(raw_value)
+}
+
+ + + +
+ + + +## Function `ln_plus_32ln2` + + + +
public fun ln_plus_32ln2(x: fixed_point64::FixedPoint64): fixed_point64::FixedPoint64
+
+ + + +
+Implementation + + +
public fun ln_plus_32ln2(x: FixedPoint64): FixedPoint64 {
+    let raw_value = fixed_point64::get_raw_value(x);
+    let x = (fixed_point64::get_raw_value(math128::log2_64(raw_value)) as u256);
+    fixed_point64::create_from_raw_value(((x * LN2) >> 64 as u128))
+}
+
+ + + +
+ + + +## Function `pow` + +Integer power of a fixed point number + + +
public fun pow(x: fixed_point64::FixedPoint64, n: u64): fixed_point64::FixedPoint64
+
+ + + +
+Implementation + + +
public fun pow(x: FixedPoint64, n: u64): FixedPoint64 {
+    let raw_value = (fixed_point64::get_raw_value(x) as u256);
+    fixed_point64::create_from_raw_value((pow_raw(raw_value, (n as u128)) as u128))
+}
+
+ + + +
+ + + +## Function `mul_div` + +Specialized function for x * y / z that omits intermediate shifting + + +
public fun mul_div(x: fixed_point64::FixedPoint64, y: fixed_point64::FixedPoint64, z: fixed_point64::FixedPoint64): fixed_point64::FixedPoint64
+
+ + + +
+Implementation + + +
public fun mul_div(x: FixedPoint64, y: FixedPoint64, z: FixedPoint64): FixedPoint64 {
+    let a = fixed_point64::get_raw_value(x);
+    let b = fixed_point64::get_raw_value(y);
+    let c = fixed_point64::get_raw_value(z);
+    fixed_point64::create_from_raw_value (math128::mul_div(a, b, c))
+}
+
+ + + +
+ + + +## Function `exp_raw` + + + +
fun exp_raw(x: u256): u256
+
+ + + +
+Implementation + + +
fun exp_raw(x: u256): u256 {
+    // exp(x / 2^64) = 2^(x / (2^64 * ln(2))) = 2^(floor(x / (2^64 * ln(2))) + frac(x / (2^64 * ln(2))))
+    let shift_long = x / LN2;
+    assert!(shift_long <= 63, std::error::invalid_state(EOVERFLOW_EXP));
+    let shift = (shift_long as u8);
+    let remainder = x % LN2;
+    // At this point we want to calculate 2^(remainder / ln2) << shift
+    // ln2 = 580 * 22045359733108027
+    let bigfactor = 22045359733108027;
+    let exponent = remainder / bigfactor;
+    let x = remainder % bigfactor;
+    // 2^(remainder / ln2) = (2^(1/580))^exponent * exp(x / 2^64)
+    let roottwo = 18468802611690918839;  // fixed point representation of 2^(1/580)
+    // 2^(1/580) = roottwo(1 - eps), so the number we seek is roottwo^exponent (1 - eps * exponent)
+    let power = pow_raw(roottwo, (exponent as u128));
+    let eps_correction = 219071715585908898;
+    power = power - ((power * eps_correction * exponent) >> 128);
+    // x is fixed point number smaller than bigfactor/2^64 < 0.0011 so we need only 5 Taylor steps
+    // to get the 15 digits of precision
+    let taylor1 = (power * x) >> (64 - shift);
+    let taylor2 = (taylor1 * x) >> 64;
+    let taylor3 = (taylor2 * x) >> 64;
+    let taylor4 = (taylor3 * x) >> 64;
+    let taylor5 = (taylor4 * x) >> 64;
+    let taylor6 = (taylor5 * x) >> 64;
+    (power << shift) + taylor1 + taylor2 / 2 + taylor3 / 6 + taylor4 / 24 + taylor5 / 120 + taylor6 / 720
+}
+
+ + + +
+ + + +## Function `pow_raw` + + + +
fun pow_raw(x: u256, n: u128): u256
+
+ + + +
+Implementation + + +
fun pow_raw(x: u256, n: u128): u256 {
+    let res: u256 = 1 << 64;
+    while (n != 0) {
+        if (n & 1 != 0) {
+            res = (res * x) >> 64;
+        };
+        n = n >> 1;
+        x = (x * x) >> 64;
+    };
+    res
+}
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/multi_ed25519.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/multi_ed25519.md new file mode 100644 index 0000000000000..1105c7f983be6 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/multi_ed25519.md @@ -0,0 +1,1324 @@ + + + +# Module `0x1::multi_ed25519` + +Exports MultiEd25519 multi-signatures in Move. +This module has the exact same interface as the Ed25519 module. + + +- [Struct `UnvalidatedPublicKey`](#0x1_multi_ed25519_UnvalidatedPublicKey) +- [Struct `ValidatedPublicKey`](#0x1_multi_ed25519_ValidatedPublicKey) +- [Struct `Signature`](#0x1_multi_ed25519_Signature) +- [Constants](#@Constants_0) +- [Function `new_unvalidated_public_key_from_bytes`](#0x1_multi_ed25519_new_unvalidated_public_key_from_bytes) +- [Function `new_validated_public_key_from_bytes`](#0x1_multi_ed25519_new_validated_public_key_from_bytes) +- [Function `new_validated_public_key_from_bytes_v2`](#0x1_multi_ed25519_new_validated_public_key_from_bytes_v2) +- [Function `new_signature_from_bytes`](#0x1_multi_ed25519_new_signature_from_bytes) +- [Function `public_key_to_unvalidated`](#0x1_multi_ed25519_public_key_to_unvalidated) +- [Function `public_key_into_unvalidated`](#0x1_multi_ed25519_public_key_into_unvalidated) +- [Function `unvalidated_public_key_to_bytes`](#0x1_multi_ed25519_unvalidated_public_key_to_bytes) +- [Function `validated_public_key_to_bytes`](#0x1_multi_ed25519_validated_public_key_to_bytes) +- [Function `signature_to_bytes`](#0x1_multi_ed25519_signature_to_bytes) +- [Function `public_key_validate`](#0x1_multi_ed25519_public_key_validate) +- [Function `public_key_validate_v2`](#0x1_multi_ed25519_public_key_validate_v2) +- [Function `signature_verify_strict`](#0x1_multi_ed25519_signature_verify_strict) +- [Function `signature_verify_strict_t`](#0x1_multi_ed25519_signature_verify_strict_t) +- [Function 
`unvalidated_public_key_to_authentication_key`](#0x1_multi_ed25519_unvalidated_public_key_to_authentication_key) +- [Function `unvalidated_public_key_num_sub_pks`](#0x1_multi_ed25519_unvalidated_public_key_num_sub_pks) +- [Function `unvalidated_public_key_threshold`](#0x1_multi_ed25519_unvalidated_public_key_threshold) +- [Function `validated_public_key_to_authentication_key`](#0x1_multi_ed25519_validated_public_key_to_authentication_key) +- [Function `validated_public_key_num_sub_pks`](#0x1_multi_ed25519_validated_public_key_num_sub_pks) +- [Function `validated_public_key_threshold`](#0x1_multi_ed25519_validated_public_key_threshold) +- [Function `check_and_get_threshold`](#0x1_multi_ed25519_check_and_get_threshold) +- [Function `public_key_bytes_to_authentication_key`](#0x1_multi_ed25519_public_key_bytes_to_authentication_key) +- [Function `public_key_validate_internal`](#0x1_multi_ed25519_public_key_validate_internal) +- [Function `public_key_validate_v2_internal`](#0x1_multi_ed25519_public_key_validate_v2_internal) +- [Function `signature_verify_strict_internal`](#0x1_multi_ed25519_signature_verify_strict_internal) +- [Specification](#@Specification_1) + - [Function `new_unvalidated_public_key_from_bytes`](#@Specification_1_new_unvalidated_public_key_from_bytes) + - [Function `new_validated_public_key_from_bytes`](#@Specification_1_new_validated_public_key_from_bytes) + - [Function `new_validated_public_key_from_bytes_v2`](#@Specification_1_new_validated_public_key_from_bytes_v2) + - [Function `new_signature_from_bytes`](#@Specification_1_new_signature_from_bytes) + - [Function `unvalidated_public_key_num_sub_pks`](#@Specification_1_unvalidated_public_key_num_sub_pks) + - [Function `unvalidated_public_key_threshold`](#@Specification_1_unvalidated_public_key_threshold) + - [Function `validated_public_key_num_sub_pks`](#@Specification_1_validated_public_key_num_sub_pks) + - [Function 
`validated_public_key_threshold`](#@Specification_1_validated_public_key_threshold) + - [Function `check_and_get_threshold`](#@Specification_1_check_and_get_threshold) + - [Function `public_key_bytes_to_authentication_key`](#@Specification_1_public_key_bytes_to_authentication_key) + - [Function `public_key_validate_internal`](#@Specification_1_public_key_validate_internal) + - [Function `public_key_validate_v2_internal`](#@Specification_1_public_key_validate_v2_internal) + - [Function `signature_verify_strict_internal`](#@Specification_1_signature_verify_strict_internal) + - [Helper functions](#@Helper_functions_2) + + +
use 0x1::bcs;
+use 0x1::ed25519;
+use 0x1::error;
+use 0x1::features;
+use 0x1::hash;
+use 0x1::option;
+
+ + + + + +## Struct `UnvalidatedPublicKey` + +An *unvalidated*, k out of n MultiEd25519 public key. The bytes field contains (1) several chunks of +ed25519::PUBLIC_KEY_NUM_BYTES bytes, each encoding a Ed25519 PK, and (2) a single byte encoding the threshold k. +*Unvalidated* means there is no guarantee that the underlying PKs are valid elliptic curve points of non-small +order. + + +
struct UnvalidatedPublicKey has copy, drop, store
+
+ + + +
+Fields + + +
+
+bytes: vector<u8> +
+
+ +
+
+ + +
+ + + +## Struct `ValidatedPublicKey` + +A *validated* k out of n MultiEd25519 public key. *Validated* means that all the underlying PKs will be +elliptic curve points that are NOT of small-order. It does not necessarily mean they will be prime-order points. +This struct encodes the public key exactly as UnvalidatedPublicKey. + +For now, this struct is not used in any verification functions, but it might be in the future. + + +
struct ValidatedPublicKey has copy, drop, store
+
+ + + +
+Fields + + +
+
+bytes: vector<u8> +
+
+ +
+
+ + +
+ + + +## Struct `Signature` + +A purported MultiEd25519 multi-signature that can be verified via signature_verify_strict or +signature_verify_strict_t. The bytes field contains (1) several chunks of ed25519::SIGNATURE_NUM_BYTES +bytes, each encoding a Ed25519 signature, and (2) a BITMAP_NUM_OF_BYTES-byte bitmap encoding the signer +identities. + + +
struct Signature has copy, drop, store
+
+ + + +
+Fields + + +
+
+bytes: vector<u8> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +The native functions have not been rolled out yet. + + +
const E_NATIVE_FUN_NOT_AVAILABLE: u64 = 4;
+
+ + + + + +Wrong number of bytes were given as input when deserializing an Ed25519 public key. + + +
const E_WRONG_PUBKEY_SIZE: u64 = 1;
+
+ + + + + +Wrong number of bytes were given as input when deserializing an Ed25519 signature. + + +
const E_WRONG_SIGNATURE_SIZE: u64 = 2;
+
+ + + + + +The identifier of the MultiEd25519 signature scheme, which is used when deriving Aptos authentication keys by hashing +it together with an MultiEd25519 public key. + + +
const SIGNATURE_SCHEME_ID: u8 = 1;
+
+ + + + + +When serializing a MultiEd25519 signature, the bitmap that indicates the signers will be encoded using this many +bytes. + + +
const BITMAP_NUM_OF_BYTES: u64 = 4;
+
+ + + + + +The threshold must be in the range [1, n], where n is the total number of signers. + + +
const E_INVALID_THRESHOLD_OR_NUMBER_OF_SIGNERS: u64 = 3;
+
+ + + + + +The size of an individual Ed25519 public key, in bytes. +(A MultiEd25519 public key consists of several of these, plus the threshold.) + + +
const INDIVIDUAL_PUBLIC_KEY_NUM_BYTES: u64 = 32;
+
+ + + + + +The size of an individual Ed25519 signature, in bytes. +(A MultiEd25519 signature consists of several of these, plus the signer bitmap.) + + +
const INDIVIDUAL_SIGNATURE_NUM_BYTES: u64 = 64;
+
+ + + + + +Max number of ed25519 public keys allowed in multi-ed25519 keys + + +
const MAX_NUMBER_OF_PUBLIC_KEYS: u64 = 32;
+
+ + + + + +When serializing a MultiEd25519 public key, the threshold k will be encoded using this many bytes. + + +
const THRESHOLD_SIZE_BYTES: u64 = 1;
+
+ + + + + +## Function `new_unvalidated_public_key_from_bytes` + +Parses the input bytes as an *unvalidated* MultiEd25519 public key. + +NOTE: This function could have also checked that the # of sub-PKs is > 0, but it did not. However, since such +invalid PKs are rejected during signature verification (see bugfix_unvalidated_pk_from_zero_subpks) they +will not cause problems. + +We could fix this API by adding a new one that checks the # of sub-PKs is > 0, but it is likely not a good idea +to reproduce the PK validation logic in Move. We should not have done so in the first place. Instead, we will +leave it as is and continue assuming UnvalidatedPublicKey objects could be invalid PKs that will safely be +rejected during signature verification. + + +
public fun new_unvalidated_public_key_from_bytes(bytes: vector<u8>): multi_ed25519::UnvalidatedPublicKey
+
+ + + +
+Implementation + + +
public fun new_unvalidated_public_key_from_bytes(bytes: vector<u8>): UnvalidatedPublicKey {
+    let len = vector::length(&bytes);
+    let num_sub_pks = len / INDIVIDUAL_PUBLIC_KEY_NUM_BYTES;
+
+    assert!(num_sub_pks <= MAX_NUMBER_OF_PUBLIC_KEYS, error::invalid_argument(E_WRONG_PUBKEY_SIZE));
+    assert!(len % INDIVIDUAL_PUBLIC_KEY_NUM_BYTES == THRESHOLD_SIZE_BYTES, error::invalid_argument(E_WRONG_PUBKEY_SIZE));
+    UnvalidatedPublicKey { bytes }
+}
+
+ + + +
+ + + +## Function `new_validated_public_key_from_bytes` + +DEPRECATED: Use new_validated_public_key_from_bytes_v2 instead. See public_key_validate_internal comments. + +(Incorrectly) parses the input bytes as a *validated* MultiEd25519 public key. + + +
public fun new_validated_public_key_from_bytes(bytes: vector<u8>): option::Option<multi_ed25519::ValidatedPublicKey>
+
+ + + +
+Implementation + + +
public fun new_validated_public_key_from_bytes(bytes: vector<u8>): Option<ValidatedPublicKey> {
+    // Note that `public_key_validate_internal` will check that `vector::length(&bytes) / INDIVIDUAL_PUBLIC_KEY_NUM_BYTES <= MAX_NUMBER_OF_PUBLIC_KEYS`.
+    if (vector::length(&bytes) % INDIVIDUAL_PUBLIC_KEY_NUM_BYTES == THRESHOLD_SIZE_BYTES &&
+        public_key_validate_internal(bytes)) {
+        option::some(ValidatedPublicKey {
+            bytes
+        })
+    } else {
+        option::none<ValidatedPublicKey>()
+    }
+}
+
+ + + +
+ + + +## Function `new_validated_public_key_from_bytes_v2` + +Parses the input bytes as a *validated* MultiEd25519 public key (see public_key_validate_internal_v2). + + +
public fun new_validated_public_key_from_bytes_v2(bytes: vector<u8>): option::Option<multi_ed25519::ValidatedPublicKey>
+
+ + + +
+Implementation + + +
public fun new_validated_public_key_from_bytes_v2(bytes: vector<u8>): Option<ValidatedPublicKey> {
+    if (!features::multi_ed25519_pk_validate_v2_enabled()) {
+        abort(error::invalid_state(E_NATIVE_FUN_NOT_AVAILABLE))
+    };
+
+    if (public_key_validate_v2_internal(bytes)) {
+        option::some(ValidatedPublicKey {
+            bytes
+        })
+    } else {
+        option::none<ValidatedPublicKey>()
+    }
+}
+
+ + + +
+ + + +## Function `new_signature_from_bytes` + +Parses the input bytes as a purported MultiEd25519 multi-signature. + + +
public fun new_signature_from_bytes(bytes: vector<u8>): multi_ed25519::Signature
+
+ + + +
+Implementation + + +
public fun new_signature_from_bytes(bytes: vector<u8>): Signature {
+    assert!(vector::length(&bytes) % INDIVIDUAL_SIGNATURE_NUM_BYTES == BITMAP_NUM_OF_BYTES, error::invalid_argument(E_WRONG_SIGNATURE_SIZE));
+    Signature { bytes }
+}
+
+ + + +
+ + + +## Function `public_key_to_unvalidated` + +Converts a ValidatedPublicKey to an UnvalidatedPublicKey, which can be used in the strict verification APIs. + + +
public fun public_key_to_unvalidated(pk: &multi_ed25519::ValidatedPublicKey): multi_ed25519::UnvalidatedPublicKey
+
+ + + +
+Implementation + + +
public fun public_key_to_unvalidated(pk: &ValidatedPublicKey): UnvalidatedPublicKey {
+    UnvalidatedPublicKey {
+        bytes: pk.bytes
+    }
+}
+
+ + + +
+ + + +## Function `public_key_into_unvalidated` + +Moves a ValidatedPublicKey into an UnvalidatedPublicKey, which can be used in the strict verification APIs. + + +
public fun public_key_into_unvalidated(pk: multi_ed25519::ValidatedPublicKey): multi_ed25519::UnvalidatedPublicKey
+
+ + + +
+Implementation + + +
public fun public_key_into_unvalidated(pk: ValidatedPublicKey): UnvalidatedPublicKey {
+    UnvalidatedPublicKey {
+        bytes: pk.bytes
+    }
+}
+
+ + + +
+ + + +## Function `unvalidated_public_key_to_bytes` + +Serializes an UnvalidatedPublicKey struct to bytes (32 bytes per sub-PK, followed by 1 threshold byte). + + +
public fun unvalidated_public_key_to_bytes(pk: &multi_ed25519::UnvalidatedPublicKey): vector<u8>
+
+ + + +
+Implementation + + +
public fun unvalidated_public_key_to_bytes(pk: &UnvalidatedPublicKey): vector<u8> {
+    pk.bytes
+}
+
+ + + +
+ + + +## Function `validated_public_key_to_bytes` + +Serializes a ValidatedPublicKey struct to bytes (32 bytes per sub-PK, followed by 1 threshold byte). + + +
public fun validated_public_key_to_bytes(pk: &multi_ed25519::ValidatedPublicKey): vector<u8>
+
+ + + +
+Implementation + + +
public fun validated_public_key_to_bytes(pk: &ValidatedPublicKey): vector<u8> {
+    pk.bytes
+}
+
+ + + +
+ + + +## Function `signature_to_bytes` + +Serializes a Signature struct to bytes (64 bytes per sub-signature, followed by a 4-byte signer bitmap). + + +
public fun signature_to_bytes(sig: &multi_ed25519::Signature): vector<u8>
+
+ + + +
+Implementation + + +
public fun signature_to_bytes(sig: &Signature): vector<u8> {
+    sig.bytes
+}
+
+ + + +
+ + + +## Function `public_key_validate` + +DEPRECATED: Use public_key_validate_v2 instead. See public_key_validate_internal comments. + +Takes in an *unvalidated* public key and attempts to validate it. +Returns Some(ValidatedPublicKey) if successful and None otherwise. + + +
public fun public_key_validate(pk: &multi_ed25519::UnvalidatedPublicKey): option::Option<multi_ed25519::ValidatedPublicKey>
+
+ + + +
+Implementation + + +
public fun public_key_validate(pk: &UnvalidatedPublicKey): Option<ValidatedPublicKey> {
+    new_validated_public_key_from_bytes(pk.bytes)
+}
+
+ + + +
+ + + +## Function `public_key_validate_v2` + +Takes in an *unvalidated* public key and attempts to validate it. +Returns Some(ValidatedPublicKey) if successful and None otherwise. + + +
public fun public_key_validate_v2(pk: &multi_ed25519::UnvalidatedPublicKey): option::Option<multi_ed25519::ValidatedPublicKey>
+
+ + + +
+Implementation + + +
public fun public_key_validate_v2(pk: &UnvalidatedPublicKey): Option<ValidatedPublicKey> {
+    new_validated_public_key_from_bytes_v2(pk.bytes)
+}
+
+ + + +
+ + + +## Function `signature_verify_strict` + +Verifies a purported MultiEd25519 multisignature under an *unvalidated* public_key on the specified message. +This call will validate the public key by checking it is NOT in the small subgroup. + + +
public fun signature_verify_strict(multisignature: &multi_ed25519::Signature, public_key: &multi_ed25519::UnvalidatedPublicKey, message: vector<u8>): bool
+
+ + + +
+Implementation + + +
public fun signature_verify_strict(
+    multisignature: &Signature,
+    public_key: &UnvalidatedPublicKey,
+    message: vector<u8>
+): bool {
+    signature_verify_strict_internal(multisignature.bytes, public_key.bytes, message)
+}
+
+ + + +
+ + + +## Function `signature_verify_strict_t` + +This function is used to verify a multi-signature on any BCS-serializable type T. For now, it is used to verify the +proof of private key ownership when rotating authentication keys. + + +
public fun signature_verify_strict_t<T: drop>(multisignature: &multi_ed25519::Signature, public_key: &multi_ed25519::UnvalidatedPublicKey, data: T): bool
+
+ + + +
+Implementation + + +
public fun signature_verify_strict_t<T: drop>(multisignature: &Signature, public_key: &UnvalidatedPublicKey, data: T): bool {
+    let encoded = ed25519::new_signed_message(data);
+
+    signature_verify_strict_internal(multisignature.bytes, public_key.bytes, bcs::to_bytes(&encoded))
+}
+
+ + + +
+ + + +## Function `unvalidated_public_key_to_authentication_key` + +Derives the Aptos-specific authentication key of the given MultiEd25519 public key. + + +
public fun unvalidated_public_key_to_authentication_key(pk: &multi_ed25519::UnvalidatedPublicKey): vector<u8>
+
+ + + +
+Implementation + + +
public fun unvalidated_public_key_to_authentication_key(pk: &UnvalidatedPublicKey): vector<u8> {
+    public_key_bytes_to_authentication_key(pk.bytes)
+}
+
+ + + +
+ + + +## Function `unvalidated_public_key_num_sub_pks` + +Returns the number n of sub-PKs in an unvalidated t-out-of-n MultiEd25519 PK. +If this UnvalidatedPublicKey would pass validation in public_key_validate, then the returned # of sub-PKs +can be relied upon as correct. + +We provide this API as a cheaper alternative to calling public_key_validate and then validated_public_key_num_sub_pks +when the input pk is known to be valid. + + +
public fun unvalidated_public_key_num_sub_pks(pk: &multi_ed25519::UnvalidatedPublicKey): u8
+
+ + + +
+Implementation + + +
public fun unvalidated_public_key_num_sub_pks(pk: &UnvalidatedPublicKey): u8 {
+    let len = vector::length(&pk.bytes);
+
+    ((len / INDIVIDUAL_PUBLIC_KEY_NUM_BYTES) as u8)
+}
+
+ + + +
+ + + +## Function `unvalidated_public_key_threshold` + +Returns the number t of sub-PKs in an unvalidated t-out-of-n MultiEd25519 PK (i.e., the threshold) or None +if bytes does not correctly encode such a PK. + + +
public fun unvalidated_public_key_threshold(pk: &multi_ed25519::UnvalidatedPublicKey): option::Option<u8>
+
+ + + +
+Implementation + + +
public fun unvalidated_public_key_threshold(pk: &UnvalidatedPublicKey): Option<u8> {
+    check_and_get_threshold(pk.bytes)
+}
+
+ + + +
+ + + +## Function `validated_public_key_to_authentication_key` + +Derives the Aptos-specific authentication key of the given MultiEd25519 public key. + + +
public fun validated_public_key_to_authentication_key(pk: &multi_ed25519::ValidatedPublicKey): vector<u8>
+
+ + + +
+Implementation + + +
public fun validated_public_key_to_authentication_key(pk: &ValidatedPublicKey): vector<u8> {
+    public_key_bytes_to_authentication_key(pk.bytes)
+}
+
+ + + +
+ + + +## Function `validated_public_key_num_sub_pks` + +Returns the number n of sub-PKs in a validated t-out-of-n MultiEd25519 PK. +Since the format of this PK has been validated, the returned # of sub-PKs is guaranteed to be correct. + + +
public fun validated_public_key_num_sub_pks(pk: &multi_ed25519::ValidatedPublicKey): u8
+
+ + + +
+Implementation + + +
public fun validated_public_key_num_sub_pks(pk: &ValidatedPublicKey): u8 {
+    let len = vector::length(&pk.bytes);
+
+    ((len / INDIVIDUAL_PUBLIC_KEY_NUM_BYTES) as u8)
+}
+
+ + + +
+ + + +## Function `validated_public_key_threshold` + +Returns the number t of sub-PKs in a validated t-out-of-n MultiEd25519 PK (i.e., the threshold). + + +
public fun validated_public_key_threshold(pk: &multi_ed25519::ValidatedPublicKey): u8
+
+ + + +
+Implementation + + +
public fun validated_public_key_threshold(pk: &ValidatedPublicKey): u8 {
+    let len = vector::length(&pk.bytes);
+    let threshold_byte = *vector::borrow(&pk.bytes, len - 1);
+
+    threshold_byte
+}
+
+ + + +
+ + + +## Function `check_and_get_threshold` + +Checks that the serialized format of a t-out-of-n MultiEd25519 PK correctly encodes 1 <= n <= 32 sub-PKs. +(All ValidatedPublicKey objects are guaranteed to pass this check.) +Returns the threshold t <= n of the PK. + + +
public fun check_and_get_threshold(bytes: vector<u8>): option::Option<u8>
+
+ + + +
+Implementation + + +
public fun check_and_get_threshold(bytes: vector<u8>): Option<u8> {
+    let len = vector::length(&bytes);
+    if (len == 0) {
+        return option::none<u8>()
+    };
+
+    let threshold_num_of_bytes = len % INDIVIDUAL_PUBLIC_KEY_NUM_BYTES;
+    let num_of_keys = len / INDIVIDUAL_PUBLIC_KEY_NUM_BYTES;
+    let threshold_byte = *vector::borrow(&bytes, len - 1);
+
+    if (num_of_keys == 0 || num_of_keys > MAX_NUMBER_OF_PUBLIC_KEYS || threshold_num_of_bytes != 1) {
+        return option::none<u8>()
+    } else if (threshold_byte == 0 || threshold_byte > (num_of_keys as u8)) {
+        return option::none<u8>()
+    } else {
+        return option::some(threshold_byte)
+    }
+}
+
+ + + +
+ + + +## Function `public_key_bytes_to_authentication_key` + +Derives the Aptos-specific authentication key of the given MultiEd25519 public key. + + +
fun public_key_bytes_to_authentication_key(pk_bytes: vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
fun public_key_bytes_to_authentication_key(pk_bytes: vector<u8>): vector<u8> {
+    vector::push_back(&mut pk_bytes, SIGNATURE_SCHEME_ID);
+    std::hash::sha3_256(pk_bytes)
+}
+
+ + + +
+ + + +## Function `public_key_validate_internal` + +DEPRECATED: Use public_key_validate_internal_v2 instead. This function was NOT correctly implemented: + +1. It does not check that the # of sub public keys is > 0, which leads to invalid ValidatedPublicKey objects +against which no signature will verify, since signature_verify_strict_internal will reject such invalid PKs. +This is not a security issue, but a correctness issue. See bugfix_validated_pk_from_zero_subpks. +2. It charges too much gas: if the first sub-PK is invalid, it will charge for verifying all remaining sub-PKs. + +DEPRECATES: +- new_validated_public_key_from_bytes +- public_key_validate + +Return true if the bytes in public_key can be parsed as a valid MultiEd25519 public key: i.e., all underlying +PKs pass point-on-curve and not-in-small-subgroup checks. +Returns false otherwise. + + +
fun public_key_validate_internal(bytes: vector<u8>): bool
+
+ + + +
+Implementation + + +
native fun public_key_validate_internal(bytes: vector<u8>): bool;
+
+ + + +
+ + + +## Function `public_key_validate_v2_internal` + +Return true if the bytes in public_key can be parsed as a valid MultiEd25519 public key: i.e., all underlying +sub-PKs pass point-on-curve and not-in-small-subgroup checks. +Returns false otherwise. + + +
fun public_key_validate_v2_internal(bytes: vector<u8>): bool
+
+ + + +
+Implementation + + +
native fun public_key_validate_v2_internal(bytes: vector<u8>): bool;
+
+ + + +
+ + + +## Function `signature_verify_strict_internal` + +Return true if the MultiEd25519 multisignature on message verifies against the MultiEd25519 public_key. +Returns false if either: +- The PKs in public_key do not all pass points-on-curve or not-in-small-subgroup checks, +- The signatures in multisignature do not all pass points-on-curve or not-in-small-subgroup checks, +- the multisignature on message does not verify. + + +
fun signature_verify_strict_internal(multisignature: vector<u8>, public_key: vector<u8>, message: vector<u8>): bool
+
+ + + +
+Implementation + + +
native fun signature_verify_strict_internal(
+    multisignature: vector<u8>,
+    public_key: vector<u8>,
+    message: vector<u8>
+): bool;
+
+ + + +
+ + + +## Specification + + + + +### Function `new_unvalidated_public_key_from_bytes` + + +
public fun new_unvalidated_public_key_from_bytes(bytes: vector<u8>): multi_ed25519::UnvalidatedPublicKey
+
+ + + + +
include NewUnvalidatedPublicKeyFromBytesAbortsIf;
+ensures result == UnvalidatedPublicKey { bytes };
+
+ + + + + + + +
schema NewUnvalidatedPublicKeyFromBytesAbortsIf {
+    bytes: vector<u8>;
+    let length = len(bytes);
+    aborts_if length / INDIVIDUAL_PUBLIC_KEY_NUM_BYTES > MAX_NUMBER_OF_PUBLIC_KEYS;
+    aborts_if length % INDIVIDUAL_PUBLIC_KEY_NUM_BYTES != THRESHOLD_SIZE_BYTES;
+}
+
+ + + + + +### Function `new_validated_public_key_from_bytes` + + +
public fun new_validated_public_key_from_bytes(bytes: vector<u8>): option::Option<multi_ed25519::ValidatedPublicKey>
+
+ + + + +
aborts_if false;
+let cond = len(bytes) % INDIVIDUAL_PUBLIC_KEY_NUM_BYTES == THRESHOLD_SIZE_BYTES
+    && spec_public_key_validate_internal(bytes);
+ensures cond ==> result == option::spec_some(ValidatedPublicKey{bytes});
+ensures !cond ==> result == option::spec_none<ValidatedPublicKey>();
+
+ + + + + +### Function `new_validated_public_key_from_bytes_v2` + + +
public fun new_validated_public_key_from_bytes_v2(bytes: vector<u8>): option::Option<multi_ed25519::ValidatedPublicKey>
+
+ + + + +
let cond = spec_public_key_validate_v2_internal(bytes);
+ensures cond ==> result == option::spec_some(ValidatedPublicKey{bytes});
+ensures !cond ==> result == option::spec_none<ValidatedPublicKey>();
+
+ + + + + +### Function `new_signature_from_bytes` + + +
public fun new_signature_from_bytes(bytes: vector<u8>): multi_ed25519::Signature
+
+ + + + +
include NewSignatureFromBytesAbortsIf;
+ensures result == Signature { bytes };
+
+ + + + + + + +
schema NewSignatureFromBytesAbortsIf {
+    bytes: vector<u8>;
+    aborts_if len(bytes) % INDIVIDUAL_SIGNATURE_NUM_BYTES != BITMAP_NUM_OF_BYTES;
+}
+
+ + + + + +### Function `unvalidated_public_key_num_sub_pks` + + +
public fun unvalidated_public_key_num_sub_pks(pk: &multi_ed25519::UnvalidatedPublicKey): u8
+
+ + + + +
let bytes = pk.bytes;
+include PkDivision;
+
+ + + + + +### Function `unvalidated_public_key_threshold` + + +
public fun unvalidated_public_key_threshold(pk: &multi_ed25519::UnvalidatedPublicKey): option::Option<u8>
+
+ + + + +
aborts_if false;
+ensures result == spec_check_and_get_threshold(pk.bytes);
+
+ + + + + +### Function `validated_public_key_num_sub_pks` + + +
public fun validated_public_key_num_sub_pks(pk: &multi_ed25519::ValidatedPublicKey): u8
+
+ + + + +
let bytes = pk.bytes;
+include PkDivision;
+
+ + + + + +### Function `validated_public_key_threshold` + + +
public fun validated_public_key_threshold(pk: &multi_ed25519::ValidatedPublicKey): u8
+
+ + + + +
aborts_if len(pk.bytes) == 0;
+ensures result == pk.bytes[len(pk.bytes) - 1];
+
+ + + + + +### Function `check_and_get_threshold` + + +
public fun check_and_get_threshold(bytes: vector<u8>): option::Option<u8>
+
+ + + + +
aborts_if false;
+ensures result == spec_check_and_get_threshold(bytes);
+
+ + + + + + + +
schema PkDivision {
+    bytes: vector<u8>;
+    result: u8;
+    aborts_if len(bytes) / INDIVIDUAL_PUBLIC_KEY_NUM_BYTES > MAX_U8;
+    ensures result == len(bytes) / INDIVIDUAL_PUBLIC_KEY_NUM_BYTES;
+}
+
+ + + + + +### Function `public_key_bytes_to_authentication_key` + + +
fun public_key_bytes_to_authentication_key(pk_bytes: vector<u8>): vector<u8>
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures [abstract] result == spec_public_key_bytes_to_authentication_key(pk_bytes);
+
+ + + + + +### Function `public_key_validate_internal` + + +
fun public_key_validate_internal(bytes: vector<u8>): bool
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures (len(bytes) / INDIVIDUAL_PUBLIC_KEY_NUM_BYTES > MAX_NUMBER_OF_PUBLIC_KEYS) ==> (result == false);
+ensures result == spec_public_key_validate_internal(bytes);
+
+ + + + + +### Function `public_key_validate_v2_internal` + + +
fun public_key_validate_v2_internal(bytes: vector<u8>): bool
+
+ + + + +
pragma opaque;
+ensures result == spec_public_key_validate_v2_internal(bytes);
+
+ + + + + +### Function `signature_verify_strict_internal` + + +
fun signature_verify_strict_internal(multisignature: vector<u8>, public_key: vector<u8>, message: vector<u8>): bool
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == spec_signature_verify_strict_internal(multisignature, public_key, message);
+
+ + + + + +### Helper functions + + + + + + +
fun spec_check_and_get_threshold(bytes: vector<u8>): Option<u8> {
+   let len = len(bytes);
+   if (len == 0) {
+       option::none<u8>()
+   } else {
+       let threshold_num_of_bytes = len % INDIVIDUAL_PUBLIC_KEY_NUM_BYTES;
+       let num_of_keys = len / INDIVIDUAL_PUBLIC_KEY_NUM_BYTES;
+       let threshold_byte = bytes[len - 1];
+       if (num_of_keys == 0 || num_of_keys > MAX_NUMBER_OF_PUBLIC_KEYS || len % INDIVIDUAL_PUBLIC_KEY_NUM_BYTES != 1) {
+           option::none<u8>()
+       } else if (threshold_byte == 0 || threshold_byte > (num_of_keys as u8)) {
+           option::none<u8>()
+       } else {
+           option::spec_some(threshold_byte)
+       }
+   }
+}
+
+ + + + + + + +
fun spec_signature_verify_strict_internal(
+   multisignature: vector<u8>,
+   public_key: vector<u8>,
+   message: vector<u8>
+): bool;
+
+ + + + + + + +
fun spec_public_key_validate_internal(bytes: vector<u8>): bool;
+
+ + + + + + + +
fun spec_public_key_validate_v2_internal(bytes: vector<u8>): bool;
+
+ + + + + + + +
fun spec_public_key_bytes_to_authentication_key(pk_bytes: vector<u8>): vector<u8>;
+
+ + + + + + + +
fun spec_signature_verify_strict_t<T>(signature: Signature, public_key: UnvalidatedPublicKey, data: T): bool {
+   let encoded = ed25519::new_signed_message<T>(data);
+   let message = bcs::serialize(encoded);
+   spec_signature_verify_strict_internal(signature.bytes, public_key.bytes, message)
+}
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/overview.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/overview.md new file mode 100644 index 0000000000000..6176385db1d97 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/overview.md @@ -0,0 +1,50 @@ + + + +# Aptos Standard Library + + +This is the reference documentation of the Aptos standard library. + + + + +## Index + + +- [`0x1::any`](any.md#0x1_any) +- [`0x1::aptos_hash`](hash.md#0x1_aptos_hash) +- [`0x1::big_vector`](big_vector.md#0x1_big_vector) +- [`0x1::bls12381`](bls12381.md#0x1_bls12381) +- [`0x1::bls12381_algebra`](bls12381_algebra.md#0x1_bls12381_algebra) +- [`0x1::bn254_algebra`](bn254_algebra.md#0x1_bn254_algebra) +- [`0x1::capability`](capability.md#0x1_capability) +- [`0x1::comparator`](comparator.md#0x1_comparator) +- [`0x1::copyable_any`](copyable_any.md#0x1_copyable_any) +- [`0x1::crypto_algebra`](crypto_algebra.md#0x1_crypto_algebra) +- [`0x1::debug`](debug.md#0x1_debug) +- [`0x1::ed25519`](ed25519.md#0x1_ed25519) +- [`0x1::fixed_point64`](fixed_point64.md#0x1_fixed_point64) +- [`0x1::from_bcs`](from_bcs.md#0x1_from_bcs) +- [`0x1::math128`](math128.md#0x1_math128) +- [`0x1::math64`](math64.md#0x1_math64) +- [`0x1::math_fixed`](math_fixed.md#0x1_math_fixed) +- [`0x1::math_fixed64`](math_fixed64.md#0x1_math_fixed64) +- [`0x1::multi_ed25519`](multi_ed25519.md#0x1_multi_ed25519) +- [`0x1::pool_u64`](pool_u64.md#0x1_pool_u64) +- [`0x1::pool_u64_unbound`](pool_u64_unbound.md#0x1_pool_u64_unbound) +- [`0x1::ristretto255`](ristretto255.md#0x1_ristretto255) +- [`0x1::ristretto255_bulletproofs`](ristretto255_bulletproofs.md#0x1_ristretto255_bulletproofs) +- [`0x1::ristretto255_elgamal`](ristretto255_elgamal.md#0x1_ristretto255_elgamal) +- [`0x1::ristretto255_pedersen`](ristretto255_pedersen.md#0x1_ristretto255_pedersen) +- [`0x1::secp256k1`](secp256k1.md#0x1_secp256k1) +- 
[`0x1::simple_map`](simple_map.md#0x1_simple_map) +- [`0x1::smart_table`](smart_table.md#0x1_smart_table) +- [`0x1::smart_vector`](smart_vector.md#0x1_smart_vector) +- [`0x1::string_utils`](string_utils.md#0x1_string_utils) +- [`0x1::table`](table.md#0x1_table) +- [`0x1::table_with_length`](table_with_length.md#0x1_table_with_length) +- [`0x1::type_info`](type_info.md#0x1_type_info) + + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/pool_u64.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/pool_u64.md new file mode 100644 index 0000000000000..b40c2ce028740 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/pool_u64.md @@ -0,0 +1,1292 @@ + + + +# Module `0x1::pool_u64` + + +Simple module for tracking and calculating shares of a pool of coins. The shares are worth more as the total coins in +the pool increases. New shareholders can buy more shares or redeem their existing shares. + +Example flow: +1. Pool starts out empty. +2. Shareholder A buys in with 1000 coins. A will receive 1000 shares in the pool. Pool now has 1000 total coins and +1000 total shares. +3. Pool appreciates in value from rewards and now has 2000 coins. A's 1000 shares are now worth 2000 coins. +4. Shareholder B now buys in with 1000 coins. Since before the buy in, each existing share is worth 2 coins, B will +receive 500 shares in exchange for 1000 coins. Pool now has 1500 shares and 3000 coins. +5. Pool appreciates in value from rewards and now has 6000 coins. +6. A redeems 500 shares. Each share is worth 6000 / 1500 = 4. A receives 2000 coins. Pool has 4000 coins and 1000 +shares left. 
+ + +- [Struct `Pool`](#0x1_pool_u64_Pool) +- [Constants](#@Constants_0) +- [Function `new`](#0x1_pool_u64_new) +- [Function `create`](#0x1_pool_u64_create) +- [Function `create_with_scaling_factor`](#0x1_pool_u64_create_with_scaling_factor) +- [Function `destroy_empty`](#0x1_pool_u64_destroy_empty) +- [Function `total_coins`](#0x1_pool_u64_total_coins) +- [Function `total_shares`](#0x1_pool_u64_total_shares) +- [Function `contains`](#0x1_pool_u64_contains) +- [Function `shares`](#0x1_pool_u64_shares) +- [Function `balance`](#0x1_pool_u64_balance) +- [Function `shareholders`](#0x1_pool_u64_shareholders) +- [Function `shareholders_count`](#0x1_pool_u64_shareholders_count) +- [Function `update_total_coins`](#0x1_pool_u64_update_total_coins) +- [Function `buy_in`](#0x1_pool_u64_buy_in) +- [Function `add_shares`](#0x1_pool_u64_add_shares) +- [Function `redeem_shares`](#0x1_pool_u64_redeem_shares) +- [Function `transfer_shares`](#0x1_pool_u64_transfer_shares) +- [Function `deduct_shares`](#0x1_pool_u64_deduct_shares) +- [Function `amount_to_shares`](#0x1_pool_u64_amount_to_shares) +- [Function `amount_to_shares_with_total_coins`](#0x1_pool_u64_amount_to_shares_with_total_coins) +- [Function `shares_to_amount`](#0x1_pool_u64_shares_to_amount) +- [Function `shares_to_amount_with_total_coins`](#0x1_pool_u64_shares_to_amount_with_total_coins) +- [Function `multiply_then_divide`](#0x1_pool_u64_multiply_then_divide) +- [Function `to_u128`](#0x1_pool_u64_to_u128) +- [Specification](#@Specification_1) + - [Struct `Pool`](#@Specification_1_Pool) + - [Function `contains`](#@Specification_1_contains) + - [Function `shares`](#@Specification_1_shares) + - [Function `balance`](#@Specification_1_balance) + - [Function `buy_in`](#@Specification_1_buy_in) + - [Function `add_shares`](#@Specification_1_add_shares) + - [Function `redeem_shares`](#@Specification_1_redeem_shares) + - [Function `transfer_shares`](#@Specification_1_transfer_shares) + - [Function 
`deduct_shares`](#@Specification_1_deduct_shares) + - [Function `amount_to_shares_with_total_coins`](#@Specification_1_amount_to_shares_with_total_coins) + - [Function `shares_to_amount_with_total_coins`](#@Specification_1_shares_to_amount_with_total_coins) + - [Function `multiply_then_divide`](#@Specification_1_multiply_then_divide) + + +
use 0x1::error;
+use 0x1::simple_map;
+use 0x1::vector;
+
+ + + + + +## Struct `Pool` + + + +
struct Pool has store
+
+ + + +
+Fields + + +
+
+shareholders_limit: u64 +
+
+ +
+
+total_coins: u64 +
+
+ +
+
+total_shares: u64 +
+
+ +
+
+shares: simple_map::SimpleMap<address, u64> +
+
+ +
+
+shareholders: vector<address> +
+
+ +
+
+scaling_factor: u64 +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const MAX_U64: u64 = 18446744073709551615;
+
+ + + + + +Cannot redeem more shares than the shareholder has in the pool. + + +
const EINSUFFICIENT_SHARES: u64 = 4;
+
+ + + + + +Cannot destroy non-empty pool. + + +
const EPOOL_IS_NOT_EMPTY: u64 = 3;
+
+ + + + + +Pool's total coins cannot exceed u64.max. + + +
const EPOOL_TOTAL_COINS_OVERFLOW: u64 = 6;
+
+ + + + + +Pool's total shares cannot exceed u64.max. + + +
const EPOOL_TOTAL_SHARES_OVERFLOW: u64 = 7;
+
+ + + + + +Shareholder not present in pool. + + +
const ESHAREHOLDER_NOT_FOUND: u64 = 1;
+
+ + + + + +Shareholder cannot have more than u64.max shares. + + +
const ESHAREHOLDER_SHARES_OVERFLOW: u64 = 5;
+
+ + + + + +There are too many shareholders in the pool. + + +
const ETOO_MANY_SHAREHOLDERS: u64 = 2;
+
+ + + + + +## Function `new` + +Create a new pool. + + +
public fun new(shareholders_limit: u64): pool_u64::Pool
+
+ + + +
+Implementation + + +
public fun new(shareholders_limit: u64): Pool {
+    // Default to a scaling factor of 1 (effectively no scaling).
+    create_with_scaling_factor(shareholders_limit, 1)
+}
+
+ + + +
+ + + +## Function `create` + +Deprecated. Use new instead. +Create a new pool. + + +
#[deprecated]
+public fun create(shareholders_limit: u64): pool_u64::Pool
+
+ + + +
+Implementation + + +
public fun create(shareholders_limit: u64): Pool {
+    new(shareholders_limit)
+}
+
+ + + +
+ + + +## Function `create_with_scaling_factor` + +Create a new pool with custom scaling_factor. + + +
public fun create_with_scaling_factor(shareholders_limit: u64, scaling_factor: u64): pool_u64::Pool
+
+ + + +
+Implementation + + +
public fun create_with_scaling_factor(shareholders_limit: u64, scaling_factor: u64): Pool {
+    Pool {
+        shareholders_limit,
+        total_coins: 0,
+        total_shares: 0,
+        shares: simple_map::create<address, u64>(),
+        shareholders: vector::empty<address>(),
+        scaling_factor,
+    }
+}
+
+ + + +
+ + + +## Function `destroy_empty` + +Destroy an empty pool. This will fail if the pool has any balance of coins. + + +
public fun destroy_empty(self: pool_u64::Pool)
+
+ + + +
+Implementation + + +
public fun destroy_empty(self: Pool) {
+    assert!(self.total_coins == 0, error::invalid_state(EPOOL_IS_NOT_EMPTY));
+    let Pool {
+        shareholders_limit: _,
+        total_coins: _,
+        total_shares: _,
+        shares: _,
+        shareholders: _,
+        scaling_factor: _,
+    } = self;
+}
+
+ + + +
+ + + +## Function `total_coins` + +Return self's total balance of coins. + + +
public fun total_coins(self: &pool_u64::Pool): u64
+
+ + + +
+Implementation + + +
public fun total_coins(self: &Pool): u64 {
+    self.total_coins
+}
+
+ + + +
+ + + +## Function `total_shares` + +Return the total number of shares across all shareholders in self. + + +
public fun total_shares(self: &pool_u64::Pool): u64
+
+ + + +
+Implementation + + +
public fun total_shares(self: &Pool): u64 {
+    self.total_shares
+}
+
+ + + +
+ + + +## Function `contains` + +Return true if shareholder is in self. + + +
public fun contains(self: &pool_u64::Pool, shareholder: address): bool
+
+ + + +
+Implementation + + +
public fun contains(self: &Pool, shareholder: address): bool {
+    simple_map::contains_key(&self.shares, &shareholder)
+}
+
+ + + +
+ + + +## Function `shares` + +Return the number of shares of stakeholder in self. + + +
public fun shares(self: &pool_u64::Pool, shareholder: address): u64
+
+ + + +
+Implementation + + +
public fun shares(self: &Pool, shareholder: address): u64 {
+    if (contains(self, shareholder)) {
+        *simple_map::borrow(&self.shares, &shareholder)
+    } else {
+        0
+    }
+}
+
+ + + +
+ + + +## Function `balance` + +Return the balance in coins of shareholder in self. + + +
public fun balance(self: &pool_u64::Pool, shareholder: address): u64
+
+ + + +
+Implementation + + +
public fun balance(self: &Pool, shareholder: address): u64 {
+    let num_shares = shares(self, shareholder);
+    shares_to_amount(self, num_shares)
+}
+
+ + + +
+ + + +## Function `shareholders` + +Return the list of shareholders in self. + + +
public fun shareholders(self: &pool_u64::Pool): vector<address>
+
+ + + +
+Implementation + + +
public fun shareholders(self: &Pool): vector<address> {
+    self.shareholders
+}
+
+ + + +
+ + + +## Function `shareholders_count` + +Return the number of shareholders in self. + + +
public fun shareholders_count(self: &pool_u64::Pool): u64
+
+ + + +
+Implementation + + +
public fun shareholders_count(self: &Pool): u64 {
+    vector::length(&self.shareholders)
+}
+
+ + + +
+ + + +## Function `update_total_coins` + +Update self's total balance of coins. + + +
public fun update_total_coins(self: &mut pool_u64::Pool, new_total_coins: u64)
+
+ + + +
+Implementation + + +
public fun update_total_coins(self: &mut Pool, new_total_coins: u64) {
+    self.total_coins = new_total_coins;
+}
+
+ + + +
+ + + +## Function `buy_in` + +Allow an existing or new shareholder to add their coins to the pool in exchange for new shares. + + +
public fun buy_in(self: &mut pool_u64::Pool, shareholder: address, coins_amount: u64): u64
+
+ + + +
+Implementation + + +
public fun buy_in(self: &mut Pool, shareholder: address, coins_amount: u64): u64 {
+    if (coins_amount == 0) return 0;
+
+    let new_shares = amount_to_shares(self, coins_amount);
+    assert!(MAX_U64 - self.total_coins >= coins_amount, error::invalid_argument(EPOOL_TOTAL_COINS_OVERFLOW));
+    assert!(MAX_U64 - self.total_shares >= new_shares, error::invalid_argument(EPOOL_TOTAL_SHARES_OVERFLOW));
+
+    self.total_coins = self.total_coins + coins_amount;
+    self.total_shares = self.total_shares + new_shares;
+    add_shares(self, shareholder, new_shares);
+    new_shares
+}
+
+ + + +
+ + + +## Function `add_shares` + +Add the number of shares directly for shareholder in self. +This would dilute other shareholders if the pool's balance of coins didn't change. + + +
fun add_shares(self: &mut pool_u64::Pool, shareholder: address, new_shares: u64): u64
+
+ + + +
+Implementation + + +
fun add_shares(self: &mut Pool, shareholder: address, new_shares: u64): u64 {
+    if (contains(self, shareholder)) {
+        let existing_shares = simple_map::borrow_mut(&mut self.shares, &shareholder);
+        let current_shares = *existing_shares;
+        assert!(MAX_U64 - current_shares >= new_shares, error::invalid_argument(ESHAREHOLDER_SHARES_OVERFLOW));
+
+        *existing_shares = current_shares + new_shares;
+        *existing_shares
+    } else if (new_shares > 0) {
+        assert!(
+            vector::length(&self.shareholders) < self.shareholders_limit,
+            error::invalid_state(ETOO_MANY_SHAREHOLDERS),
+        );
+
+        vector::push_back(&mut self.shareholders, shareholder);
+        simple_map::add(&mut self.shares, shareholder, new_shares);
+        new_shares
+    } else {
+        new_shares
+    }
+}
+
+ + + +
+ + + +## Function `redeem_shares` + +Allow shareholder to redeem their shares in self for coins. + + +
public fun redeem_shares(self: &mut pool_u64::Pool, shareholder: address, shares_to_redeem: u64): u64
+
+ + + +
+Implementation + + +
public fun redeem_shares(self: &mut Pool, shareholder: address, shares_to_redeem: u64): u64 {
+    assert!(contains(self, shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
+    assert!(shares(self, shareholder) >= shares_to_redeem, error::invalid_argument(EINSUFFICIENT_SHARES));
+
+    if (shares_to_redeem == 0) return 0;
+
+    let redeemed_coins = shares_to_amount(self, shares_to_redeem);
+    self.total_coins = self.total_coins - redeemed_coins;
+    self.total_shares = self.total_shares - shares_to_redeem;
+    deduct_shares(self, shareholder, shares_to_redeem);
+
+    redeemed_coins
+}
+
+ + + +
+ + + +## Function `transfer_shares` + +Transfer shares from shareholder_1 to shareholder_2. + + +
public fun transfer_shares(self: &mut pool_u64::Pool, shareholder_1: address, shareholder_2: address, shares_to_transfer: u64)
+
+ + + +
+Implementation + + +
public fun transfer_shares(
+    self: &mut Pool,
+    shareholder_1: address,
+    shareholder_2: address,
+    shares_to_transfer: u64,
+) {
+    assert!(contains(self, shareholder_1), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
+    assert!(shares(self, shareholder_1) >= shares_to_transfer, error::invalid_argument(EINSUFFICIENT_SHARES));
+    if (shares_to_transfer == 0) return;
+
+    deduct_shares(self, shareholder_1, shares_to_transfer);
+    add_shares(self, shareholder_2, shares_to_transfer);
+}
+
+ + + +
+ + + +## Function `deduct_shares` + +Directly deduct shareholder's number of shares in self and return the number of remaining shares. + + +
fun deduct_shares(self: &mut pool_u64::Pool, shareholder: address, num_shares: u64): u64
+
+ + + +
+Implementation + + +
fun deduct_shares(self: &mut Pool, shareholder: address, num_shares: u64): u64 {
+    assert!(contains(self, shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
+    assert!(shares(self, shareholder) >= num_shares, error::invalid_argument(EINSUFFICIENT_SHARES));
+
+    let existing_shares = simple_map::borrow_mut(&mut self.shares, &shareholder);
+    *existing_shares = *existing_shares - num_shares;
+
+    // Remove the shareholder completely if they have no shares left.
+    let remaining_shares = *existing_shares;
+    if (remaining_shares == 0) {
+        let (_, shareholder_index) = vector::index_of(&self.shareholders, &shareholder);
+        vector::remove(&mut self.shareholders, shareholder_index);
+        simple_map::remove(&mut self.shares, &shareholder);
+    };
+
+    remaining_shares
+}
+
+ + + +
+ + + +## Function `amount_to_shares` + +Return the number of new shares coins_amount can buy in self. +amount needs to big enough to avoid rounding number. + + +
public fun amount_to_shares(self: &pool_u64::Pool, coins_amount: u64): u64
+
+ + + +
+Implementation + + +
public fun amount_to_shares(self: &Pool, coins_amount: u64): u64 {
+    amount_to_shares_with_total_coins(self, coins_amount, self.total_coins)
+}
+
+ + + +
+ + + +## Function `amount_to_shares_with_total_coins` + +Return the number of new shares coins_amount can buy in self with a custom total coins number. +amount needs to big enough to avoid rounding number. + + +
public fun amount_to_shares_with_total_coins(self: &pool_u64::Pool, coins_amount: u64, total_coins: u64): u64
+
+ + + +
+Implementation + + +
public fun amount_to_shares_with_total_coins(self: &Pool, coins_amount: u64, total_coins: u64): u64 {
+    // No shares yet so amount is worth the same number of shares.
+    if (self.total_coins == 0 || self.total_shares == 0) {
+        // Multiply by scaling factor to minimize rounding errors during internal calculations for buy ins/redeems.
+        // This can overflow but scaling factor is expected to be chosen carefully so this would not overflow.
+        coins_amount * self.scaling_factor
+    } else {
+        // Shares price = total_coins / total existing shares.
+        // New number of shares = new_amount / shares_price = new_amount * existing_shares / total_amount.
+        // We rearrange the calc and do multiplication first to avoid rounding errors.
+        multiply_then_divide(self, coins_amount, self.total_shares, total_coins)
+    }
+}
+
+ + + +
+ + + +## Function `shares_to_amount` + +Return the number of coins shares are worth in self. +shares needs to big enough to avoid rounding number. + + +
public fun shares_to_amount(self: &pool_u64::Pool, shares: u64): u64
+
+ + + +
+Implementation + + +
public fun shares_to_amount(self: &Pool, shares: u64): u64 {
+    shares_to_amount_with_total_coins(self, shares, self.total_coins)
+}
+
+ + + +
+ + + +## Function `shares_to_amount_with_total_coins` + +Return the number of coins shares are worth in self with a custom total coins number. +shares needs to big enough to avoid rounding number. + + +
public fun shares_to_amount_with_total_coins(self: &pool_u64::Pool, shares: u64, total_coins: u64): u64
+
+ + + +
+Implementation + + +
public fun shares_to_amount_with_total_coins(self: &Pool, shares: u64, total_coins: u64): u64 {
+    // No shares or coins yet so shares are worthless.
+    if (self.total_coins == 0 || self.total_shares == 0) {
+        0
+    } else {
+        // Shares price = total_coins / total existing shares.
+        // Shares worth = shares * shares price = shares * total_coins / total existing shares.
+        // We rearrange the calc and do multiplication first to avoid rounding errors.
+        multiply_then_divide(self, shares, total_coins, self.total_shares)
+    }
+}
+
+ + + +
+ + + +## Function `multiply_then_divide` + + + +
public fun multiply_then_divide(self: &pool_u64::Pool, x: u64, y: u64, z: u64): u64
+
+ + + +
+Implementation + + +
public fun multiply_then_divide(self: &Pool, x: u64, y: u64, z: u64): u64 {
+    let result = (to_u128(x) * to_u128(y)) / to_u128(z);
+    (result as u64)
+}
+
+ + + +
+ + + +## Function `to_u128` + + + +
fun to_u128(num: u64): u128
+
+ + + +
+Implementation + + +
fun to_u128(num: u64): u128 {
+    (num as u128)
+}
+
+ + + +
+ + + +## Specification + + + +
pragma verify = false;
+
+ + + + + +### Struct `Pool` + + +
struct Pool has store
+
+ + + +
+
+shareholders_limit: u64 +
+
+ +
+
+total_coins: u64 +
+
+ +
+
+total_shares: u64 +
+
+ +
+
+shares: simple_map::SimpleMap<address, u64> +
+
+ +
+
+shareholders: vector<address> +
+
+ +
+
+scaling_factor: u64 +
+
+ +
+
+ + + +
invariant forall addr: address:
+    (simple_map::spec_contains_key(shares, addr) == vector::spec_contains(shareholders, addr));
+invariant forall i in 0..len(shareholders), j in 0..len(shareholders):
+    shareholders[i] == shareholders[j] ==> i == j;
+
+ + + + + + + +
fun spec_contains(pool: Pool, shareholder: address): bool {
+   simple_map::spec_contains_key(pool.shares, shareholder)
+}
+
+ + + + + +### Function `contains` + + +
public fun contains(self: &pool_u64::Pool, shareholder: address): bool
+
+ + + + +
aborts_if false;
+ensures result == spec_contains(self, shareholder);
+
+ + + + + + + +
fun spec_shares(pool: Pool, shareholder: address): u64 {
+   if (simple_map::spec_contains_key(pool.shares, shareholder)) {
+       simple_map::spec_get(pool.shares, shareholder)
+   }
+   else {
+       0
+   }
+}
+
+ + + + + +### Function `shares` + + +
public fun shares(self: &pool_u64::Pool, shareholder: address): u64
+
+ + + + +
aborts_if false;
+ensures result == spec_shares(self, shareholder);
+
+ + + + + +### Function `balance` + + +
public fun balance(self: &pool_u64::Pool, shareholder: address): u64
+
+ + + + +
let shares = spec_shares(self, shareholder);
+let total_coins = self.total_coins;
+aborts_if self.total_coins > 0 && self.total_shares > 0 && (shares * total_coins) / self.total_shares > MAX_U64;
+ensures result == spec_shares_to_amount_with_total_coins(self, shares, total_coins);
+
+ + + + + +### Function `buy_in` + + +
public fun buy_in(self: &mut pool_u64::Pool, shareholder: address, coins_amount: u64): u64
+
+ + + + +
let new_shares = spec_amount_to_shares_with_total_coins(self, coins_amount, self.total_coins);
+aborts_if self.total_coins + coins_amount > MAX_U64;
+aborts_if self.total_shares + new_shares > MAX_U64;
+include coins_amount > 0 ==> AddSharesAbortsIf { new_shares: new_shares };
+include coins_amount > 0 ==> AddSharesEnsures { new_shares: new_shares };
+ensures self.total_coins == old(self.total_coins) + coins_amount;
+ensures self.total_shares == old(self.total_shares) + new_shares;
+ensures result == new_shares;
+
+ + + + + +### Function `add_shares` + + +
fun add_shares(self: &mut pool_u64::Pool, shareholder: address, new_shares: u64): u64
+
+ + + + +
include AddSharesAbortsIf;
+include AddSharesEnsures;
+let key_exists = simple_map::spec_contains_key(self.shares, shareholder);
+ensures result == if (key_exists) { simple_map::spec_get(self.shares, shareholder) }
+else { new_shares };
+
+ + + + + + + +
schema AddSharesAbortsIf {
+    self: Pool;
+    shareholder: address;
+    new_shares: u64;
+    let key_exists = simple_map::spec_contains_key(self.shares, shareholder);
+    let current_shares = simple_map::spec_get(self.shares, shareholder);
+    aborts_if key_exists && current_shares + new_shares > MAX_U64;
+    aborts_if !key_exists && new_shares > 0 && len(self.shareholders) >= self.shareholders_limit;
+}
+
+ + + + + + + +
schema AddSharesEnsures {
+    self: Pool;
+    shareholder: address;
+    new_shares: u64;
+    let key_exists = simple_map::spec_contains_key(self.shares, shareholder);
+    let current_shares = simple_map::spec_get(self.shares, shareholder);
+    ensures key_exists ==>
+        self.shares == simple_map::spec_set(old(self.shares), shareholder, current_shares + new_shares);
+    ensures (!key_exists && new_shares > 0) ==>
+        self.shares == simple_map::spec_set(old(self.shares), shareholder, new_shares);
+    ensures (!key_exists && new_shares > 0) ==>
+        vector::eq_push_back(self.shareholders, old(self.shareholders), shareholder);
+}
+
+ + + + + + + +
fun spec_amount_to_shares_with_total_coins(pool: Pool, coins_amount: u64, total_coins: u64): u64 {
+   if (pool.total_coins == 0 || pool.total_shares == 0) {
+       coins_amount * pool.scaling_factor
+   }
+   else {
+       (coins_amount * pool.total_shares) / total_coins
+   }
+}
+
+ + + + + +### Function `redeem_shares` + + +
public fun redeem_shares(self: &mut pool_u64::Pool, shareholder: address, shares_to_redeem: u64): u64
+
+ + + + +
let redeemed_coins = spec_shares_to_amount_with_total_coins(self, shares_to_redeem, self.total_coins);
+aborts_if !spec_contains(self, shareholder);
+aborts_if spec_shares(self, shareholder) < shares_to_redeem;
+aborts_if self.total_coins < redeemed_coins;
+aborts_if self.total_shares < shares_to_redeem;
+ensures self.total_coins == old(self.total_coins) - redeemed_coins;
+ensures self.total_shares == old(self.total_shares) - shares_to_redeem;
+include shares_to_redeem > 0 ==> DeductSharesEnsures {
+    num_shares: shares_to_redeem
+};
+ensures result == redeemed_coins;
+
+ + + + + +### Function `transfer_shares` + + +
public fun transfer_shares(self: &mut pool_u64::Pool, shareholder_1: address, shareholder_2: address, shares_to_transfer: u64)
+
+ + + + +
pragma aborts_if_is_partial;
+aborts_if !spec_contains(self, shareholder_1);
+aborts_if spec_shares(self, shareholder_1) < shares_to_transfer;
+
+ + + + + +### Function `deduct_shares` + + +
fun deduct_shares(self: &mut pool_u64::Pool, shareholder: address, num_shares: u64): u64
+
+ + + + +
aborts_if !spec_contains(self, shareholder);
+aborts_if spec_shares(self, shareholder) < num_shares;
+include DeductSharesEnsures;
+let remaining_shares = simple_map::spec_get(self.shares, shareholder) - num_shares;
+ensures remaining_shares > 0 ==> result == simple_map::spec_get(self.shares, shareholder);
+ensures remaining_shares == 0 ==> result == 0;
+
+ + + + + + + +
schema DeductSharesEnsures {
+    self: Pool;
+    shareholder: address;
+    num_shares: u64;
+    let remaining_shares = simple_map::spec_get(self.shares, shareholder) - num_shares;
+    ensures remaining_shares > 0 ==> simple_map::spec_get(self.shares, shareholder) == remaining_shares;
+    ensures remaining_shares == 0 ==> !simple_map::spec_contains_key(self.shares, shareholder);
+    ensures remaining_shares == 0 ==> !vector::spec_contains(self.shareholders, shareholder);
+}
+
+ + + + + +### Function `amount_to_shares_with_total_coins` + + +
public fun amount_to_shares_with_total_coins(self: &pool_u64::Pool, coins_amount: u64, total_coins: u64): u64
+
+ + + + +
aborts_if self.total_coins > 0 && self.total_shares > 0
+    && (coins_amount * self.total_shares) / total_coins > MAX_U64;
+aborts_if (self.total_coins == 0 || self.total_shares == 0)
+    && coins_amount * self.scaling_factor > MAX_U64;
+aborts_if self.total_coins > 0 && self.total_shares > 0 && total_coins == 0;
+ensures result == spec_amount_to_shares_with_total_coins(self, coins_amount, total_coins);
+
+ + + + + +### Function `shares_to_amount_with_total_coins` + + +
public fun shares_to_amount_with_total_coins(self: &pool_u64::Pool, shares: u64, total_coins: u64): u64
+
+ + + + +
aborts_if self.total_coins > 0 && self.total_shares > 0
+    && (shares * total_coins) / self.total_shares > MAX_U64;
+ensures result == spec_shares_to_amount_with_total_coins(self, shares, total_coins);
+
+ + + + + + + +
fun spec_shares_to_amount_with_total_coins(pool: Pool, shares: u64, total_coins: u64): u64 {
+   if (pool.total_coins == 0 || pool.total_shares == 0) {
+       0
+   }
+   else {
+       (shares * total_coins) / pool.total_shares
+   }
+}
+
+ + + + + +### Function `multiply_then_divide` + + +
public fun multiply_then_divide(self: &pool_u64::Pool, x: u64, y: u64, z: u64): u64
+
+ + + + +
aborts_if z == 0;
+aborts_if (x * y) / z > MAX_U64;
+ensures result == (x * y) / z;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/pool_u64_unbound.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/pool_u64_unbound.md new file mode 100644 index 0000000000000..31bc3ca2edb2e --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/pool_u64_unbound.md @@ -0,0 +1,1336 @@ + + + +# Module `0x1::pool_u64_unbound` + + +Simple module for tracking and calculating shares of a pool of coins. The shares are worth more as the total coins in +the pool increases. New shareholder can buy more shares or redeem their existing shares. + +Example flow: +1. Pool start outs empty. +2. Shareholder A buys in with 1000 coins. A will receive 1000 shares in the pool. Pool now has 1000 total coins and +1000 total shares. +3. Pool appreciates in value from rewards and now has 2000 coins. A's 1000 shares are now worth 2000 coins. +4. Shareholder B now buys in with 1000 coins. Since before the buy in, each existing share is worth 2 coins, B will +receive 500 shares in exchange for 1000 coins. Pool now has 1500 shares and 3000 coins. +5. Pool appreciates in value from rewards and now has 6000 coins. +6. A redeems 500 shares. Each share is worth 6000 / 1500 = 4. A receives 2000 coins. Pool has 4000 coins and 1000 +shares left. 
+ + +- [Struct `Pool`](#0x1_pool_u64_unbound_Pool) +- [Constants](#@Constants_0) +- [Function `new`](#0x1_pool_u64_unbound_new) +- [Function `create`](#0x1_pool_u64_unbound_create) +- [Function `create_with_scaling_factor`](#0x1_pool_u64_unbound_create_with_scaling_factor) +- [Function `destroy_empty`](#0x1_pool_u64_unbound_destroy_empty) +- [Function `total_coins`](#0x1_pool_u64_unbound_total_coins) +- [Function `total_shares`](#0x1_pool_u64_unbound_total_shares) +- [Function `contains`](#0x1_pool_u64_unbound_contains) +- [Function `shares`](#0x1_pool_u64_unbound_shares) +- [Function `balance`](#0x1_pool_u64_unbound_balance) +- [Function `shareholders_count`](#0x1_pool_u64_unbound_shareholders_count) +- [Function `update_total_coins`](#0x1_pool_u64_unbound_update_total_coins) +- [Function `buy_in`](#0x1_pool_u64_unbound_buy_in) +- [Function `add_shares`](#0x1_pool_u64_unbound_add_shares) +- [Function `redeem_shares`](#0x1_pool_u64_unbound_redeem_shares) +- [Function `transfer_shares`](#0x1_pool_u64_unbound_transfer_shares) +- [Function `deduct_shares`](#0x1_pool_u64_unbound_deduct_shares) +- [Function `amount_to_shares`](#0x1_pool_u64_unbound_amount_to_shares) +- [Function `amount_to_shares_with_total_coins`](#0x1_pool_u64_unbound_amount_to_shares_with_total_coins) +- [Function `shares_to_amount`](#0x1_pool_u64_unbound_shares_to_amount) +- [Function `shares_to_amount_with_total_coins`](#0x1_pool_u64_unbound_shares_to_amount_with_total_coins) +- [Function `shares_to_amount_with_total_stats`](#0x1_pool_u64_unbound_shares_to_amount_with_total_stats) +- [Function `multiply_then_divide`](#0x1_pool_u64_unbound_multiply_then_divide) +- [Function `to_u128`](#0x1_pool_u64_unbound_to_u128) +- [Function `to_u256`](#0x1_pool_u64_unbound_to_u256) +- [Specification](#@Specification_1) + - [Struct `Pool`](#@Specification_1_Pool) + - [Function `contains`](#@Specification_1_contains) + - [Function `shares`](#@Specification_1_shares) + - [Function 
`balance`](#@Specification_1_balance) + - [Function `buy_in`](#@Specification_1_buy_in) + - [Function `add_shares`](#@Specification_1_add_shares) + - [Function `redeem_shares`](#@Specification_1_redeem_shares) + - [Function `transfer_shares`](#@Specification_1_transfer_shares) + - [Function `deduct_shares`](#@Specification_1_deduct_shares) + - [Function `amount_to_shares_with_total_coins`](#@Specification_1_amount_to_shares_with_total_coins) + - [Function `shares_to_amount_with_total_coins`](#@Specification_1_shares_to_amount_with_total_coins) + - [Function `multiply_then_divide`](#@Specification_1_multiply_then_divide) + - [Function `to_u128`](#@Specification_1_to_u128) + - [Function `to_u256`](#@Specification_1_to_u256) + + +
use 0x1::error;
+use 0x1::table_with_length;
+
+ + + + + +## Struct `Pool` + + + +
struct Pool has store
+
+ + + +
+Fields + + +
+
+total_coins: u64 +
+
+ +
+
+total_shares: u128 +
+
+ +
+
+shares: table_with_length::TableWithLength<address, u128> +
+
+ +
+
+scaling_factor: u64 +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const MAX_U64: u64 = 18446744073709551615;
+
+ + + + + + + +
const MAX_U128: u128 = 340282366920938463463374607431768211455;
+
+ + + + + +Cannot redeem more shares than the shareholder has in the pool. + + +
const EINSUFFICIENT_SHARES: u64 = 4;
+
+ + + + + +Cannot destroy non-empty pool. + + +
const EPOOL_IS_NOT_EMPTY: u64 = 3;
+
+ + + + + +Pool's total coins cannot exceed u64.max. + + +
const EPOOL_TOTAL_COINS_OVERFLOW: u64 = 6;
+
+ + + + + +Pool's total shares cannot exceed u64.max. + + +
const EPOOL_TOTAL_SHARES_OVERFLOW: u64 = 7;
+
+ + + + + +Shareholder not present in pool. + + +
const ESHAREHOLDER_NOT_FOUND: u64 = 1;
+
+ + + + + +Shareholder cannot have more than u64.max shares. + + +
const ESHAREHOLDER_SHARES_OVERFLOW: u64 = 5;
+
+ + + + + +There are too many shareholders in the pool. + + +
const ETOO_MANY_SHAREHOLDERS: u64 = 2;
+
+ + + + + +## Function `new` + +Create a new pool. + + +
public fun new(): pool_u64_unbound::Pool
+
+ + + +
+Implementation + + +
public fun new(): Pool {
+    // Default to a scaling factor of 1 (effectively no scaling).
+    create_with_scaling_factor(1)
+}
+
+ + + +
+ + + +## Function `create` + +Deprecated. Use new instead. +Create a new pool. + + +
#[deprecated]
+public fun create(): pool_u64_unbound::Pool
+
+ + + +
+Implementation + + +
public fun create(): Pool {
+    new()
+}
+
+ + + +
+ + + +## Function `create_with_scaling_factor` + +Create a new pool with custom scaling_factor. + + +
public fun create_with_scaling_factor(scaling_factor: u64): pool_u64_unbound::Pool
+
+ + + +
+Implementation + + +
public fun create_with_scaling_factor(scaling_factor: u64): Pool {
+    Pool {
+        total_coins: 0,
+        total_shares: 0,
+        shares: table::new<address, u128>(),
+        scaling_factor,
+    }
+}
+
+ + + +
+ + + +## Function `destroy_empty` + +Destroy an empty pool. This will fail if the pool has any balance of coins. + + +
public fun destroy_empty(self: pool_u64_unbound::Pool)
+
+ + + +
+Implementation + + +
public fun destroy_empty(self: Pool) {
+    assert!(self.total_coins == 0, error::invalid_state(EPOOL_IS_NOT_EMPTY));
+    let Pool {
+        total_coins: _,
+        total_shares: _,
+        shares,
+        scaling_factor: _,
+    } = self;
+    table::destroy_empty<address, u128>(shares);
+}
+
+ + + +
+ + + +## Function `total_coins` + +Return self's total balance of coins. + + +
public fun total_coins(self: &pool_u64_unbound::Pool): u64
+
+ + + +
+Implementation + + +
public fun total_coins(self: &Pool): u64 {
+    self.total_coins
+}
+
+ + + +
+ + + +## Function `total_shares` + +Return the total number of shares across all shareholders in self. + + +
public fun total_shares(self: &pool_u64_unbound::Pool): u128
+
+ + + +
+Implementation + + +
public fun total_shares(self: &Pool): u128 {
+    self.total_shares
+}
+
+ + + +
+ + + +## Function `contains` + +Return true if shareholder is in self. + + +
public fun contains(self: &pool_u64_unbound::Pool, shareholder: address): bool
+
+ + + +
+Implementation + + +
public fun contains(self: &Pool, shareholder: address): bool {
+    table::contains(&self.shares, shareholder)
+}
+
+ + + +
+ + + +## Function `shares` + +Return the number of shares of stakeholder in self. + + +
public fun shares(self: &pool_u64_unbound::Pool, shareholder: address): u128
+
+ + + +
+Implementation + + +
public fun shares(self: &Pool, shareholder: address): u128 {
+    if (contains(self, shareholder)) {
+        *table::borrow(&self.shares, shareholder)
+    } else {
+        0
+    }
+}
+
+ + + +
+ + + +## Function `balance` + +Return the balance in coins of shareholder in self. + + +
public fun balance(self: &pool_u64_unbound::Pool, shareholder: address): u64
+
+ + + +
+Implementation + + +
public fun balance(self: &Pool, shareholder: address): u64 {
+    let num_shares = shares(self, shareholder);
+    shares_to_amount(self, num_shares)
+}
+
+ + + +
+ + + +## Function `shareholders_count` + +Return the number of shareholders in self. + + +
public fun shareholders_count(self: &pool_u64_unbound::Pool): u64
+
+ + + +
+Implementation + + +
public fun shareholders_count(self: &Pool): u64 {
+    table::length(&self.shares)
+}
+
+ + + +
+ + + +## Function `update_total_coins` + +Update self's total balance of coins. + + +
public fun update_total_coins(self: &mut pool_u64_unbound::Pool, new_total_coins: u64)
+
+ + + +
+Implementation + + +
public fun update_total_coins(self: &mut Pool, new_total_coins: u64) {
+    self.total_coins = new_total_coins;
+}
+
+ + + +
+ + + +## Function `buy_in` + +Allow an existing or new shareholder to add their coins to the pool in exchange for new shares. + + +
public fun buy_in(self: &mut pool_u64_unbound::Pool, shareholder: address, coins_amount: u64): u128
+
+ + + +
+Implementation + + +
public fun buy_in(self: &mut Pool, shareholder: address, coins_amount: u64): u128 {
+    if (coins_amount == 0) return 0;
+
+    let new_shares = amount_to_shares(self, coins_amount);
+    assert!(MAX_U64 - self.total_coins >= coins_amount, error::invalid_argument(EPOOL_TOTAL_COINS_OVERFLOW));
+    assert!(MAX_U128 - self.total_shares >= new_shares, error::invalid_argument(EPOOL_TOTAL_SHARES_OVERFLOW));
+
+    self.total_coins = self.total_coins + coins_amount;
+    self.total_shares = self.total_shares + new_shares;
+    add_shares(self, shareholder, new_shares);
+    new_shares
+}
+
+ + + +
+ + + +## Function `add_shares` + +Add the number of shares directly for shareholder in self. +This would dilute other shareholders if the pool's balance of coins didn't change. + + +
fun add_shares(self: &mut pool_u64_unbound::Pool, shareholder: address, new_shares: u128): u128
+
+ + + +
+Implementation + + +
fun add_shares(self: &mut Pool, shareholder: address, new_shares: u128): u128 {
+    if (contains(self, shareholder)) {
+        let existing_shares = table::borrow_mut(&mut self.shares, shareholder);
+        let current_shares = *existing_shares;
+        assert!(MAX_U128 - current_shares >= new_shares, error::invalid_argument(ESHAREHOLDER_SHARES_OVERFLOW));
+
+        *existing_shares = current_shares + new_shares;
+        *existing_shares
+    } else if (new_shares > 0) {
+        table::add(&mut self.shares, shareholder, new_shares);
+        new_shares
+    } else {
+        new_shares
+    }
+}
+
+ + + +
+ + + +## Function `redeem_shares` + +Allow shareholder to redeem their shares in self for coins. + + +
public fun redeem_shares(self: &mut pool_u64_unbound::Pool, shareholder: address, shares_to_redeem: u128): u64
+
+ + + +
+Implementation + + +
public fun redeem_shares(self: &mut Pool, shareholder: address, shares_to_redeem: u128): u64 {
+    assert!(contains(self, shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
+    assert!(shares(self, shareholder) >= shares_to_redeem, error::invalid_argument(EINSUFFICIENT_SHARES));
+
+    if (shares_to_redeem == 0) return 0;
+
+    let redeemed_coins = shares_to_amount(self, shares_to_redeem);
+    self.total_coins = self.total_coins - redeemed_coins;
+    self.total_shares = self.total_shares - shares_to_redeem;
+    deduct_shares(self, shareholder, shares_to_redeem);
+
+    redeemed_coins
+}
+
+ + + +
+ + + +## Function `transfer_shares` + +Transfer shares from shareholder_1 to shareholder_2. + + +
public fun transfer_shares(self: &mut pool_u64_unbound::Pool, shareholder_1: address, shareholder_2: address, shares_to_transfer: u128)
+
+ + + +
+Implementation + + +
public fun transfer_shares(
+    self: &mut Pool,
+    shareholder_1: address,
+    shareholder_2: address,
+    shares_to_transfer: u128,
+) {
+    assert!(contains(self, shareholder_1), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
+    assert!(shares(self, shareholder_1) >= shares_to_transfer, error::invalid_argument(EINSUFFICIENT_SHARES));
+    if (shares_to_transfer == 0) return;
+
+    deduct_shares(self, shareholder_1, shares_to_transfer);
+    add_shares(self, shareholder_2, shares_to_transfer);
+}
+
+ + + +
+ + + +## Function `deduct_shares` + +Directly deduct shareholder's number of shares in self and return the number of remaining shares. + + +
fun deduct_shares(self: &mut pool_u64_unbound::Pool, shareholder: address, num_shares: u128): u128
+
+ + + +
+Implementation + + +
fun deduct_shares(self: &mut Pool, shareholder: address, num_shares: u128): u128 {
+    assert!(contains(self, shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
+    assert!(shares(self, shareholder) >= num_shares, error::invalid_argument(EINSUFFICIENT_SHARES));
+
+    let existing_shares = table::borrow_mut(&mut self.shares, shareholder);
+    *existing_shares = *existing_shares - num_shares;
+
+    // Remove the shareholder completely if they have no shares left.
+    let remaining_shares = *existing_shares;
+    if (remaining_shares == 0) {
+        table::remove(&mut self.shares, shareholder);
+    };
+
+    remaining_shares
+}
+
+ + + +
+ + + +
## Function `amount_to_shares` + +Return the number of new shares coins_amount can buy in self. +coins_amount needs to be big enough to avoid rounding errors. + + +
public fun amount_to_shares(self: &pool_u64_unbound::Pool, coins_amount: u64): u128
+
+ + + +
+Implementation + + +
public fun amount_to_shares(self: &Pool, coins_amount: u64): u128 {
+    amount_to_shares_with_total_coins(self, coins_amount, self.total_coins)
+}
+
+ + + +
+ + + +
## Function `amount_to_shares_with_total_coins` + +Return the number of new shares coins_amount can buy in self with a custom total coins number. +coins_amount needs to be big enough to avoid rounding errors. + + +
public fun amount_to_shares_with_total_coins(self: &pool_u64_unbound::Pool, coins_amount: u64, total_coins: u64): u128
+
+ + + +
+Implementation + + +
public fun amount_to_shares_with_total_coins(self: &Pool, coins_amount: u64, total_coins: u64): u128 {
+    // No shares yet so amount is worth the same number of shares.
+    if (self.total_coins == 0 || self.total_shares == 0) {
+        // Multiply by scaling factor to minimize rounding errors during internal calculations for buy ins/redeems.
+        // This can overflow but scaling factor is expected to be chosen carefully so this would not overflow.
+        to_u128(coins_amount) * to_u128(self.scaling_factor)
+    } else {
+        // Shares price = total_coins / total existing shares.
+        // New number of shares = new_amount / shares_price = new_amount * existing_shares / total_amount.
+        // We rearrange the calc and do multiplication first to avoid rounding errors.
+        multiply_then_divide(self, to_u128(coins_amount), self.total_shares, to_u128(total_coins))
+    }
+}
+
+ + + +
+ + + +
## Function `shares_to_amount` + +Return the number of coins shares are worth in self. +shares needs to be big enough to avoid rounding errors. + + +
public fun shares_to_amount(self: &pool_u64_unbound::Pool, shares: u128): u64
+
+ + + +
+Implementation + + +
public fun shares_to_amount(self: &Pool, shares: u128): u64 {
+    shares_to_amount_with_total_coins(self, shares, self.total_coins)
+}
+
+ + + +
+ + + +
## Function `shares_to_amount_with_total_coins` + +Return the number of coins shares are worth in self with a custom total coins number. +shares needs to be big enough to avoid rounding errors. + + +
public fun shares_to_amount_with_total_coins(self: &pool_u64_unbound::Pool, shares: u128, total_coins: u64): u64
+
+ + + +
+Implementation + + +
public fun shares_to_amount_with_total_coins(self: &Pool, shares: u128, total_coins: u64): u64 {
+    // No shares or coins yet so shares are worthless.
+    if (self.total_coins == 0 || self.total_shares == 0) {
+        0
+    } else {
+        // Shares price = total_coins / total existing shares.
+        // Shares worth = shares * shares price = shares * total_coins / total existing shares.
+        // We rearrange the calc and do multiplication first to avoid rounding errors.
+        (multiply_then_divide(self, shares, to_u128(total_coins), self.total_shares) as u64)
+    }
+}
+
+ + + +
+ + + +## Function `shares_to_amount_with_total_stats` + +Return the number of coins shares are worth in pool with custom total coins and shares numbers. + + +
public fun shares_to_amount_with_total_stats(self: &pool_u64_unbound::Pool, shares: u128, total_coins: u64, total_shares: u128): u64
+
+ + + +
+Implementation + + +
public fun shares_to_amount_with_total_stats(
+    self: &Pool,
+    shares: u128,
+    total_coins: u64,
+    total_shares: u128,
+): u64 {
+    if (self.total_coins == 0 || total_shares == 0) {
+        0
+    } else {
+        (multiply_then_divide(self, shares, to_u128(total_coins), total_shares) as u64)
+    }
+}
+
+ + + +
+ + + +## Function `multiply_then_divide` + + + +
public fun multiply_then_divide(self: &pool_u64_unbound::Pool, x: u128, y: u128, z: u128): u128
+
+ + + +
+Implementation + + +
public fun multiply_then_divide(self: &Pool, x: u128, y: u128, z: u128): u128 {
+    let result = (to_u256(x) * to_u256(y)) / to_u256(z);
+    (result as u128)
+}
+
+ + + +
+ + + +## Function `to_u128` + + + +
fun to_u128(num: u64): u128
+
+ + + +
+Implementation + + +
fun to_u128(num: u64): u128 {
+    (num as u128)
+}
+
+ + + +
+ + + +## Function `to_u256` + + + +
fun to_u256(num: u128): u256
+
+ + + +
+Implementation + + +
fun to_u256(num: u128): u256 {
+    (num as u256)
+}
+
+ + + +
+ + + +## Specification + + + + +### Struct `Pool` + + +
struct Pool has store
+
+ + + +
+
+total_coins: u64 +
+
+ +
+
+total_shares: u128 +
+
+ +
+
+shares: table_with_length::TableWithLength<address, u128> +
+
+ +
+
+scaling_factor: u64 +
+
+ +
+
+ + + +
invariant forall addr: address:
+    table::spec_contains(shares, addr) ==> (table::spec_get(shares, addr) > 0);
+
+ + + + + + + +
fun spec_contains(pool: Pool, shareholder: address): bool {
+   table::spec_contains(pool.shares, shareholder)
+}
+
+ + + + + +### Function `contains` + + +
public fun contains(self: &pool_u64_unbound::Pool, shareholder: address): bool
+
+ + + + +
aborts_if false;
+ensures result == spec_contains(self, shareholder);
+
+ + + + + + + +
fun spec_shares(pool: Pool, shareholder: address): u64 {
+   if (spec_contains(pool, shareholder)) {
+       table::spec_get(pool.shares, shareholder)
+   }
+   else {
+       0
+   }
+}
+
+ + + + + +### Function `shares` + + +
public fun shares(self: &pool_u64_unbound::Pool, shareholder: address): u128
+
+ + + + +
aborts_if false;
+ensures result == spec_shares(self, shareholder);
+
+ + + + + +### Function `balance` + + +
public fun balance(self: &pool_u64_unbound::Pool, shareholder: address): u64
+
+ + + + +
let shares = spec_shares(self, shareholder);
+let total_coins = self.total_coins;
+aborts_if self.total_coins > 0 && self.total_shares > 0 && (shares * total_coins) / self.total_shares > MAX_U64;
+ensures result == spec_shares_to_amount_with_total_coins(self, shares, total_coins);
+
+ + + + + +### Function `buy_in` + + +
public fun buy_in(self: &mut pool_u64_unbound::Pool, shareholder: address, coins_amount: u64): u128
+
+ + + + +
let new_shares = spec_amount_to_shares_with_total_coins(self, coins_amount, self.total_coins);
+aborts_if self.total_coins + coins_amount > MAX_U64;
+aborts_if self.total_shares + new_shares > MAX_U128;
+include coins_amount > 0 ==> AddSharesAbortsIf { new_shares: new_shares };
+include coins_amount > 0 ==> AddSharesEnsures { new_shares: new_shares };
+ensures self.total_coins == old(self.total_coins) + coins_amount;
+ensures self.total_shares == old(self.total_shares) + new_shares;
+ensures result == new_shares;
+
+ + + + + +### Function `add_shares` + + +
fun add_shares(self: &mut pool_u64_unbound::Pool, shareholder: address, new_shares: u128): u128
+
+ + + + +
include AddSharesAbortsIf;
+include AddSharesEnsures;
+let key_exists = table::spec_contains(self.shares, shareholder);
+ensures result == if (key_exists) { table::spec_get(self.shares, shareholder) }
+else { new_shares };
+
+ + + + + + + +
schema AddSharesAbortsIf {
+    self: Pool;
+    shareholder: address;
+    new_shares: u64;
+    let key_exists = table::spec_contains(self.shares, shareholder);
+    let current_shares = table::spec_get(self.shares, shareholder);
+    aborts_if key_exists && current_shares + new_shares > MAX_U128;
+}
+
+ + + + + + + +
schema AddSharesEnsures {
+    self: Pool;
+    shareholder: address;
+    new_shares: u64;
+    let key_exists = table::spec_contains(self.shares, shareholder);
+    let current_shares = table::spec_get(self.shares, shareholder);
+    ensures key_exists ==>
+        self.shares == table::spec_set(old(self.shares), shareholder, current_shares + new_shares);
+    ensures (!key_exists && new_shares > 0) ==>
+        self.shares == table::spec_set(old(self.shares), shareholder, new_shares);
+}
+
+ + + + + + + +
fun spec_amount_to_shares_with_total_coins(pool: Pool, coins_amount: u64, total_coins: u64): u128 {
+   if (pool.total_coins == 0 || pool.total_shares == 0) {
+       coins_amount * pool.scaling_factor
+   }
+   else {
+       (coins_amount * pool.total_shares) / total_coins
+   }
+}
+
+ + + + + +### Function `redeem_shares` + + +
public fun redeem_shares(self: &mut pool_u64_unbound::Pool, shareholder: address, shares_to_redeem: u128): u64
+
+ + + + +
let redeemed_coins = spec_shares_to_amount_with_total_coins(self, shares_to_redeem, self.total_coins);
+aborts_if !spec_contains(self, shareholder);
+aborts_if spec_shares(self, shareholder) < shares_to_redeem;
+aborts_if self.total_coins < redeemed_coins;
+aborts_if self.total_shares < shares_to_redeem;
+ensures self.total_coins == old(self.total_coins) - redeemed_coins;
+ensures self.total_shares == old(self.total_shares) - shares_to_redeem;
+include shares_to_redeem > 0 ==> DeductSharesEnsures {
+    num_shares: shares_to_redeem
+};
+ensures result == redeemed_coins;
+
+ + + + + +### Function `transfer_shares` + + +
public fun transfer_shares(self: &mut pool_u64_unbound::Pool, shareholder_1: address, shareholder_2: address, shares_to_transfer: u128)
+
+ + + + +
aborts_if (shareholder_1 != shareholder_2) && shares_to_transfer > 0 && spec_contains(self, shareholder_2) &&
+    (spec_shares(self, shareholder_2) + shares_to_transfer > MAX_U128);
+aborts_if !spec_contains(self, shareholder_1);
+aborts_if spec_shares(self, shareholder_1) < shares_to_transfer;
+ensures shareholder_1 == shareholder_2 ==> spec_shares(old(self), shareholder_1) == spec_shares(
+    self, shareholder_1);
+ensures ((shareholder_1 != shareholder_2) && (spec_shares(old(self), shareholder_1) == shares_to_transfer)) ==>
+    !spec_contains(self, shareholder_1);
+ensures (shareholder_1 != shareholder_2 && shares_to_transfer > 0) ==>
+    (spec_contains(self, shareholder_2));
+ensures (shareholder_1 != shareholder_2 && shares_to_transfer > 0 && !spec_contains(old(self), shareholder_2)) ==>
+    (spec_contains(self, shareholder_2) && spec_shares(self, shareholder_2) == shares_to_transfer);
+ensures (shareholder_1 != shareholder_2 && shares_to_transfer > 0 && spec_contains(old(self), shareholder_2)) ==>
+    (spec_contains(self, shareholder_2) && spec_shares(self, shareholder_2) == spec_shares(old(self), shareholder_2) + shares_to_transfer);
+ensures ((shareholder_1 != shareholder_2) && (spec_shares(old(self), shareholder_1) > shares_to_transfer)) ==>
+    (spec_contains(self, shareholder_1) && (spec_shares(self, shareholder_1) == spec_shares(old(self), shareholder_1) - shares_to_transfer));
+
+ + + + + +### Function `deduct_shares` + + +
fun deduct_shares(self: &mut pool_u64_unbound::Pool, shareholder: address, num_shares: u128): u128
+
+ + + + +
aborts_if !spec_contains(self, shareholder);
+aborts_if spec_shares(self, shareholder) < num_shares;
+include DeductSharesEnsures;
+let remaining_shares = table::spec_get(self.shares, shareholder) - num_shares;
+ensures remaining_shares > 0 ==> result == table::spec_get(self.shares, shareholder);
+ensures remaining_shares == 0 ==> result == 0;
+
+ + + + + + + +
schema DeductSharesEnsures {
+    self: Pool;
+    shareholder: address;
+    num_shares: u64;
+    let remaining_shares = table::spec_get(self.shares, shareholder) - num_shares;
+    ensures remaining_shares > 0 ==> table::spec_get(self.shares, shareholder) == remaining_shares;
+    ensures remaining_shares == 0 ==> !table::spec_contains(self.shares, shareholder);
+}
+
+ + + + + +### Function `amount_to_shares_with_total_coins` + + +
public fun amount_to_shares_with_total_coins(self: &pool_u64_unbound::Pool, coins_amount: u64, total_coins: u64): u128
+
+ + + + +
aborts_if self.total_coins > 0 && self.total_shares > 0
+    && (coins_amount * self.total_shares) / total_coins > MAX_U128;
+aborts_if (self.total_coins == 0 || self.total_shares == 0)
+    && coins_amount * self.scaling_factor > MAX_U128;
+aborts_if self.total_coins > 0 && self.total_shares > 0 && total_coins == 0;
+ensures result == spec_amount_to_shares_with_total_coins(self, coins_amount, total_coins);
+
+ + + + + +### Function `shares_to_amount_with_total_coins` + + +
public fun shares_to_amount_with_total_coins(self: &pool_u64_unbound::Pool, shares: u128, total_coins: u64): u64
+
+ + + + +
aborts_if self.total_coins > 0 && self.total_shares > 0
+    && (shares * total_coins) / self.total_shares > MAX_U64;
+ensures result == spec_shares_to_amount_with_total_coins(self, shares, total_coins);
+
+ + + + + + + +
fun spec_shares_to_amount_with_total_coins(pool: Pool, shares: u128, total_coins: u64): u64 {
+   if (pool.total_coins == 0 || pool.total_shares == 0) {
+       0
+   }
+   else {
+       (shares * total_coins) / pool.total_shares
+   }
+}
+
+ + + + + +### Function `multiply_then_divide` + + +
public fun multiply_then_divide(self: &pool_u64_unbound::Pool, x: u128, y: u128, z: u128): u128
+
+ + + + +
aborts_if z == 0;
+aborts_if (x * y) / z > MAX_U128;
+ensures result == (x * y) / z;
+
+ + + + + +### Function `to_u128` + + +
fun to_u128(num: u64): u128
+
+ + + + +
aborts_if false;
+ensures result == num;
+
+ + + + + +### Function `to_u256` + + +
fun to_u256(num: u128): u256
+
+ + + + +
aborts_if false;
+ensures result == num;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/ristretto255.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/ristretto255.md new file mode 100644 index 0000000000000..15b42e8e8f61e --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/ristretto255.md @@ -0,0 +1,3463 @@ + + + +# Module `0x1::ristretto255` + +This module contains functions for Ristretto255 curve arithmetic, assuming addition as the group operation. + +The order of the Ristretto255 elliptic curve group is $\ell = 2^252 + 27742317777372353535851937790883648493$, same +as the order of the prime-order subgroup of Curve25519. + +This module provides two structs for encoding Ristretto elliptic curves to the developer: + +- First, a 32-byte-sized CompressedRistretto struct, which is used to persist points in storage. + +- Second, a larger, in-memory, RistrettoPoint struct, which is decompressable from a CompressedRistretto struct. This +larger struct can be used for fast arithmetic operations (additions, multiplications, etc.). The results can be saved +back into storage by compressing RistrettoPoint structs back to CompressedRistretto structs. + +This module also provides a Scalar struct for persisting scalars in storage and doing fast arithmetic on them. + +One invariant maintained by this module is that all CompressedRistretto structs store a canonically-encoded point, +which can always be decompressed into a valid point on the curve as a RistrettoPoint struct. Unfortunately, due to +limitations in our underlying curve25519-dalek elliptic curve library, this decompression will unnecessarily verify +the validity of the point and thus slightly decrease performance. + +Similarly, all Scalar structs store a canonically-encoded scalar, which can always be safely operated on using +arithmetic operations. 
+ +In the future, we might support additional features: + +* For scalars: +- batch_invert() + +* For points: +- double() ++ The challenge is that curve25519-dalek does NOT export double for Ristretto points (nor for Edwards) + +- double_and_compress_batch() + +- fixed-base, variable-time via optional_mixed_multiscalar_mul() in VartimePrecomputedMultiscalarMul ++ This would require a storage-friendly RistrettoBasepointTable and an in-memory variant of it too ++ Similar to the CompressedRistretto and RistrettoPoint structs in this module ++ The challenge is that curve25519-dalek's RistrettoBasepointTable is not serializable + + +- [Struct `Scalar`](#0x1_ristretto255_Scalar) +- [Struct `CompressedRistretto`](#0x1_ristretto255_CompressedRistretto) +- [Struct `RistrettoPoint`](#0x1_ristretto255_RistrettoPoint) +- [Constants](#@Constants_0) +- [Function `point_identity_compressed`](#0x1_ristretto255_point_identity_compressed) +- [Function `point_identity`](#0x1_ristretto255_point_identity) +- [Function `basepoint_compressed`](#0x1_ristretto255_basepoint_compressed) +- [Function `hash_to_point_base`](#0x1_ristretto255_hash_to_point_base) +- [Function `basepoint`](#0x1_ristretto255_basepoint) +- [Function `basepoint_mul`](#0x1_ristretto255_basepoint_mul) +- [Function `new_compressed_point_from_bytes`](#0x1_ristretto255_new_compressed_point_from_bytes) +- [Function `new_point_from_bytes`](#0x1_ristretto255_new_point_from_bytes) +- [Function `compressed_point_to_bytes`](#0x1_ristretto255_compressed_point_to_bytes) +- [Function `new_point_from_sha512`](#0x1_ristretto255_new_point_from_sha512) +- [Function `new_point_from_sha2_512`](#0x1_ristretto255_new_point_from_sha2_512) +- [Function `new_point_from_64_uniform_bytes`](#0x1_ristretto255_new_point_from_64_uniform_bytes) +- [Function `point_decompress`](#0x1_ristretto255_point_decompress) +- [Function `point_clone`](#0x1_ristretto255_point_clone) +- [Function `point_compress`](#0x1_ristretto255_point_compress) +- [Function 
`point_to_bytes`](#0x1_ristretto255_point_to_bytes) +- [Function `point_mul`](#0x1_ristretto255_point_mul) +- [Function `point_mul_assign`](#0x1_ristretto255_point_mul_assign) +- [Function `basepoint_double_mul`](#0x1_ristretto255_basepoint_double_mul) +- [Function `point_add`](#0x1_ristretto255_point_add) +- [Function `point_add_assign`](#0x1_ristretto255_point_add_assign) +- [Function `point_sub`](#0x1_ristretto255_point_sub) +- [Function `point_sub_assign`](#0x1_ristretto255_point_sub_assign) +- [Function `point_neg`](#0x1_ristretto255_point_neg) +- [Function `point_neg_assign`](#0x1_ristretto255_point_neg_assign) +- [Function `point_equals`](#0x1_ristretto255_point_equals) +- [Function `double_scalar_mul`](#0x1_ristretto255_double_scalar_mul) +- [Function `multi_scalar_mul`](#0x1_ristretto255_multi_scalar_mul) +- [Function `new_scalar_from_bytes`](#0x1_ristretto255_new_scalar_from_bytes) +- [Function `new_scalar_from_sha512`](#0x1_ristretto255_new_scalar_from_sha512) +- [Function `new_scalar_from_sha2_512`](#0x1_ristretto255_new_scalar_from_sha2_512) +- [Function `new_scalar_from_u8`](#0x1_ristretto255_new_scalar_from_u8) +- [Function `new_scalar_from_u32`](#0x1_ristretto255_new_scalar_from_u32) +- [Function `new_scalar_from_u64`](#0x1_ristretto255_new_scalar_from_u64) +- [Function `new_scalar_from_u128`](#0x1_ristretto255_new_scalar_from_u128) +- [Function `new_scalar_reduced_from_32_bytes`](#0x1_ristretto255_new_scalar_reduced_from_32_bytes) +- [Function `new_scalar_uniform_from_64_bytes`](#0x1_ristretto255_new_scalar_uniform_from_64_bytes) +- [Function `scalar_zero`](#0x1_ristretto255_scalar_zero) +- [Function `scalar_is_zero`](#0x1_ristretto255_scalar_is_zero) +- [Function `scalar_one`](#0x1_ristretto255_scalar_one) +- [Function `scalar_is_one`](#0x1_ristretto255_scalar_is_one) +- [Function `scalar_equals`](#0x1_ristretto255_scalar_equals) +- [Function `scalar_invert`](#0x1_ristretto255_scalar_invert) +- [Function `scalar_mul`](#0x1_ristretto255_scalar_mul) 
+- [Function `scalar_mul_assign`](#0x1_ristretto255_scalar_mul_assign) +- [Function `scalar_add`](#0x1_ristretto255_scalar_add) +- [Function `scalar_add_assign`](#0x1_ristretto255_scalar_add_assign) +- [Function `scalar_sub`](#0x1_ristretto255_scalar_sub) +- [Function `scalar_sub_assign`](#0x1_ristretto255_scalar_sub_assign) +- [Function `scalar_neg`](#0x1_ristretto255_scalar_neg) +- [Function `scalar_neg_assign`](#0x1_ristretto255_scalar_neg_assign) +- [Function `scalar_to_bytes`](#0x1_ristretto255_scalar_to_bytes) +- [Function `new_point_from_sha512_internal`](#0x1_ristretto255_new_point_from_sha512_internal) +- [Function `new_point_from_64_uniform_bytes_internal`](#0x1_ristretto255_new_point_from_64_uniform_bytes_internal) +- [Function `point_is_canonical_internal`](#0x1_ristretto255_point_is_canonical_internal) +- [Function `point_identity_internal`](#0x1_ristretto255_point_identity_internal) +- [Function `point_decompress_internal`](#0x1_ristretto255_point_decompress_internal) +- [Function `point_clone_internal`](#0x1_ristretto255_point_clone_internal) +- [Function `point_compress_internal`](#0x1_ristretto255_point_compress_internal) +- [Function `point_mul_internal`](#0x1_ristretto255_point_mul_internal) +- [Function `basepoint_mul_internal`](#0x1_ristretto255_basepoint_mul_internal) +- [Function `basepoint_double_mul_internal`](#0x1_ristretto255_basepoint_double_mul_internal) +- [Function `point_add_internal`](#0x1_ristretto255_point_add_internal) +- [Function `point_sub_internal`](#0x1_ristretto255_point_sub_internal) +- [Function `point_neg_internal`](#0x1_ristretto255_point_neg_internal) +- [Function `double_scalar_mul_internal`](#0x1_ristretto255_double_scalar_mul_internal) +- [Function `multi_scalar_mul_internal`](#0x1_ristretto255_multi_scalar_mul_internal) +- [Function `scalar_is_canonical_internal`](#0x1_ristretto255_scalar_is_canonical_internal) +- [Function `scalar_from_u64_internal`](#0x1_ristretto255_scalar_from_u64_internal) +- [Function 
`scalar_from_u128_internal`](#0x1_ristretto255_scalar_from_u128_internal) +- [Function `scalar_reduced_from_32_bytes_internal`](#0x1_ristretto255_scalar_reduced_from_32_bytes_internal) +- [Function `scalar_uniform_from_64_bytes_internal`](#0x1_ristretto255_scalar_uniform_from_64_bytes_internal) +- [Function `scalar_invert_internal`](#0x1_ristretto255_scalar_invert_internal) +- [Function `scalar_from_sha512_internal`](#0x1_ristretto255_scalar_from_sha512_internal) +- [Function `scalar_mul_internal`](#0x1_ristretto255_scalar_mul_internal) +- [Function `scalar_add_internal`](#0x1_ristretto255_scalar_add_internal) +- [Function `scalar_sub_internal`](#0x1_ristretto255_scalar_sub_internal) +- [Function `scalar_neg_internal`](#0x1_ristretto255_scalar_neg_internal) +- [Specification](#@Specification_1) + - [Helper functions](#@Helper_functions_2) + - [Function `point_equals`](#@Specification_1_point_equals) + - [Function `double_scalar_mul`](#@Specification_1_double_scalar_mul) + - [Function `multi_scalar_mul`](#@Specification_1_multi_scalar_mul) + - [Function `new_scalar_from_bytes`](#@Specification_1_new_scalar_from_bytes) + - [Function `new_scalar_from_sha2_512`](#@Specification_1_new_scalar_from_sha2_512) + - [Function `new_scalar_from_u8`](#@Specification_1_new_scalar_from_u8) + - [Function `new_scalar_from_u32`](#@Specification_1_new_scalar_from_u32) + - [Function `new_scalar_from_u64`](#@Specification_1_new_scalar_from_u64) + - [Function `new_scalar_from_u128`](#@Specification_1_new_scalar_from_u128) + - [Function `new_scalar_reduced_from_32_bytes`](#@Specification_1_new_scalar_reduced_from_32_bytes) + - [Function `new_scalar_uniform_from_64_bytes`](#@Specification_1_new_scalar_uniform_from_64_bytes) + - [Function `scalar_zero`](#@Specification_1_scalar_zero) + - [Function `scalar_is_zero`](#@Specification_1_scalar_is_zero) + - [Function `scalar_one`](#@Specification_1_scalar_one) + - [Function `scalar_is_one`](#@Specification_1_scalar_is_one) + - [Function 
`scalar_equals`](#@Specification_1_scalar_equals) + - [Function `scalar_invert`](#@Specification_1_scalar_invert) + - [Function `scalar_mul`](#@Specification_1_scalar_mul) + - [Function `scalar_mul_assign`](#@Specification_1_scalar_mul_assign) + - [Function `scalar_add`](#@Specification_1_scalar_add) + - [Function `scalar_add_assign`](#@Specification_1_scalar_add_assign) + - [Function `scalar_sub`](#@Specification_1_scalar_sub) + - [Function `scalar_sub_assign`](#@Specification_1_scalar_sub_assign) + - [Function `scalar_neg`](#@Specification_1_scalar_neg) + - [Function `scalar_neg_assign`](#@Specification_1_scalar_neg_assign) + - [Function `scalar_to_bytes`](#@Specification_1_scalar_to_bytes) + - [Function `new_point_from_sha512_internal`](#@Specification_1_new_point_from_sha512_internal) + - [Function `new_point_from_64_uniform_bytes_internal`](#@Specification_1_new_point_from_64_uniform_bytes_internal) + - [Function `point_is_canonical_internal`](#@Specification_1_point_is_canonical_internal) + - [Function `point_identity_internal`](#@Specification_1_point_identity_internal) + - [Function `point_decompress_internal`](#@Specification_1_point_decompress_internal) + - [Function `point_clone_internal`](#@Specification_1_point_clone_internal) + - [Function `point_compress_internal`](#@Specification_1_point_compress_internal) + - [Function `point_mul_internal`](#@Specification_1_point_mul_internal) + - [Function `basepoint_mul_internal`](#@Specification_1_basepoint_mul_internal) + - [Function `basepoint_double_mul_internal`](#@Specification_1_basepoint_double_mul_internal) + - [Function `point_add_internal`](#@Specification_1_point_add_internal) + - [Function `point_sub_internal`](#@Specification_1_point_sub_internal) + - [Function `point_neg_internal`](#@Specification_1_point_neg_internal) + - [Function `double_scalar_mul_internal`](#@Specification_1_double_scalar_mul_internal) + - [Function `multi_scalar_mul_internal`](#@Specification_1_multi_scalar_mul_internal) + - 
[Function `scalar_is_canonical_internal`](#@Specification_1_scalar_is_canonical_internal) + - [Function `scalar_from_u64_internal`](#@Specification_1_scalar_from_u64_internal) + - [Function `scalar_from_u128_internal`](#@Specification_1_scalar_from_u128_internal) + - [Function `scalar_reduced_from_32_bytes_internal`](#@Specification_1_scalar_reduced_from_32_bytes_internal) + - [Function `scalar_uniform_from_64_bytes_internal`](#@Specification_1_scalar_uniform_from_64_bytes_internal) + - [Function `scalar_invert_internal`](#@Specification_1_scalar_invert_internal) + - [Function `scalar_from_sha512_internal`](#@Specification_1_scalar_from_sha512_internal) + - [Function `scalar_mul_internal`](#@Specification_1_scalar_mul_internal) + - [Function `scalar_add_internal`](#@Specification_1_scalar_add_internal) + - [Function `scalar_sub_internal`](#@Specification_1_scalar_sub_internal) + - [Function `scalar_neg_internal`](#@Specification_1_scalar_neg_internal) + + +
use 0x1::error;
+use 0x1::features;
+use 0x1::option;
+use 0x1::vector;
+
+ + + + + +## Struct `Scalar` + +This struct represents a scalar as a little-endian byte encoding of an integer in $\mathbb{Z}_\ell$, which is +stored in data. Here, \ell denotes the order of the scalar field (and the underlying elliptic curve group). + + +
struct Scalar has copy, drop, store
+
+ + + +
+Fields + + +
+
+data: vector<u8> +
+
+ +
+
+ + +
+ + + +## Struct `CompressedRistretto` + +This struct represents a serialized point on the Ristretto255 curve, in 32 bytes. +This struct can be decompressed from storage into an in-memory RistrettoPoint, on which fast curve arithmetic +can be performed. + + +
struct CompressedRistretto has copy, drop, store
+
+ + + +
+Fields + + +
+
+data: vector<u8> +
+
+ +
+
+ + +
+ + + +## Struct `RistrettoPoint` + +This struct represents an in-memory Ristretto255 point and supports fast curve arithmetic. + +An important invariant: There will never be two RistrettoPoint's constructed with the same handle. One can have +immutable references to the same RistrettoPoint, of course. + + +
struct RistrettoPoint has drop
+
+ + + +
+Fields + + +
+
+handle: u64 +
+
+ +
+
+ + +
+ + + +## Constants + + + + +The native function has not been deployed yet. + + +
const E_NATIVE_FUN_NOT_AVAILABLE: u64 = 5;
+
+ + + + + +The basepoint (generator) of the Ristretto255 group + + +
const BASE_POINT: vector<u8> = [226, 242, 174, 10, 106, 188, 78, 113, 168, 132, 169, 97, 197, 0, 81, 95, 88, 227, 11, 106, 165, 130, 221, 141, 182, 166, 89, 69, 224, 141, 45, 118];
+
+ + + + + +The number of scalars does not match the number of points. + + +
const E_DIFFERENT_NUM_POINTS_AND_SCALARS: u64 = 1;
+
+ + + + + +Too many points have been created in the current transaction execution. + + +
const E_TOO_MANY_POINTS_CREATED: u64 = 4;
+
+ + + + + +Expected more than zero points as input. + + +
const E_ZERO_POINTS: u64 = 2;
+
+ + + + + +Expected more than zero scalars as input. + + +
const E_ZERO_SCALARS: u64 = 3;
+
+ + + + + +The hash of the basepoint of the Ristretto255 group using SHA3_512 + + +
const HASH_BASE_POINT: vector<u8> = [140, 146, 64, 180, 86, 169, 230, 220, 101, 195, 119, 161, 4, 141, 116, 95, 148, 160, 140, 219, 127, 68, 203, 205, 123, 70, 243, 64, 72, 135, 17, 52];
+
+ + + + + +ORDER_ELL - 1: i.e., the "largest", reduced scalar in the field + + +
const L_MINUS_ONE: vector<u8> = [236, 211, 245, 92, 26, 99, 18, 88, 214, 156, 247, 162, 222, 249, 222, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16];
+
+ + + + + +The maximum size in bytes of a canonically-encoded Ristretto255 point is 32 bytes. + + +
const MAX_POINT_NUM_BYTES: u64 = 32;
+
+ + + + + +The maximum size in bits of a canonically-encoded Scalar is 256 bits. + + +
const MAX_SCALAR_NUM_BITS: u64 = 256;
+
+ + + + + +The maximum size in bytes of a canonically-encoded Scalar is 32 bytes. + + +
const MAX_SCALAR_NUM_BYTES: u64 = 32;
+
+ + + + + +The order of the Ristretto255 group and its scalar field, in little-endian. + + +
const ORDER_ELL: vector<u8> = [237, 211, 245, 92, 26, 99, 18, 88, 214, 156, 247, 162, 222, 249, 222, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16];
+
+ + + + + +## Function `point_identity_compressed` + +Returns the identity point as a CompressedRistretto. + + +
public fun point_identity_compressed(): ristretto255::CompressedRistretto
+
+ + + +
+Implementation + + +
public fun point_identity_compressed(): CompressedRistretto {
+    CompressedRistretto {
+        data: x"0000000000000000000000000000000000000000000000000000000000000000"
+    }
+}
+
+ + + +
+ + + +
## Function `point_identity` + +Returns the identity point as a RistrettoPoint. + + +
public fun point_identity(): ristretto255::RistrettoPoint
+
+ + + +
+Implementation + + +
public fun point_identity(): RistrettoPoint {
+    RistrettoPoint {
+        handle: point_identity_internal()
+    }
+}
+
+ + + +
+ + + +## Function `basepoint_compressed` + +Returns the basepoint (generator) of the Ristretto255 group as a compressed point + + +
public fun basepoint_compressed(): ristretto255::CompressedRistretto
+
+ + + +
+Implementation + + +
public fun basepoint_compressed(): CompressedRistretto {
+    CompressedRistretto {
+        data: BASE_POINT
+    }
+}
+
+ + + +
+ + + +## Function `hash_to_point_base` + +Returns the hash-to-point result of serializing the basepoint of the Ristretto255 group. +For use as the random value basepoint in Pedersen commitments + + +
public fun hash_to_point_base(): ristretto255::RistrettoPoint
+
+ + + +
+Implementation + + +
public fun hash_to_point_base(): RistrettoPoint {
+    let comp_res = CompressedRistretto { data: HASH_BASE_POINT };
+    point_decompress(&comp_res)
+}
+
+ + + +
+ + + +## Function `basepoint` + +Returns the basepoint (generator) of the Ristretto255 group + + +
public fun basepoint(): ristretto255::RistrettoPoint
+
+ + + +
+Implementation + + +
public fun basepoint(): RistrettoPoint {
+    let (handle, _) = point_decompress_internal(BASE_POINT);
+
+    RistrettoPoint {
+        handle
+    }
+}
+
+ + + +
+ + + +## Function `basepoint_mul` + +Multiplies the basepoint (generator) of the Ristretto255 group by a scalar and returns the result. +This call is much faster than point_mul(&basepoint(), &some_scalar) because of precomputation tables. + + +
public fun basepoint_mul(a: &ristretto255::Scalar): ristretto255::RistrettoPoint
+
+ + + +
+Implementation + + +
public fun basepoint_mul(a: &Scalar): RistrettoPoint {
+    RistrettoPoint {
+        handle: basepoint_mul_internal(a.data)
+    }
+}
+
+ + + +
+ + + +## Function `new_compressed_point_from_bytes` + +Creates a new CompressedRistretto point from a sequence of 32 bytes. If those bytes do not represent a valid +point, returns None. + + +
public fun new_compressed_point_from_bytes(bytes: vector<u8>): option::Option<ristretto255::CompressedRistretto>
+
+ + + +
+Implementation + + +
public fun new_compressed_point_from_bytes(bytes: vector<u8>): Option<CompressedRistretto> {
+    if (point_is_canonical_internal(bytes)) {
+        std::option::some(CompressedRistretto {
+            data: bytes
+        })
+    } else {
+        std::option::none<CompressedRistretto>()
+    }
+}
+
+ + + +
+ + + +## Function `new_point_from_bytes` + +Creates a new RistrettoPoint from a sequence of 32 bytes. If those bytes do not represent a valid point, +returns None. + + +
public fun new_point_from_bytes(bytes: vector<u8>): option::Option<ristretto255::RistrettoPoint>
+
+ + + +
+Implementation + + +
public fun new_point_from_bytes(bytes: vector<u8>): Option<RistrettoPoint> {
+    let (handle, is_canonical) = point_decompress_internal(bytes);
+    if (is_canonical) {
+        std::option::some(RistrettoPoint { handle })
+    } else {
+        std::option::none<RistrettoPoint>()
+    }
+}
+
+ + + +
+ + + +## Function `compressed_point_to_bytes` + +Given a compressed ristretto point point, returns the byte representation of that point + + +
public fun compressed_point_to_bytes(point: ristretto255::CompressedRistretto): vector<u8>
+
+ + + +
+Implementation + + +
public fun compressed_point_to_bytes(point: CompressedRistretto): vector<u8> {
+    point.data
+}
+
+ + + +
+ + + +## Function `new_point_from_sha512` + +DEPRECATED: Use the more clearly-named new_point_from_sha2_512 + +Hashes the input to a uniformly-at-random RistrettoPoint via SHA512. + + +
public fun new_point_from_sha512(sha2_512_input: vector<u8>): ristretto255::RistrettoPoint
+
+ + + +
+Implementation + + +
public fun new_point_from_sha512(sha2_512_input: vector<u8>): RistrettoPoint {
+    new_point_from_sha2_512(sha2_512_input)
+}
+
+ + + +
+ + + +## Function `new_point_from_sha2_512` + +Hashes the input to a uniformly-at-random RistrettoPoint via SHA2-512. + + +
public fun new_point_from_sha2_512(sha2_512_input: vector<u8>): ristretto255::RistrettoPoint
+
+ + + +
+Implementation + + +
public fun new_point_from_sha2_512(sha2_512_input: vector<u8>): RistrettoPoint {
+    RistrettoPoint {
+        handle: new_point_from_sha512_internal(sha2_512_input)
+    }
+}
+
+ + + +
+ + + +## Function `new_point_from_64_uniform_bytes` + +Samples a uniformly-at-random RistrettoPoint given a sequence of 64 uniformly-at-random bytes. This function +can be used to build a collision-resistant hash function that maps 64-byte messages to RistrettoPoint's. + + +
public fun new_point_from_64_uniform_bytes(bytes: vector<u8>): option::Option<ristretto255::RistrettoPoint>
+
+ + + +
+Implementation + + +
public fun new_point_from_64_uniform_bytes(bytes: vector<u8>): Option<RistrettoPoint> {
+    if (std::vector::length(&bytes) == 64) {
+        std::option::some(RistrettoPoint {
+            handle: new_point_from_64_uniform_bytes_internal(bytes)
+        })
+    } else {
+        std::option::none<RistrettoPoint>()
+    }
+}
+
+ + + +
+ + + +## Function `point_decompress` + +Decompresses a CompressedRistretto from storage into a RistrettoPoint which can be used for fast arithmetic. + + +
public fun point_decompress(point: &ristretto255::CompressedRistretto): ristretto255::RistrettoPoint
+
+ + + +
+Implementation + + +
public fun point_decompress(point: &CompressedRistretto): RistrettoPoint {
+    // NOTE: Our CompressedRistretto invariant assures us that every CompressedRistretto in storage is a valid
+    // RistrettoPoint
+    let (handle, _) = point_decompress_internal(point.data);
+    RistrettoPoint { handle }
+}
+
+ + + +
+ + + +## Function `point_clone` + +Clones a RistrettoPoint. + + +
public fun point_clone(point: &ristretto255::RistrettoPoint): ristretto255::RistrettoPoint
+
+ + + +
+Implementation + + +
public fun point_clone(point: &RistrettoPoint): RistrettoPoint {
+    if(!features::bulletproofs_enabled()) {
+        abort(std::error::invalid_state(E_NATIVE_FUN_NOT_AVAILABLE))
+    };
+
+    RistrettoPoint {
+        handle: point_clone_internal(point.handle)
+    }
+}
+
+ + + +
+ + + +## Function `point_compress` + +Compresses a RistrettoPoint to a CompressedRistretto which can be put in storage. + + +
public fun point_compress(point: &ristretto255::RistrettoPoint): ristretto255::CompressedRistretto
+
+ + + +
+Implementation + + +
public fun point_compress(point: &RistrettoPoint): CompressedRistretto {
+    CompressedRistretto {
+        data: point_compress_internal(point)
+    }
+}
+
+ + + +
+ + + +## Function `point_to_bytes` + +Returns the sequence of bytes representing this Ristretto point. +To convert a RistrettoPoint 'p' to bytes, first compress it via c = point_compress(&p), and then call this +function on c. + + +
public fun point_to_bytes(point: &ristretto255::CompressedRistretto): vector<u8>
+
+ + + +
+Implementation + + +
public fun point_to_bytes(point: &CompressedRistretto): vector<u8> {
+    point.data
+}
+
+ + + +
+ + + +## Function `point_mul` + +Returns a * point. + + +
public fun point_mul(point: &ristretto255::RistrettoPoint, a: &ristretto255::Scalar): ristretto255::RistrettoPoint
+
+ + + +
+Implementation + + +
public fun point_mul(point: &RistrettoPoint, a: &Scalar): RistrettoPoint {
+    RistrettoPoint {
+        handle: point_mul_internal(point, a.data, false)
+    }
+}
+
+ + + +
+ + + +## Function `point_mul_assign` + +Sets a *= point and returns 'a'. + + +
public fun point_mul_assign(point: &mut ristretto255::RistrettoPoint, a: &ristretto255::Scalar): &mut ristretto255::RistrettoPoint
+
+ + + +
+Implementation + + +
public fun point_mul_assign(point: &mut RistrettoPoint, a: &Scalar): &mut RistrettoPoint {
+    point_mul_internal(point, a.data, true);
+    point
+}
+
+ + + +
+ + + +## Function `basepoint_double_mul` + +Returns (a * a_base + b * base_point), where base_point is the Ristretto basepoint encoded in BASE_POINT. + + +
public fun basepoint_double_mul(a: &ristretto255::Scalar, a_base: &ristretto255::RistrettoPoint, b: &ristretto255::Scalar): ristretto255::RistrettoPoint
+
+ + + +
+Implementation + + +
public fun basepoint_double_mul(a: &Scalar, a_base: &RistrettoPoint, b: &Scalar): RistrettoPoint {
+    RistrettoPoint {
+        handle: basepoint_double_mul_internal(a.data, a_base, b.data)
+    }
+}
+
+ + + +
+ + + +## Function `point_add` + +Returns a + b + + +
public fun point_add(a: &ristretto255::RistrettoPoint, b: &ristretto255::RistrettoPoint): ristretto255::RistrettoPoint
+
+ + + +
+Implementation + + +
public fun point_add(a: &RistrettoPoint, b: &RistrettoPoint): RistrettoPoint {
+    RistrettoPoint {
+        handle: point_add_internal(a, b, false)
+    }
+}
+
+ + + +
+ + + +## Function `point_add_assign` + +Sets a += b and returns 'a'. + + +
public fun point_add_assign(a: &mut ristretto255::RistrettoPoint, b: &ristretto255::RistrettoPoint): &mut ristretto255::RistrettoPoint
+
+ + + +
+Implementation + + +
public fun point_add_assign(a: &mut RistrettoPoint, b: &RistrettoPoint): &mut RistrettoPoint {
+    point_add_internal(a, b, true);
+    a
+}
+
+ + + +
+ + + +## Function `point_sub` + +Returns a - b + + +
public fun point_sub(a: &ristretto255::RistrettoPoint, b: &ristretto255::RistrettoPoint): ristretto255::RistrettoPoint
+
+ + + +
+Implementation + + +
public fun point_sub(a: &RistrettoPoint, b: &RistrettoPoint): RistrettoPoint {
+    RistrettoPoint {
+        handle: point_sub_internal(a, b, false)
+    }
+}
+
+ + + +
+ + + +## Function `point_sub_assign` + +Sets a -= b and returns 'a'. + + +
public fun point_sub_assign(a: &mut ristretto255::RistrettoPoint, b: &ristretto255::RistrettoPoint): &mut ristretto255::RistrettoPoint
+
+ + + +
+Implementation + + +
public fun point_sub_assign(a: &mut RistrettoPoint, b: &RistrettoPoint): &mut RistrettoPoint {
+    point_sub_internal(a, b, true);
+    a
+}
+
+ + + +
+ + + +## Function `point_neg` + +Returns -a + + +
public fun point_neg(a: &ristretto255::RistrettoPoint): ristretto255::RistrettoPoint
+
+ + + +
+Implementation + + +
public fun point_neg(a: &RistrettoPoint): RistrettoPoint {
+    RistrettoPoint {
+        handle: point_neg_internal(a, false)
+    }
+}
+
+ + + +
+ + + +## Function `point_neg_assign` + +Sets a = -a, and returns 'a'. + + +
public fun point_neg_assign(a: &mut ristretto255::RistrettoPoint): &mut ristretto255::RistrettoPoint
+
+ + + +
+Implementation + + +
public fun point_neg_assign(a: &mut RistrettoPoint): &mut RistrettoPoint {
+    point_neg_internal(a, true);
+    a
+}
+
+ + + +
+ + + +## Function `point_equals` + +Returns true if the two RistrettoPoints are the same points on the elliptic curve. + + +
public fun point_equals(g: &ristretto255::RistrettoPoint, h: &ristretto255::RistrettoPoint): bool
+
+ + + +
+Implementation + + +
native public fun point_equals(g: &RistrettoPoint, h: &RistrettoPoint): bool;
+
+ + + +
+ + + +## Function `double_scalar_mul` + +Computes a double-scalar multiplication, returning a_1 p_1 + a_2 p_2 +This function is much faster than computing each a_i p_i using point_mul and adding up the results using point_add. + + +
public fun double_scalar_mul(scalar1: &ristretto255::Scalar, point1: &ristretto255::RistrettoPoint, scalar2: &ristretto255::Scalar, point2: &ristretto255::RistrettoPoint): ristretto255::RistrettoPoint
+
+ + + +
+Implementation + + +
public fun double_scalar_mul(scalar1: &Scalar, point1: &RistrettoPoint, scalar2: &Scalar, point2: &RistrettoPoint): RistrettoPoint {
+    if(!features::bulletproofs_enabled()) {
+        abort(std::error::invalid_state(E_NATIVE_FUN_NOT_AVAILABLE))
+    };
+
+    RistrettoPoint {
+        handle: double_scalar_mul_internal(point1.handle, point2.handle, scalar1.data, scalar2.data)
+    }
+}
+
+ + + +
+ + + +## Function `multi_scalar_mul` + +Computes a multi-scalar multiplication, returning a_1 p_1 + a_2 p_2 + ... + a_n p_n. +This function is much faster than computing each a_i p_i using point_mul and adding up the results using point_add. + + +
public fun multi_scalar_mul(points: &vector<ristretto255::RistrettoPoint>, scalars: &vector<ristretto255::Scalar>): ristretto255::RistrettoPoint
+
+ + + +
+Implementation + + +
public fun multi_scalar_mul(points: &vector<RistrettoPoint>, scalars: &vector<Scalar>): RistrettoPoint {
+    assert!(!std::vector::is_empty(points), std::error::invalid_argument(E_ZERO_POINTS));
+    assert!(!std::vector::is_empty(scalars), std::error::invalid_argument(E_ZERO_SCALARS));
+    assert!(std::vector::length(points) == std::vector::length(scalars), std::error::invalid_argument(E_DIFFERENT_NUM_POINTS_AND_SCALARS));
+
+    RistrettoPoint {
+        handle: multi_scalar_mul_internal<RistrettoPoint, Scalar>(points, scalars)
+    }
+}
+
+ + + +
+ + + +## Function `new_scalar_from_bytes` + +Given a sequence of 32 bytes, checks if they canonically-encode a Scalar and return it. +Otherwise, returns None. + + +
public fun new_scalar_from_bytes(bytes: vector<u8>): option::Option<ristretto255::Scalar>
+
+ + + +
+Implementation + + +
public fun new_scalar_from_bytes(bytes: vector<u8>): Option<Scalar> {
+    if (scalar_is_canonical_internal(bytes)) {
+        std::option::some(Scalar {
+            data: bytes
+        })
+    } else {
+        std::option::none<Scalar>()
+    }
+}
+
+ + + +
+ + + +## Function `new_scalar_from_sha512` + +DEPRECATED: Use the more clearly-named new_scalar_from_sha2_512 + +Hashes the input to a uniformly-at-random Scalar via SHA2-512 + + +
public fun new_scalar_from_sha512(sha2_512_input: vector<u8>): ristretto255::Scalar
+
+ + + +
+Implementation + + +
public fun new_scalar_from_sha512(sha2_512_input: vector<u8>): Scalar {
+    new_scalar_from_sha2_512(sha2_512_input)
+}
+
+ + + +
+ + + +## Function `new_scalar_from_sha2_512` + +Hashes the input to a uniformly-at-random Scalar via SHA2-512 + + +
public fun new_scalar_from_sha2_512(sha2_512_input: vector<u8>): ristretto255::Scalar
+
+ + + +
+Implementation + + +
public fun new_scalar_from_sha2_512(sha2_512_input: vector<u8>): Scalar {
+    Scalar {
+        data: scalar_from_sha512_internal(sha2_512_input)
+    }
+}
+
+ + + +
+ + + +## Function `new_scalar_from_u8` + +Creates a Scalar from an u8. + + +
public fun new_scalar_from_u8(byte: u8): ristretto255::Scalar
+
+ + + +
+Implementation + + +
public fun new_scalar_from_u8(byte: u8): Scalar {
+    let s = scalar_zero();
+    let byte_zero = std::vector::borrow_mut(&mut s.data, 0);
+    *byte_zero = byte;
+
+    s
+}
+
+ + + +
+ + + +## Function `new_scalar_from_u32` + +Creates a Scalar from an u32. + + +
public fun new_scalar_from_u32(four_bytes: u32): ristretto255::Scalar
+
+ + + +
+Implementation + + +
public fun new_scalar_from_u32(four_bytes: u32): Scalar {
+    Scalar {
+        data: scalar_from_u64_internal((four_bytes as u64))
+    }
+}
+
+ + + +
+ + + +## Function `new_scalar_from_u64` + +Creates a Scalar from an u64. + + +
public fun new_scalar_from_u64(eight_bytes: u64): ristretto255::Scalar
+
+ + + +
+Implementation + + +
public fun new_scalar_from_u64(eight_bytes: u64): Scalar {
+    Scalar {
+        data: scalar_from_u64_internal(eight_bytes)
+    }
+}
+
+ + + +
+ + + +## Function `new_scalar_from_u128` + +Creates a Scalar from an u128. + + +
public fun new_scalar_from_u128(sixteen_bytes: u128): ristretto255::Scalar
+
+ + + +
+Implementation + + +
public fun new_scalar_from_u128(sixteen_bytes: u128): Scalar {
+    Scalar {
+        data: scalar_from_u128_internal(sixteen_bytes)
+    }
+}
+
+ + + +
+ + + +## Function `new_scalar_reduced_from_32_bytes` + +Creates a Scalar from 32 bytes by reducing the little-endian-encoded number in those bytes modulo $\ell$. + + +
public fun new_scalar_reduced_from_32_bytes(bytes: vector<u8>): option::Option<ristretto255::Scalar>
+
+ + + +
+Implementation + + +
public fun new_scalar_reduced_from_32_bytes(bytes: vector<u8>): Option<Scalar> {
+    if (std::vector::length(&bytes) == 32) {
+        std::option::some(Scalar {
+            data: scalar_reduced_from_32_bytes_internal(bytes)
+        })
+    } else {
+        std::option::none()
+    }
+}
+
+ + + +
+ + + +## Function `new_scalar_uniform_from_64_bytes` + +Samples a scalar uniformly-at-random given 64 uniform-at-random bytes as input by reducing the little-endian-encoded number +in those bytes modulo $\ell$. + + +
public fun new_scalar_uniform_from_64_bytes(bytes: vector<u8>): option::Option<ristretto255::Scalar>
+
+ + + +
+Implementation + + +
public fun new_scalar_uniform_from_64_bytes(bytes: vector<u8>): Option<Scalar> {
+    if (std::vector::length(&bytes) == 64) {
+        std::option::some(Scalar {
+            data: scalar_uniform_from_64_bytes_internal(bytes)
+        })
+    } else {
+        std::option::none()
+    }
+}
+
+ + + +
+ + + +## Function `scalar_zero` + +Returns 0 as a Scalar. + + +
public fun scalar_zero(): ristretto255::Scalar
+
+ + + +
+Implementation + + +
public fun scalar_zero(): Scalar {
+    Scalar {
+        data: x"0000000000000000000000000000000000000000000000000000000000000000"
+    }
+}
+
+ + + +
+ + + +## Function `scalar_is_zero` + +Returns true if the given Scalar equals 0. + + +
public fun scalar_is_zero(s: &ristretto255::Scalar): bool
+
+ + + +
+Implementation + + +
public fun scalar_is_zero(s: &Scalar): bool {
+    s.data == x"0000000000000000000000000000000000000000000000000000000000000000"
+}
+
+ + + +
+ + + +## Function `scalar_one` + +Returns 1 as a Scalar. + + +
public fun scalar_one(): ristretto255::Scalar
+
+ + + +
+Implementation + + +
public fun scalar_one(): Scalar {
+    Scalar {
+        data: x"0100000000000000000000000000000000000000000000000000000000000000"
+    }
+}
+
+ + + +
+ + + +## Function `scalar_is_one` + +Returns true if the given Scalar equals 1. + + +
public fun scalar_is_one(s: &ristretto255::Scalar): bool
+
+ + + +
+Implementation + + +
public fun scalar_is_one(s: &Scalar): bool {
+    s.data == x"0100000000000000000000000000000000000000000000000000000000000000"
+}
+
+ + + +
+ + + +## Function `scalar_equals` + +Returns true if the two scalars are equal. + + +
public fun scalar_equals(lhs: &ristretto255::Scalar, rhs: &ristretto255::Scalar): bool
+
+ + + +
+Implementation + + +
public fun scalar_equals(lhs: &Scalar, rhs: &Scalar): bool {
+    lhs.data == rhs.data
+}
+
+ + + +
+ + + +## Function `scalar_invert` + +Returns the inverse s^{-1} mod \ell of a scalar s. +Returns None if s is zero. + + +
public fun scalar_invert(s: &ristretto255::Scalar): option::Option<ristretto255::Scalar>
+
+ + + +
+Implementation + + +
public fun scalar_invert(s: &Scalar): Option<Scalar> {
+    if (scalar_is_zero(s)) {
+        std::option::none<Scalar>()
+    } else {
+        std::option::some(Scalar {
+            data: scalar_invert_internal(s.data)
+        })
+    }
+}
+
+ + + +
+ + + +## Function `scalar_mul` + +Returns the product of the two scalars. + + +
public fun scalar_mul(a: &ristretto255::Scalar, b: &ristretto255::Scalar): ristretto255::Scalar
+
+ + + +
+Implementation + + +
public fun scalar_mul(a: &Scalar, b: &Scalar): Scalar {
+    Scalar {
+        data: scalar_mul_internal(a.data, b.data)
+    }
+}
+
+ + + +
+ + + +## Function `scalar_mul_assign` + +Computes the product of 'a' and 'b' and assigns the result to 'a'. +Returns 'a'. + + +
public fun scalar_mul_assign(a: &mut ristretto255::Scalar, b: &ristretto255::Scalar): &mut ristretto255::Scalar
+
+ + + +
+Implementation + + +
public fun scalar_mul_assign(a: &mut Scalar, b: &Scalar): &mut Scalar {
+    a.data = scalar_mul(a, b).data;
+    a
+}
+
+ + + +
+ + + +## Function `scalar_add` + +Returns the sum of the two scalars. + + +
public fun scalar_add(a: &ristretto255::Scalar, b: &ristretto255::Scalar): ristretto255::Scalar
+
+ + + +
+Implementation + + +
public fun scalar_add(a: &Scalar, b: &Scalar): Scalar {
+    Scalar {
+        data: scalar_add_internal(a.data, b.data)
+    }
+}
+
+ + + +
+ + + +## Function `scalar_add_assign` + +Computes the sum of 'a' and 'b' and assigns the result to 'a' +Returns 'a'. + + +
public fun scalar_add_assign(a: &mut ristretto255::Scalar, b: &ristretto255::Scalar): &mut ristretto255::Scalar
+
+ + + +
+Implementation + + +
public fun scalar_add_assign(a: &mut Scalar, b: &Scalar): &mut Scalar {
+    a.data = scalar_add(a, b).data;
+    a
+}
+
+ + + +
+ + + +## Function `scalar_sub` + +Returns the difference of the two scalars. + + +
public fun scalar_sub(a: &ristretto255::Scalar, b: &ristretto255::Scalar): ristretto255::Scalar
+
+ + + +
+Implementation + + +
public fun scalar_sub(a: &Scalar, b: &Scalar): Scalar {
+    Scalar {
+        data: scalar_sub_internal(a.data, b.data)
+    }
+}
+
+ + + +
+ + + +## Function `scalar_sub_assign` + +Subtracts 'b' from 'a' and assigns the result to 'a'. +Returns 'a'. + + +
public fun scalar_sub_assign(a: &mut ristretto255::Scalar, b: &ristretto255::Scalar): &mut ristretto255::Scalar
+
+ + + +
+Implementation + + +
public fun scalar_sub_assign(a: &mut Scalar, b: &Scalar): &mut Scalar {
+    a.data = scalar_sub(a, b).data;
+    a
+}
+
+ + + +
+ + + +## Function `scalar_neg` + +Returns the negation of 'a': i.e., $(0 - a) \mod \ell$. + + +
public fun scalar_neg(a: &ristretto255::Scalar): ristretto255::Scalar
+
+ + + +
+Implementation + + +
public fun scalar_neg(a: &Scalar): Scalar {
+    Scalar {
+        data: scalar_neg_internal(a.data)
+    }
+}
+
+ + + +
+ + + +## Function `scalar_neg_assign` + +Replaces 'a' by its negation. +Returns 'a'. + + +
public fun scalar_neg_assign(a: &mut ristretto255::Scalar): &mut ristretto255::Scalar
+
+ + + +
+Implementation + + +
public fun scalar_neg_assign(a: &mut Scalar): &mut Scalar {
+    a.data = scalar_neg(a).data;
+    a
+}
+
+ + + +
+ + + +## Function `scalar_to_bytes` + +Returns the byte-representation of the scalar. + + +
public fun scalar_to_bytes(s: &ristretto255::Scalar): vector<u8>
+
+ + + +
+Implementation + + +
public fun scalar_to_bytes(s: &Scalar): vector<u8> {
+    s.data
+}
+
+ + + +
+ + + +## Function `new_point_from_sha512_internal` + + + +
fun new_point_from_sha512_internal(sha2_512_input: vector<u8>): u64
+
+ + + +
+Implementation + + +
native fun new_point_from_sha512_internal(sha2_512_input: vector<u8>): u64;
+
+ + + +
+ + + +## Function `new_point_from_64_uniform_bytes_internal` + + + +
fun new_point_from_64_uniform_bytes_internal(bytes: vector<u8>): u64
+
+ + + +
+Implementation + + +
native fun new_point_from_64_uniform_bytes_internal(bytes: vector<u8>): u64;
+
+ + + +
+ + + +## Function `point_is_canonical_internal` + + + +
fun point_is_canonical_internal(bytes: vector<u8>): bool
+
+ + + +
+Implementation + + +
native fun point_is_canonical_internal(bytes: vector<u8>): bool;
+
+ + + +
+ + + +## Function `point_identity_internal` + + + +
fun point_identity_internal(): u64
+
+ + + +
+Implementation + + +
native fun point_identity_internal(): u64;
+
+ + + +
+ + + +## Function `point_decompress_internal` + + + +
fun point_decompress_internal(maybe_non_canonical_bytes: vector<u8>): (u64, bool)
+
+ + + +
+Implementation + + +
native fun point_decompress_internal(maybe_non_canonical_bytes: vector<u8>): (u64, bool);
+
+ + + +
+ + + +## Function `point_clone_internal` + + + +
fun point_clone_internal(point_handle: u64): u64
+
+ + + +
+Implementation + + +
native fun point_clone_internal(point_handle: u64): u64;
+
+ + + +
+ + + +## Function `point_compress_internal` + + + +
fun point_compress_internal(point: &ristretto255::RistrettoPoint): vector<u8>
+
+ + + +
+Implementation + + +
native fun point_compress_internal(point: &RistrettoPoint): vector<u8>;
+
+ + + +
+ + + +## Function `point_mul_internal` + + + +
fun point_mul_internal(point: &ristretto255::RistrettoPoint, a: vector<u8>, in_place: bool): u64
+
+ + + +
+Implementation + + +
native fun point_mul_internal(point: &RistrettoPoint, a: vector<u8>, in_place: bool): u64;
+
+ + + +
+ + + +## Function `basepoint_mul_internal` + + + +
fun basepoint_mul_internal(a: vector<u8>): u64
+
+ + + +
+Implementation + + +
native fun basepoint_mul_internal(a: vector<u8>): u64;
+
+ + + +
+ + + +## Function `basepoint_double_mul_internal` + + + +
fun basepoint_double_mul_internal(a: vector<u8>, some_point: &ristretto255::RistrettoPoint, b: vector<u8>): u64
+
+ + + +
+Implementation + + +
native fun basepoint_double_mul_internal(a: vector<u8>, some_point: &RistrettoPoint, b: vector<u8>): u64;
+
+ + + +
+ + + +## Function `point_add_internal` + + + +
fun point_add_internal(a: &ristretto255::RistrettoPoint, b: &ristretto255::RistrettoPoint, in_place: bool): u64
+
+ + + +
+Implementation + + +
native fun point_add_internal(a: &RistrettoPoint, b: &RistrettoPoint, in_place: bool): u64;
+
+ + + +
+ + + +## Function `point_sub_internal` + + + +
fun point_sub_internal(a: &ristretto255::RistrettoPoint, b: &ristretto255::RistrettoPoint, in_place: bool): u64
+
+ + + +
+Implementation + + +
native fun point_sub_internal(a: &RistrettoPoint, b: &RistrettoPoint, in_place: bool): u64;
+
+ + + +
+ + + +## Function `point_neg_internal` + + + +
fun point_neg_internal(a: &ristretto255::RistrettoPoint, in_place: bool): u64
+
+ + + +
+Implementation + + +
native fun point_neg_internal(a: &RistrettoPoint, in_place: bool): u64;
+
+ + + +
+ + + +## Function `double_scalar_mul_internal` + + + +
fun double_scalar_mul_internal(point1: u64, point2: u64, scalar1: vector<u8>, scalar2: vector<u8>): u64
+
+ + + +
+Implementation + + +
native fun double_scalar_mul_internal(point1: u64, point2: u64, scalar1: vector<u8>, scalar2: vector<u8>): u64;
+
+ + + +
+ + + +## Function `multi_scalar_mul_internal` + +The generic arguments are needed to deal with some Move VM peculiarities which prevent us from borrowing the +points (or scalars) inside a &vector in Rust. + +WARNING: This function can only be called with P = RistrettoPoint and S = Scalar. + + +
fun multi_scalar_mul_internal<P, S>(points: &vector<P>, scalars: &vector<S>): u64
+
+ + + +
+Implementation + + +
native fun multi_scalar_mul_internal<P, S>(points: &vector<P>, scalars: &vector<S>): u64;
+
+ + + +
+ + + +## Function `scalar_is_canonical_internal` + + + +
fun scalar_is_canonical_internal(s: vector<u8>): bool
+
+ + + +
+Implementation + + +
native fun scalar_is_canonical_internal(s: vector<u8>): bool;
+
+ + + +
+ + + +## Function `scalar_from_u64_internal` + + + +
fun scalar_from_u64_internal(num: u64): vector<u8>
+
+ + + +
+Implementation + + +
native fun scalar_from_u64_internal(num: u64): vector<u8>;
+
+ + + +
+ + + +## Function `scalar_from_u128_internal` + + + +
fun scalar_from_u128_internal(num: u128): vector<u8>
+
+ + + +
+Implementation + + +
native fun scalar_from_u128_internal(num: u128): vector<u8>;
+
+ + + +
+ + + +## Function `scalar_reduced_from_32_bytes_internal` + + + +
fun scalar_reduced_from_32_bytes_internal(bytes: vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
native fun scalar_reduced_from_32_bytes_internal(bytes: vector<u8>): vector<u8>;
+
+ + + +
+ + + +## Function `scalar_uniform_from_64_bytes_internal` + + + +
fun scalar_uniform_from_64_bytes_internal(bytes: vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
native fun scalar_uniform_from_64_bytes_internal(bytes: vector<u8>): vector<u8>;
+
+ + + +
+ + + +## Function `scalar_invert_internal` + + + +
fun scalar_invert_internal(bytes: vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
native fun scalar_invert_internal(bytes: vector<u8>): vector<u8>;
+
+ + + +
+ + + +## Function `scalar_from_sha512_internal` + + + +
fun scalar_from_sha512_internal(sha2_512_input: vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
native fun scalar_from_sha512_internal(sha2_512_input: vector<u8>): vector<u8>;
+
+ + + +
+ + + +## Function `scalar_mul_internal` + + + +
fun scalar_mul_internal(a_bytes: vector<u8>, b_bytes: vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
native fun scalar_mul_internal(a_bytes: vector<u8>, b_bytes: vector<u8>): vector<u8>;
+
+ + + +
+ + + +## Function `scalar_add_internal` + + + +
fun scalar_add_internal(a_bytes: vector<u8>, b_bytes: vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
native fun scalar_add_internal(a_bytes: vector<u8>, b_bytes: vector<u8>): vector<u8>;
+
+ + + +
+ + + +## Function `scalar_sub_internal` + + + +
fun scalar_sub_internal(a_bytes: vector<u8>, b_bytes: vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
native fun scalar_sub_internal(a_bytes: vector<u8>, b_bytes: vector<u8>): vector<u8>;
+
+ + + +
+ + + +## Function `scalar_neg_internal` + + + +
fun scalar_neg_internal(a_bytes: vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
native fun scalar_neg_internal(a_bytes: vector<u8>): vector<u8>;
+
+ + + +
+ + + +## Specification + + + + +### Helper functions + + + + + + +
fun spec_scalar_is_zero(s: Scalar): bool {
+   s.data == x"0000000000000000000000000000000000000000000000000000000000000000"
+}
+
+ + + + + + + +
fun spec_scalar_is_one(s: Scalar): bool {
+   s.data == x"0100000000000000000000000000000000000000000000000000000000000000"
+}
+
+ + + + + + + +
fun spec_point_is_canonical_internal(bytes: vector<u8>): bool;
+
+ + + + + + + +
fun spec_double_scalar_mul_internal(point1: u64, point2: u64, scalar1: vector<u8>, scalar2: vector<u8>): u64;
+
+ + + + + + + +
fun spec_multi_scalar_mul_internal<P, S>(points: vector<P>, scalars: vector<S>): u64;
+
+ + + + + + + +
fun spec_scalar_is_canonical_internal(s: vector<u8>): bool;
+
+ + + + + + + +
fun spec_scalar_from_u64_internal(num: u64): vector<u8>;
+
+ + + + + + + +
fun spec_scalar_from_u128_internal(num: u128): vector<u8>;
+
+ + + + + + + +
fun spec_scalar_reduced_from_32_bytes_internal(bytes: vector<u8>): vector<u8>;
+
+ + + + + + + +
fun spec_scalar_uniform_from_64_bytes_internal(bytes: vector<u8>): vector<u8>;
+
+ + + + + + + +
fun spec_scalar_invert_internal(bytes: vector<u8>): vector<u8>;
+
+ + + + + + + +
fun spec_scalar_from_sha512_internal(sha2_512_input: vector<u8>): vector<u8>;
+
+ + + + + + + +
fun spec_scalar_mul_internal(a_bytes: vector<u8>, b_bytes: vector<u8>): vector<u8>;
+
+ + + + + + + +
fun spec_scalar_add_internal(a_bytes: vector<u8>, b_bytes: vector<u8>): vector<u8>;
+
+ + + + + + + +
fun spec_scalar_sub_internal(a_bytes: vector<u8>, b_bytes: vector<u8>): vector<u8>;
+
+ + + + + + + +
fun spec_scalar_neg_internal(a_bytes: vector<u8>): vector<u8>;
+
+ + + + + +### Function `point_equals` + + +
public fun point_equals(g: &ristretto255::RistrettoPoint, h: &ristretto255::RistrettoPoint): bool
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `double_scalar_mul` + + +
public fun double_scalar_mul(scalar1: &ristretto255::Scalar, point1: &ristretto255::RistrettoPoint, scalar2: &ristretto255::Scalar, point2: &ristretto255::RistrettoPoint): ristretto255::RistrettoPoint
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `multi_scalar_mul` + + +
public fun multi_scalar_mul(points: &vector<ristretto255::RistrettoPoint>, scalars: &vector<ristretto255::Scalar>): ristretto255::RistrettoPoint
+
+ + + + +
aborts_if len(points) == 0;
+aborts_if len(scalars) == 0;
+aborts_if len(points) != len(scalars);
+ensures result.handle == spec_multi_scalar_mul_internal(points, scalars);
+
+ + + + + +### Function `new_scalar_from_bytes` + + +
public fun new_scalar_from_bytes(bytes: vector<u8>): option::Option<ristretto255::Scalar>
+
+ + + + +
aborts_if false;
+ensures spec_scalar_is_canonical_internal(bytes) ==> (std::option::spec_is_some(result)
+    && std::option::spec_borrow(result).data == bytes);
+ensures !spec_scalar_is_canonical_internal(bytes) ==> std::option::spec_is_none(result);
+
+ + + + + +### Function `new_scalar_from_sha2_512` + + +
public fun new_scalar_from_sha2_512(sha2_512_input: vector<u8>): ristretto255::Scalar
+
+ + + + +
aborts_if false;
+ensures result.data == spec_scalar_from_sha512_internal(sha2_512_input);
+
+ + + + + +### Function `new_scalar_from_u8` + + +
public fun new_scalar_from_u8(byte: u8): ristretto255::Scalar
+
+ + + + +
aborts_if false;
+ensures result.data[0] == byte;
+ensures forall i in 1..len(result.data): result.data[i] == 0;
+
+ + + + + +### Function `new_scalar_from_u32` + + +
public fun new_scalar_from_u32(four_bytes: u32): ristretto255::Scalar
+
+ + + + +
aborts_if false;
+ensures result.data == spec_scalar_from_u64_internal(four_bytes);
+
+ + + + + +### Function `new_scalar_from_u64` + + +
public fun new_scalar_from_u64(eight_bytes: u64): ristretto255::Scalar
+
+ + + + +
aborts_if false;
+ensures result.data == spec_scalar_from_u64_internal(eight_bytes);
+
+ + + + + +### Function `new_scalar_from_u128` + + +
public fun new_scalar_from_u128(sixteen_bytes: u128): ristretto255::Scalar
+
+ + + + +
aborts_if false;
+ensures result.data == spec_scalar_from_u128_internal(sixteen_bytes);
+
+ + + + + +### Function `new_scalar_reduced_from_32_bytes` + + +
public fun new_scalar_reduced_from_32_bytes(bytes: vector<u8>): option::Option<ristretto255::Scalar>
+
+ + + + +
ensures len(bytes) != 32 ==> std::option::spec_is_none(result);
+ensures len(bytes) == 32 ==> std::option::spec_borrow(result).data == spec_scalar_reduced_from_32_bytes_internal(bytes);
+
+ + + + + +### Function `new_scalar_uniform_from_64_bytes` + + +
public fun new_scalar_uniform_from_64_bytes(bytes: vector<u8>): option::Option<ristretto255::Scalar>
+
+ + + + +
ensures len(bytes) != 64 ==> std::option::spec_is_none(result);
+ensures len(bytes) == 64 ==> std::option::spec_borrow(result).data == spec_scalar_uniform_from_64_bytes_internal(bytes);
+
+ + + + + +### Function `scalar_zero` + + +
public fun scalar_zero(): ristretto255::Scalar
+
+ + + + +
ensures spec_scalar_is_zero(result);
+
+ + + + + +### Function `scalar_is_zero` + + +
public fun scalar_is_zero(s: &ristretto255::Scalar): bool
+
+ + + + +
ensures result == spec_scalar_is_zero(s);
+
+ + + + + +### Function `scalar_one` + + +
public fun scalar_one(): ristretto255::Scalar
+
+ + + + +
ensures spec_scalar_is_one(result);
+
+ + + + + +### Function `scalar_is_one` + + +
public fun scalar_is_one(s: &ristretto255::Scalar): bool
+
+ + + + +
ensures result == spec_scalar_is_one(s);
+
+ + + + + +### Function `scalar_equals` + + +
public fun scalar_equals(lhs: &ristretto255::Scalar, rhs: &ristretto255::Scalar): bool
+
+ + + + +
aborts_if false;
+ensures result == (lhs.data == rhs.data);
+
+ + + + + +### Function `scalar_invert` + + +
public fun scalar_invert(s: &ristretto255::Scalar): option::Option<ristretto255::Scalar>
+
+ + + + +
aborts_if false;
+ensures spec_scalar_is_zero(s) ==> std::option::spec_is_none(result);
+ensures !spec_scalar_is_zero(s) ==> (std::option::spec_is_some(result) && std::option::spec_borrow(result).data == spec_scalar_invert_internal(s.data));
+
+ + + + + +### Function `scalar_mul` + + +
public fun scalar_mul(a: &ristretto255::Scalar, b: &ristretto255::Scalar): ristretto255::Scalar
+
+ + + + +
aborts_if false;
+ensures result.data == spec_scalar_mul_internal(a.data, b.data);
+
+ + + + + +### Function `scalar_mul_assign` + + +
public fun scalar_mul_assign(a: &mut ristretto255::Scalar, b: &ristretto255::Scalar): &mut ristretto255::Scalar
+
+ + + + +
aborts_if false;
+ensures a.data == spec_scalar_mul_internal(old(a).data, b.data);
+
+ + + + + +### Function `scalar_add` + + +
public fun scalar_add(a: &ristretto255::Scalar, b: &ristretto255::Scalar): ristretto255::Scalar
+
+ + + + +
aborts_if false;
+ensures result.data == spec_scalar_add_internal(a.data, b.data);
+
+ + + + + +### Function `scalar_add_assign` + + +
public fun scalar_add_assign(a: &mut ristretto255::Scalar, b: &ristretto255::Scalar): &mut ristretto255::Scalar
+
+ + + + +
aborts_if false;
+ensures a.data == spec_scalar_add_internal(old(a).data, b.data);
+
+ + + + + +### Function `scalar_sub` + + +
public fun scalar_sub(a: &ristretto255::Scalar, b: &ristretto255::Scalar): ristretto255::Scalar
+
+ + + + +
aborts_if false;
+ensures result.data == spec_scalar_sub_internal(a.data, b.data);
+
+ + + + + +### Function `scalar_sub_assign` + + +
public fun scalar_sub_assign(a: &mut ristretto255::Scalar, b: &ristretto255::Scalar): &mut ristretto255::Scalar
+
+ + + + +
aborts_if false;
+ensures a.data == spec_scalar_sub_internal(old(a).data, b.data);
+
+ + + + + +### Function `scalar_neg` + + +
public fun scalar_neg(a: &ristretto255::Scalar): ristretto255::Scalar
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result.data == spec_scalar_neg_internal(a.data);
+
+ + + + + +### Function `scalar_neg_assign` + + +
public fun scalar_neg_assign(a: &mut ristretto255::Scalar): &mut ristretto255::Scalar
+
+ + + + +
aborts_if false;
+ensures a.data == spec_scalar_neg_internal(old(a).data);
+
+ + + + + +### Function `scalar_to_bytes` + + +
public fun scalar_to_bytes(s: &ristretto255::Scalar): vector<u8>
+
+ + + + +
aborts_if false;
+ensures result == s.data;
+
+ + + + + +### Function `new_point_from_sha512_internal` + + +
fun new_point_from_sha512_internal(sha2_512_input: vector<u8>): u64
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `new_point_from_64_uniform_bytes_internal` + + +
fun new_point_from_64_uniform_bytes_internal(bytes: vector<u8>): u64
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `point_is_canonical_internal` + + +
fun point_is_canonical_internal(bytes: vector<u8>): bool
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures result == spec_point_is_canonical_internal(bytes);
+
+ + + + + +### Function `point_identity_internal` + + +
fun point_identity_internal(): u64
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `point_decompress_internal` + + +
fun point_decompress_internal(maybe_non_canonical_bytes: vector<u8>): (u64, bool)
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `point_clone_internal` + + +
fun point_clone_internal(point_handle: u64): u64
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `point_compress_internal` + + +
fun point_compress_internal(point: &ristretto255::RistrettoPoint): vector<u8>
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `point_mul_internal` + + +
fun point_mul_internal(point: &ristretto255::RistrettoPoint, a: vector<u8>, in_place: bool): u64
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `basepoint_mul_internal` + + +
fun basepoint_mul_internal(a: vector<u8>): u64
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `basepoint_double_mul_internal` + + +
fun basepoint_double_mul_internal(a: vector<u8>, some_point: &ristretto255::RistrettoPoint, b: vector<u8>): u64
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `point_add_internal` + + +
fun point_add_internal(a: &ristretto255::RistrettoPoint, b: &ristretto255::RistrettoPoint, in_place: bool): u64
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `point_sub_internal` + + +
fun point_sub_internal(a: &ristretto255::RistrettoPoint, b: &ristretto255::RistrettoPoint, in_place: bool): u64
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `point_neg_internal` + + +
fun point_neg_internal(a: &ristretto255::RistrettoPoint, in_place: bool): u64
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `double_scalar_mul_internal` + + +
fun double_scalar_mul_internal(point1: u64, point2: u64, scalar1: vector<u8>, scalar2: vector<u8>): u64
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `multi_scalar_mul_internal` + + +
fun multi_scalar_mul_internal<P, S>(points: &vector<P>, scalars: &vector<S>): u64
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures result == spec_multi_scalar_mul_internal<P, S>(points, scalars);
+
+ + + + + +### Function `scalar_is_canonical_internal` + + +
fun scalar_is_canonical_internal(s: vector<u8>): bool
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures result == spec_scalar_is_canonical_internal(s);
+
+ + + + + +### Function `scalar_from_u64_internal` + + +
fun scalar_from_u64_internal(num: u64): vector<u8>
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures result == spec_scalar_from_u64_internal(num);
+
+ + + + + +### Function `scalar_from_u128_internal` + + +
fun scalar_from_u128_internal(num: u128): vector<u8>
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures result == spec_scalar_from_u128_internal(num);
+
+ + + + + +### Function `scalar_reduced_from_32_bytes_internal` + + +
fun scalar_reduced_from_32_bytes_internal(bytes: vector<u8>): vector<u8>
+
+ + + + +
pragma opaque;
+ensures result == spec_scalar_reduced_from_32_bytes_internal(bytes);
+
+ + + + + +### Function `scalar_uniform_from_64_bytes_internal` + + +
fun scalar_uniform_from_64_bytes_internal(bytes: vector<u8>): vector<u8>
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures result == spec_scalar_uniform_from_64_bytes_internal(bytes);
+
+ + + + + +### Function `scalar_invert_internal` + + +
fun scalar_invert_internal(bytes: vector<u8>): vector<u8>
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures result == spec_scalar_invert_internal(bytes);
+
+ + + + + +### Function `scalar_from_sha512_internal` + + +
fun scalar_from_sha512_internal(sha2_512_input: vector<u8>): vector<u8>
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures result == spec_scalar_from_sha512_internal(sha2_512_input);
+
+ + + + + +### Function `scalar_mul_internal` + + +
fun scalar_mul_internal(a_bytes: vector<u8>, b_bytes: vector<u8>): vector<u8>
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures result == spec_scalar_mul_internal(a_bytes, b_bytes);
+
+ + + + + +### Function `scalar_add_internal` + + +
fun scalar_add_internal(a_bytes: vector<u8>, b_bytes: vector<u8>): vector<u8>
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures result == spec_scalar_add_internal(a_bytes, b_bytes);
+
+ + + + + +### Function `scalar_sub_internal` + + +
fun scalar_sub_internal(a_bytes: vector<u8>, b_bytes: vector<u8>): vector<u8>
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures result == spec_scalar_sub_internal(a_bytes, b_bytes);
+
+ + + + + +### Function `scalar_neg_internal` + + +
fun scalar_neg_internal(a_bytes: vector<u8>): vector<u8>
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures result == spec_scalar_neg_internal(a_bytes);
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/ristretto255_bulletproofs.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/ristretto255_bulletproofs.md new file mode 100644 index 0000000000000..e98a3e6acd194 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/ristretto255_bulletproofs.md @@ -0,0 +1,321 @@ + + + +# Module `0x1::ristretto255_bulletproofs` + +This module implements a Bulletproof range proof verifier on the Ristretto255 curve. + +A Bulletproof-based zero-knowledge range proof is a proof that a Pedersen commitment +$c = v G + r H$ commits to an $n$-bit value $v$ (i.e., $v \in [0, 2^n)$). Currently, this module only supports +$n \in \{8, 16, 32, 64\}$ for the number of bits. + + +- [Struct `RangeProof`](#0x1_ristretto255_bulletproofs_RangeProof) +- [Constants](#@Constants_0) +- [Function `get_max_range_bits`](#0x1_ristretto255_bulletproofs_get_max_range_bits) +- [Function `range_proof_from_bytes`](#0x1_ristretto255_bulletproofs_range_proof_from_bytes) +- [Function `range_proof_to_bytes`](#0x1_ristretto255_bulletproofs_range_proof_to_bytes) +- [Function `verify_range_proof_pedersen`](#0x1_ristretto255_bulletproofs_verify_range_proof_pedersen) +- [Function `verify_range_proof`](#0x1_ristretto255_bulletproofs_verify_range_proof) +- [Function `verify_range_proof_internal`](#0x1_ristretto255_bulletproofs_verify_range_proof_internal) +- [Specification](#@Specification_1) + - [Function `verify_range_proof_internal`](#@Specification_1_verify_range_proof_internal) + + +
use 0x1::error;
+use 0x1::features;
+use 0x1::ristretto255;
+use 0x1::ristretto255_pedersen;
+
+ + + + + +## Struct `RangeProof` + +Represents a zero-knowledge range proof that a value committed inside a Pedersen commitment lies in +[0, 2^{MAX_RANGE_BITS}). + + +
struct RangeProof has copy, drop, store
+
+ + + +
+Fields + + +
+
+bytes: vector<u8> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +The native functions have not been rolled out yet. + + +
const E_NATIVE_FUN_NOT_AVAILABLE: u64 = 4;
+
+ + + + + +There was an error deserializing the range proof. + + +
const E_DESERIALIZE_RANGE_PROOF: u64 = 1;
+
+ + + + + +The range proof system only supports proving ranges of type $[0, 2^b)$ where $b \in \{8, 16, 32, 64\}$. + + +
const E_RANGE_NOT_SUPPORTED: u64 = 3;
+
+ + + + + +The committed value given to the prover is too large. + + +
const E_VALUE_OUTSIDE_RANGE: u64 = 2;
+
+ + + + + +The maximum range supported by the Bulletproofs library is $[0, 2^{64})$. + + +
const MAX_RANGE_BITS: u64 = 64;
+
+ + + + + +## Function `get_max_range_bits` + +Returns the maximum # of bits that the range proof system can verify proofs for. + + +
public fun get_max_range_bits(): u64
+
+ + + +
+Implementation + + +
public fun get_max_range_bits(): u64 {
+    MAX_RANGE_BITS
+}
+
+ + + +
+ + + +## Function `range_proof_from_bytes` + +Deserializes a range proof from a sequence of bytes. The serialization format is the same as the format in +the zkcrypto's bulletproofs library (https://docs.rs/bulletproofs/4.0.0/bulletproofs/struct.RangeProof.html#method.from_bytes). + + +
public fun range_proof_from_bytes(bytes: vector<u8>): ristretto255_bulletproofs::RangeProof
+
+ + + +
+Implementation + + +
public fun range_proof_from_bytes(bytes: vector<u8>): RangeProof {
+    RangeProof {
+        bytes
+    }
+}
+
+ + + +
+ + + +## Function `range_proof_to_bytes` + +Returns the byte-representation of a range proof. + + +
public fun range_proof_to_bytes(proof: &ristretto255_bulletproofs::RangeProof): vector<u8>
+
+ + + +
+Implementation + + +
public fun range_proof_to_bytes(proof: &RangeProof): vector<u8> {
+    proof.bytes
+}
+
+ + + +
+ + + +## Function `verify_range_proof_pedersen` + +Verifies a zero-knowledge range proof that the value v committed in com (under the default Bulletproofs +commitment key; see pedersen::new_commitment_for_bulletproof) satisfies $v \in [0, 2^b)$. Only works +for $b \in \{8, 16, 32, 64\}$. Additionally, checks that the prover used dst as the domain-separation +tag (DST). + +WARNING: The DST check is VERY important for security as it prevents proofs computed for one application +(a.k.a., a _domain_) with dst_1 from verifying in a different application with dst_2 != dst_1. + + +
public fun verify_range_proof_pedersen(com: &ristretto255_pedersen::Commitment, proof: &ristretto255_bulletproofs::RangeProof, num_bits: u64, dst: vector<u8>): bool
+
+ + + +
+Implementation + + +
public fun verify_range_proof_pedersen(com: &pedersen::Commitment, proof: &RangeProof, num_bits: u64, dst: vector<u8>): bool {
+    assert!(features::bulletproofs_enabled(), error::invalid_state(E_NATIVE_FUN_NOT_AVAILABLE));
+
+    verify_range_proof_internal(
+        ristretto255::point_to_bytes(&pedersen::commitment_as_compressed_point(com)),
+        &ristretto255::basepoint(), &ristretto255::hash_to_point_base(),
+        proof.bytes,
+        num_bits,
+        dst
+    )
+}
+
+ + + +
+ + + +## Function `verify_range_proof` + +Verifies a zero-knowledge range proof that the value v committed in com (as v * val_base + r * rand_base, +for some randomness r) satisfies v in [0, 2^num_bits). Only works for num_bits in {8, 16, 32, 64}. + + +
public fun verify_range_proof(com: &ristretto255::RistrettoPoint, val_base: &ristretto255::RistrettoPoint, rand_base: &ristretto255::RistrettoPoint, proof: &ristretto255_bulletproofs::RangeProof, num_bits: u64, dst: vector<u8>): bool
+
+ + + +
+Implementation + + +
public fun verify_range_proof(
+    com: &RistrettoPoint,
+    val_base: &RistrettoPoint, rand_base: &RistrettoPoint,
+    proof: &RangeProof, num_bits: u64, dst: vector<u8>): bool
+{
+    assert!(features::bulletproofs_enabled(), error::invalid_state(E_NATIVE_FUN_NOT_AVAILABLE));
+
+    verify_range_proof_internal(
+        ristretto255::point_to_bytes(&ristretto255::point_compress(com)),
+        val_base, rand_base,
+        proof.bytes, num_bits, dst
+    )
+}
+
+ + + +
+ + + +## Function `verify_range_proof_internal` + +Aborts with error::invalid_argument(E_DESERIALIZE_RANGE_PROOF) if proof is not a valid serialization of a +range proof. +Aborts with error::invalid_argument(E_RANGE_NOT_SUPPORTED) if an unsupported num_bits is provided. + + +
fun verify_range_proof_internal(com: vector<u8>, val_base: &ristretto255::RistrettoPoint, rand_base: &ristretto255::RistrettoPoint, proof: vector<u8>, num_bits: u64, dst: vector<u8>): bool
+
+ + + +
+Implementation + + +
native fun verify_range_proof_internal(
+    com: vector<u8>,
+    val_base: &RistrettoPoint,
+    rand_base: &RistrettoPoint,
+    proof: vector<u8>,
+    num_bits: u64,
+    dst: vector<u8>): bool;
+
+ + + +
+ + + +## Specification + + + + +### Function `verify_range_proof_internal` + + +
fun verify_range_proof_internal(com: vector<u8>, val_base: &ristretto255::RistrettoPoint, rand_base: &ristretto255::RistrettoPoint, proof: vector<u8>, num_bits: u64, dst: vector<u8>): bool
+
+ + + + +
pragma opaque;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/ristretto255_elgamal.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/ristretto255_elgamal.md new file mode 100644 index 0000000000000..ec09869195b51 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/ristretto255_elgamal.md @@ -0,0 +1,707 @@ + + + +# Module `0x1::ristretto255_elgamal` + +This module implements an ElGamal encryption API, over the Ristretto255 curve, that can be used with the +Bulletproofs module. + +An ElGamal *ciphertext* is an encryption of a value v under a basepoint G and public key Y = sk * G, where sk +is the corresponding secret key, is (v * G + r * Y, r * G), for a random scalar r. + +Note that we place the value v "in the exponent" of G so that ciphertexts are additively homomorphic: i.e., so +that Enc_Y(v, r) + Enc_Y(v', r') = Enc_Y(v + v', r + r') where v, v' are plaintext messages, Y is a public key and r, r' +are the randomness of the ciphertexts. 
+ + +- [Struct `Ciphertext`](#0x1_ristretto255_elgamal_Ciphertext) +- [Struct `CompressedCiphertext`](#0x1_ristretto255_elgamal_CompressedCiphertext) +- [Struct `CompressedPubkey`](#0x1_ristretto255_elgamal_CompressedPubkey) +- [Function `new_pubkey_from_bytes`](#0x1_ristretto255_elgamal_new_pubkey_from_bytes) +- [Function `pubkey_to_bytes`](#0x1_ristretto255_elgamal_pubkey_to_bytes) +- [Function `pubkey_to_point`](#0x1_ristretto255_elgamal_pubkey_to_point) +- [Function `pubkey_to_compressed_point`](#0x1_ristretto255_elgamal_pubkey_to_compressed_point) +- [Function `new_ciphertext_from_bytes`](#0x1_ristretto255_elgamal_new_ciphertext_from_bytes) +- [Function `new_ciphertext_no_randomness`](#0x1_ristretto255_elgamal_new_ciphertext_no_randomness) +- [Function `ciphertext_from_points`](#0x1_ristretto255_elgamal_ciphertext_from_points) +- [Function `ciphertext_from_compressed_points`](#0x1_ristretto255_elgamal_ciphertext_from_compressed_points) +- [Function `ciphertext_to_bytes`](#0x1_ristretto255_elgamal_ciphertext_to_bytes) +- [Function `ciphertext_into_points`](#0x1_ristretto255_elgamal_ciphertext_into_points) +- [Function `ciphertext_as_points`](#0x1_ristretto255_elgamal_ciphertext_as_points) +- [Function `compress_ciphertext`](#0x1_ristretto255_elgamal_compress_ciphertext) +- [Function `decompress_ciphertext`](#0x1_ristretto255_elgamal_decompress_ciphertext) +- [Function `ciphertext_add`](#0x1_ristretto255_elgamal_ciphertext_add) +- [Function `ciphertext_add_assign`](#0x1_ristretto255_elgamal_ciphertext_add_assign) +- [Function `ciphertext_sub`](#0x1_ristretto255_elgamal_ciphertext_sub) +- [Function `ciphertext_sub_assign`](#0x1_ristretto255_elgamal_ciphertext_sub_assign) +- [Function `ciphertext_clone`](#0x1_ristretto255_elgamal_ciphertext_clone) +- [Function `ciphertext_equals`](#0x1_ristretto255_elgamal_ciphertext_equals) +- [Function `get_value_component`](#0x1_ristretto255_elgamal_get_value_component) + + +
use 0x1::option;
+use 0x1::ristretto255;
+use 0x1::vector;
+
+ + + + + +## Struct `Ciphertext` + +An ElGamal ciphertext. + + +
struct Ciphertext has drop
+
+ + + +
+Fields + + +
+
+left: ristretto255::RistrettoPoint +
+
+ +
+
+right: ristretto255::RistrettoPoint +
+
+ +
+
+ + +
+ + + +## Struct `CompressedCiphertext` + +A compressed ElGamal ciphertext. + + +
struct CompressedCiphertext has copy, drop, store
+
+ + + +
+Fields + + +
+
+left: ristretto255::CompressedRistretto +
+
+ +
+
+right: ristretto255::CompressedRistretto +
+
+ +
+
+ + +
+ + + +## Struct `CompressedPubkey` + +An ElGamal public key. + + +
struct CompressedPubkey has copy, drop, store
+
+ + + +
+Fields + + +
+
+point: ristretto255::CompressedRistretto +
+
+ +
+
+ + +
+ + + +## Function `new_pubkey_from_bytes` + +Creates a new public key from a serialized Ristretto255 point. + + +
public fun new_pubkey_from_bytes(bytes: vector<u8>): option::Option<ristretto255_elgamal::CompressedPubkey>
+
+ + + +
+Implementation + + +
public fun new_pubkey_from_bytes(bytes: vector<u8>): Option<CompressedPubkey> {
+    let point = ristretto255::new_compressed_point_from_bytes(bytes);
+    if (std::option::is_some(&mut point)) {
+        let pk = CompressedPubkey {
+            point: std::option::extract(&mut point)
+        };
+        std::option::some(pk)
+    } else {
+        std::option::none<CompressedPubkey>()
+    }
+}
+
+ + + +
+ + + +## Function `pubkey_to_bytes` + +Given an ElGamal public key pubkey, returns the byte representation of that public key. + + +
public fun pubkey_to_bytes(pubkey: &ristretto255_elgamal::CompressedPubkey): vector<u8>
+
+ + + +
+Implementation + + +
public fun pubkey_to_bytes(pubkey: &CompressedPubkey): vector<u8> {
+    ristretto255::compressed_point_to_bytes(pubkey.point)
+}
+
+ + + +
+ + + +## Function `pubkey_to_point` + +Given a public key pubkey, returns the underlying RistrettoPoint representing that key. + + +
public fun pubkey_to_point(pubkey: &ristretto255_elgamal::CompressedPubkey): ristretto255::RistrettoPoint
+
+ + + +
+Implementation + + +
public fun pubkey_to_point(pubkey: &CompressedPubkey): RistrettoPoint {
+    ristretto255::point_decompress(&pubkey.point)
+}
+
+ + + +
+ + + +## Function `pubkey_to_compressed_point` + +Given a public key, returns the underlying CompressedRistretto point representing that key. + + +
public fun pubkey_to_compressed_point(pubkey: &ristretto255_elgamal::CompressedPubkey): ristretto255::CompressedRistretto
+
+ + + +
+Implementation + + +
public fun pubkey_to_compressed_point(pubkey: &CompressedPubkey): CompressedRistretto {
+    pubkey.point
+}
+
+ + + +
+ + + +## Function `new_ciphertext_from_bytes` + +Creates a new ciphertext from two serialized Ristretto255 points: the first 32 bytes store v * G + r * Y while the +next 32 bytes store r * G, where Y is the public key. + + +
public fun new_ciphertext_from_bytes(bytes: vector<u8>): option::Option<ristretto255_elgamal::Ciphertext>
+
+ + + +
+Implementation + + +
public fun new_ciphertext_from_bytes(bytes: vector<u8>): Option<Ciphertext> {
+    if(vector::length(&bytes) != 64) {
+        return std::option::none<Ciphertext>()
+    };
+
+    let bytes_right = vector::trim(&mut bytes, 32);
+
+    let left_point = ristretto255::new_point_from_bytes(bytes);
+    let right_point = ristretto255::new_point_from_bytes(bytes_right);
+
+    if (std::option::is_some<RistrettoPoint>(&mut left_point) && std::option::is_some<RistrettoPoint>(&mut right_point)) {
+        std::option::some<Ciphertext>(Ciphertext {
+            left: std::option::extract<RistrettoPoint>(&mut left_point),
+            right: std::option::extract<RistrettoPoint>(&mut right_point)
+        })
+    } else {
+        std::option::none<Ciphertext>()
+    }
+}
+
+ + + +
+ + + +## Function `new_ciphertext_no_randomness` + +Creates a new ciphertext (val * G + 0 * Y, 0 * G) = (val * G, 0 * G) where G is the Ristretto255 basepoint +and the randomness is set to zero. + + +
public fun new_ciphertext_no_randomness(val: &ristretto255::Scalar): ristretto255_elgamal::Ciphertext
+
+ + + +
+Implementation + + +
public fun new_ciphertext_no_randomness(val: &Scalar): Ciphertext {
+    Ciphertext {
+        left: ristretto255::basepoint_mul(val),
+        right: ristretto255::point_identity(),
+    }
+}
+
+ + + +
+ + + +## Function `ciphertext_from_points` + +Moves a pair of Ristretto points into an ElGamal ciphertext. + + +
public fun ciphertext_from_points(left: ristretto255::RistrettoPoint, right: ristretto255::RistrettoPoint): ristretto255_elgamal::Ciphertext
+
+ + + +
+Implementation + + +
public fun ciphertext_from_points(left: RistrettoPoint, right: RistrettoPoint): Ciphertext {
+    Ciphertext {
+        left,
+        right,
+    }
+}
+
+ + + +
+ + + +## Function `ciphertext_from_compressed_points` + +Moves a pair of CompressedRistretto points into an ElGamal ciphertext. + + +
public fun ciphertext_from_compressed_points(left: ristretto255::CompressedRistretto, right: ristretto255::CompressedRistretto): ristretto255_elgamal::CompressedCiphertext
+
+ + + +
+Implementation + + +
public fun ciphertext_from_compressed_points(left: CompressedRistretto, right: CompressedRistretto): CompressedCiphertext {
+    CompressedCiphertext {
+        left,
+        right,
+    }
+}
+
+ + + +
+ + + +## Function `ciphertext_to_bytes` + +Given a ciphertext ct, serializes that ciphertext into bytes. + + +
public fun ciphertext_to_bytes(ct: &ristretto255_elgamal::Ciphertext): vector<u8>
+
+ + + +
+Implementation + + +
public fun ciphertext_to_bytes(ct: &Ciphertext): vector<u8> {
+    let bytes_left = ristretto255::point_to_bytes(&ristretto255::point_compress(&ct.left));
+    let bytes_right = ristretto255::point_to_bytes(&ristretto255::point_compress(&ct.right));
+    let bytes = vector::empty<u8>();
+    vector::append<u8>(&mut bytes, bytes_left);
+    vector::append<u8>(&mut bytes, bytes_right);
+    bytes
+}
+
+ + + +
+ + + +## Function `ciphertext_into_points` + +Moves the ciphertext into a pair of RistrettoPoint's. + + +
public fun ciphertext_into_points(c: ristretto255_elgamal::Ciphertext): (ristretto255::RistrettoPoint, ristretto255::RistrettoPoint)
+
+ + + +
+Implementation + + +
public fun ciphertext_into_points(c: Ciphertext): (RistrettoPoint, RistrettoPoint) {
+    let Ciphertext { left, right } = c;
+    (left, right)
+}
+
+ + + +
+ + + +## Function `ciphertext_as_points` + +Returns the pair of RistrettoPoint's representing the ciphertext. + + +
public fun ciphertext_as_points(c: &ristretto255_elgamal::Ciphertext): (&ristretto255::RistrettoPoint, &ristretto255::RistrettoPoint)
+
+ + + +
+Implementation + + +
public fun ciphertext_as_points(c: &Ciphertext): (&RistrettoPoint, &RistrettoPoint) {
+    (&c.left, &c.right)
+}
+
+ + + +
+ + + +## Function `compress_ciphertext` + +Creates a new compressed ciphertext from a decompressed ciphertext. + + +
public fun compress_ciphertext(ct: &ristretto255_elgamal::Ciphertext): ristretto255_elgamal::CompressedCiphertext
+
+ + + +
+Implementation + + +
public fun compress_ciphertext(ct: &Ciphertext): CompressedCiphertext {
+    CompressedCiphertext {
+        left: point_compress(&ct.left),
+        right: point_compress(&ct.right),
+    }
+}
+
+ + + +
+ + + +## Function `decompress_ciphertext` + +Creates a new decompressed ciphertext from a compressed ciphertext. + + +
public fun decompress_ciphertext(ct: &ristretto255_elgamal::CompressedCiphertext): ristretto255_elgamal::Ciphertext
+
+ + + +
+Implementation + + +
public fun decompress_ciphertext(ct: &CompressedCiphertext): Ciphertext {
+    Ciphertext {
+        left: ristretto255::point_decompress(&ct.left),
+        right: ristretto255::point_decompress(&ct.right),
+    }
+}
+
+ + + +
+ + + +## Function `ciphertext_add` + +Homomorphically combines two ciphertexts lhs and rhs as lhs + rhs. +Useful for re-randomizing the ciphertext or updating the committed value. + + +
public fun ciphertext_add(lhs: &ristretto255_elgamal::Ciphertext, rhs: &ristretto255_elgamal::Ciphertext): ristretto255_elgamal::Ciphertext
+
+ + + +
+Implementation + + +
public fun ciphertext_add(lhs: &Ciphertext, rhs: &Ciphertext): Ciphertext {
+    Ciphertext {
+        left: ristretto255::point_add(&lhs.left, &rhs.left),
+        right: ristretto255::point_add(&lhs.right, &rhs.right),
+    }
+}
+
+ + + +
+ + + +## Function `ciphertext_add_assign` + +Like ciphertext_add but assigns lhs = lhs + rhs. + + +
public fun ciphertext_add_assign(lhs: &mut ristretto255_elgamal::Ciphertext, rhs: &ristretto255_elgamal::Ciphertext)
+
+ + + +
+Implementation + + +
public fun ciphertext_add_assign(lhs: &mut Ciphertext, rhs: &Ciphertext) {
+    ristretto255::point_add_assign(&mut lhs.left, &rhs.left);
+    ristretto255::point_add_assign(&mut lhs.right, &rhs.right);
+}
+
+ + + +
+ + + +## Function `ciphertext_sub` + +Homomorphically combines two ciphertexts lhs and rhs as lhs - rhs. +Useful for re-randomizing the ciphertext or updating the committed value. + + +
public fun ciphertext_sub(lhs: &ristretto255_elgamal::Ciphertext, rhs: &ristretto255_elgamal::Ciphertext): ristretto255_elgamal::Ciphertext
+
+ + + +
+Implementation + + +
public fun ciphertext_sub(lhs: &Ciphertext, rhs: &Ciphertext): Ciphertext {
+    Ciphertext {
+        left: ristretto255::point_sub(&lhs.left, &rhs.left),
+        right: ristretto255::point_sub(&lhs.right, &rhs.right),
+    }
+}
+
+ + + +
+ + + +## Function `ciphertext_sub_assign` + +Like ciphertext_sub but assigns lhs = lhs - rhs. + + +
public fun ciphertext_sub_assign(lhs: &mut ristretto255_elgamal::Ciphertext, rhs: &ristretto255_elgamal::Ciphertext)
+
+ + + +
+Implementation + + +
public fun ciphertext_sub_assign(lhs: &mut Ciphertext, rhs: &Ciphertext) {
+    ristretto255::point_sub_assign(&mut lhs.left, &rhs.left);
+    ristretto255::point_sub_assign(&mut lhs.right, &rhs.right);
+}
+
+ + + +
+ + + +## Function `ciphertext_clone` + +Creates a copy of this ciphertext. + + +
public fun ciphertext_clone(c: &ristretto255_elgamal::Ciphertext): ristretto255_elgamal::Ciphertext
+
+ + + +
+Implementation + + +
public fun ciphertext_clone(c: &Ciphertext): Ciphertext {
+    Ciphertext {
+        left: ristretto255::point_clone(&c.left),
+        right: ristretto255::point_clone(&c.right),
+    }
+}
+
+ + + +
+ + + +## Function `ciphertext_equals` + +Returns true if the two ciphertexts are identical: i.e., same value and same randomness. + + +
public fun ciphertext_equals(lhs: &ristretto255_elgamal::Ciphertext, rhs: &ristretto255_elgamal::Ciphertext): bool
+
+ + + +
+Implementation + + +
public fun ciphertext_equals(lhs: &Ciphertext, rhs: &Ciphertext): bool {
+    ristretto255::point_equals(&lhs.left, &rhs.left) &&
+    ristretto255::point_equals(&lhs.right, &rhs.right)
+}
+
+ + + +
+ + + +## Function `get_value_component` + +Returns the RistrettoPoint in the ciphertext which contains the encrypted value in the exponent. + + +
public fun get_value_component(ct: &ristretto255_elgamal::Ciphertext): &ristretto255::RistrettoPoint
+
+ + + +
+Implementation + + +
public fun get_value_component(ct: &Ciphertext): &RistrettoPoint {
+    &ct.left
+}
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/ristretto255_pedersen.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/ristretto255_pedersen.md new file mode 100644 index 0000000000000..f9d681ace0e90 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/ristretto255_pedersen.md @@ -0,0 +1,573 @@ + + + +# Module `0x1::ristretto255_pedersen` + +This module implements a Pedersen commitment API, over the Ristretto255 curve, that can be used with the +Bulletproofs module. + +A Pedersen commitment to a value v under _commitment key_ (g, h) is v * g + r * h, for a random scalar r. + + +- [Struct `Commitment`](#0x1_ristretto255_pedersen_Commitment) +- [Constants](#@Constants_0) +- [Function `new_commitment_from_bytes`](#0x1_ristretto255_pedersen_new_commitment_from_bytes) +- [Function `commitment_to_bytes`](#0x1_ristretto255_pedersen_commitment_to_bytes) +- [Function `commitment_from_point`](#0x1_ristretto255_pedersen_commitment_from_point) +- [Function `commitment_from_compressed`](#0x1_ristretto255_pedersen_commitment_from_compressed) +- [Function `new_commitment`](#0x1_ristretto255_pedersen_new_commitment) +- [Function `new_commitment_with_basepoint`](#0x1_ristretto255_pedersen_new_commitment_with_basepoint) +- [Function `new_commitment_for_bulletproof`](#0x1_ristretto255_pedersen_new_commitment_for_bulletproof) +- [Function `commitment_add`](#0x1_ristretto255_pedersen_commitment_add) +- [Function `commitment_add_assign`](#0x1_ristretto255_pedersen_commitment_add_assign) +- [Function `commitment_sub`](#0x1_ristretto255_pedersen_commitment_sub) +- [Function `commitment_sub_assign`](#0x1_ristretto255_pedersen_commitment_sub_assign) +- [Function `commitment_clone`](#0x1_ristretto255_pedersen_commitment_clone) +- [Function `commitment_equals`](#0x1_ristretto255_pedersen_commitment_equals) +- [Function 
`commitment_as_point`](#0x1_ristretto255_pedersen_commitment_as_point) +- [Function `commitment_as_compressed_point`](#0x1_ristretto255_pedersen_commitment_as_compressed_point) +- [Function `commitment_into_point`](#0x1_ristretto255_pedersen_commitment_into_point) +- [Function `commitment_into_compressed_point`](#0x1_ristretto255_pedersen_commitment_into_compressed_point) +- [Function `randomness_base_for_bulletproof`](#0x1_ristretto255_pedersen_randomness_base_for_bulletproof) + + +
use 0x1::option;
+use 0x1::ristretto255;
+
+ + + + + +## Struct `Commitment` + +A Pedersen commitment to some value with some randomness. + + +
struct Commitment has drop
+
+ + + +
+Fields + + +
+
+point: ristretto255::RistrettoPoint +
+
+ +
+
+ + +
+ + + +## Constants + + + + +The default Pedersen randomness base h used in our underlying Bulletproofs library. +This is obtained by hashing the compressed Ristretto255 basepoint using SHA3-512 (not SHA2-512). + + +
const BULLETPROOF_DEFAULT_PEDERSEN_RAND_BASE: vector<u8> = [140, 146, 64, 180, 86, 169, 230, 220, 101, 195, 119, 161, 4, 141, 116, 95, 148, 160, 140, 219, 127, 68, 203, 205, 123, 70, 243, 64, 72, 135, 17, 52];
+
+ + + + + +## Function `new_commitment_from_bytes` + +Creates a new commitment from a serialized Ristretto255 point. + + +
public fun new_commitment_from_bytes(bytes: vector<u8>): option::Option<ristretto255_pedersen::Commitment>
+
+ + + +
+Implementation + + +
public fun new_commitment_from_bytes(bytes: vector<u8>): Option<Commitment> {
+    let point = ristretto255::new_point_from_bytes(bytes);
+    if (std::option::is_some(&mut point)) {
+        let comm = Commitment {
+            point: std::option::extract(&mut point)
+        };
+        std::option::some(comm)
+    } else {
+        std::option::none<Commitment>()
+    }
+}
+
+ + + +
+ + + +## Function `commitment_to_bytes` + +Returns a commitment as a serialized byte array + + +
public fun commitment_to_bytes(comm: &ristretto255_pedersen::Commitment): vector<u8>
+
+ + + +
+Implementation + + +
public fun commitment_to_bytes(comm: &Commitment): vector<u8> {
+    ristretto255::point_to_bytes(&ristretto255::point_compress(&comm.point))
+}
+
+ + + +
+ + + +## Function `commitment_from_point` + +Moves a Ristretto point into a Pedersen commitment. + + +
public fun commitment_from_point(point: ristretto255::RistrettoPoint): ristretto255_pedersen::Commitment
+
+ + + +
+Implementation + + +
public fun commitment_from_point(point: RistrettoPoint): Commitment {
+    Commitment {
+        point
+    }
+}
+
+ + + +
+ + + +## Function `commitment_from_compressed` + +Deserializes a commitment from a compressed Ristretto point. + + +
public fun commitment_from_compressed(point: &ristretto255::CompressedRistretto): ristretto255_pedersen::Commitment
+
+ + + +
+Implementation + + +
public fun commitment_from_compressed(point: &CompressedRistretto): Commitment {
+    Commitment {
+        point: ristretto255::point_decompress(point)
+    }
+}
+
+ + + +
+ + + +## Function `new_commitment` + +Returns a commitment v * val_base + r * rand_base where (val_base, rand_base) is the commitment key. + + +
public fun new_commitment(v: &ristretto255::Scalar, val_base: &ristretto255::RistrettoPoint, r: &ristretto255::Scalar, rand_base: &ristretto255::RistrettoPoint): ristretto255_pedersen::Commitment
+
+ + + +
+Implementation + + +
public fun new_commitment(v: &Scalar, val_base: &RistrettoPoint, r: &Scalar, rand_base: &RistrettoPoint): Commitment {
+    Commitment {
+        point: ristretto255::double_scalar_mul(v, val_base, r, rand_base)
+    }
+}
+
+ + + +
+ + + +## Function `new_commitment_with_basepoint` + +Returns a commitment v * G + r * rand_base where G is the Ristretto255 basepoint. + + +
public fun new_commitment_with_basepoint(v: &ristretto255::Scalar, r: &ristretto255::Scalar, rand_base: &ristretto255::RistrettoPoint): ristretto255_pedersen::Commitment
+
+ + + +
+Implementation + + +
public fun new_commitment_with_basepoint(v: &Scalar, r: &Scalar, rand_base: &RistrettoPoint): Commitment {
+    Commitment {
+        point: ristretto255::basepoint_double_mul(r, rand_base, v)
+    }
+}
+
+ + + +
+ + + +## Function `new_commitment_for_bulletproof` + +Returns a commitment v * G + r * H where G is the Ristretto255 basepoint and H is the default randomness +base used in the Bulletproofs library (i.e., BULLETPROOF_DEFAULT_PEDERSEN_RAND_BASE). + + +
public fun new_commitment_for_bulletproof(v: &ristretto255::Scalar, r: &ristretto255::Scalar): ristretto255_pedersen::Commitment
+
+ + + +
+Implementation + + +
public fun new_commitment_for_bulletproof(v: &Scalar, r: &Scalar): Commitment {
+    let rand_base = ristretto255::new_point_from_bytes(BULLETPROOF_DEFAULT_PEDERSEN_RAND_BASE);
+    let rand_base = std::option::extract(&mut rand_base);
+
+    Commitment {
+        point: ristretto255::basepoint_double_mul(r, &rand_base, v)
+    }
+}
+
+ + + +
+ + + +## Function `commitment_add` + +Homomorphically combines two commitments lhs and rhs as lhs + rhs. +Useful for re-randomizing the commitment or updating the committed value. + + +
public fun commitment_add(lhs: &ristretto255_pedersen::Commitment, rhs: &ristretto255_pedersen::Commitment): ristretto255_pedersen::Commitment
+
+ + + +
+Implementation + + +
public fun commitment_add(lhs: &Commitment, rhs: &Commitment): Commitment {
+    Commitment {
+        point: ristretto255::point_add(&lhs.point, &rhs.point)
+    }
+}
+
+ + + +
+ + + +## Function `commitment_add_assign` + +Like commitment_add but assigns lhs = lhs + rhs. + + +
public fun commitment_add_assign(lhs: &mut ristretto255_pedersen::Commitment, rhs: &ristretto255_pedersen::Commitment)
+
+ + + +
+Implementation + + +
public fun commitment_add_assign(lhs: &mut Commitment, rhs: &Commitment) {
+    ristretto255::point_add_assign(&mut lhs.point, &rhs.point);
+}
+
+ + + +
+ + + +## Function `commitment_sub` + +Homomorphically combines two commitments lhs and rhs as lhs - rhs. +Useful for re-randomizing the commitment or updating the committed value. + + +
public fun commitment_sub(lhs: &ristretto255_pedersen::Commitment, rhs: &ristretto255_pedersen::Commitment): ristretto255_pedersen::Commitment
+
+ + + +
+Implementation + + +
public fun commitment_sub(lhs: &Commitment, rhs: &Commitment): Commitment {
+    Commitment {
+        point: ristretto255::point_sub(&lhs.point, &rhs.point)
+    }
+}
+
+ + + +
+ + + +## Function `commitment_sub_assign` + +Like commitment_sub but assigns lhs = lhs - rhs. + + +
public fun commitment_sub_assign(lhs: &mut ristretto255_pedersen::Commitment, rhs: &ristretto255_pedersen::Commitment)
+
+ + + +
+Implementation + + +
public fun commitment_sub_assign(lhs: &mut Commitment, rhs: &Commitment) {
+    ristretto255::point_sub_assign(&mut lhs.point, &rhs.point);
+}
+
+ + + +
+ + + +## Function `commitment_clone` + +Creates a copy of this commitment. + + +
public fun commitment_clone(c: &ristretto255_pedersen::Commitment): ristretto255_pedersen::Commitment
+
+ + + +
+Implementation + + +
public fun commitment_clone(c: &Commitment): Commitment {
+    Commitment {
+        point: ristretto255::point_clone(&c.point)
+    }
+}
+
+ + + +
+ + + +## Function `commitment_equals` + +Returns true if the two commitments are identical: i.e., same value and same randomness. + + +
public fun commitment_equals(lhs: &ristretto255_pedersen::Commitment, rhs: &ristretto255_pedersen::Commitment): bool
+
+ + + +
+Implementation + + +
public fun commitment_equals(lhs: &Commitment, rhs: &Commitment): bool {
+    ristretto255::point_equals(&lhs.point, &rhs.point)
+}
+
+ + + +
+ + + +## Function `commitment_as_point` + +Returns the underlying elliptic curve point representing the commitment as an in-memory RistrettoPoint. + + +
public fun commitment_as_point(c: &ristretto255_pedersen::Commitment): &ristretto255::RistrettoPoint
+
+ + + +
+Implementation + + +
public fun commitment_as_point(c: &Commitment): &RistrettoPoint {
+    &c.point
+}
+
+ + + +
+ + + +## Function `commitment_as_compressed_point` + +Returns the Pedersen commitment as a CompressedRistretto point. + + +
public fun commitment_as_compressed_point(c: &ristretto255_pedersen::Commitment): ristretto255::CompressedRistretto
+
+ + + +
+Implementation + + +
public fun commitment_as_compressed_point(c: &Commitment): CompressedRistretto {
+    point_compress(&c.point)
+}
+
+ + + +
+ + + +## Function `commitment_into_point` + +Moves the Commitment into a RistrettoPoint. + + +
public fun commitment_into_point(c: ristretto255_pedersen::Commitment): ristretto255::RistrettoPoint
+
+ + + +
+Implementation + + +
public fun commitment_into_point(c: Commitment): RistrettoPoint {
+    let Commitment { point } = c;
+    point
+}
+
+ + + +
+ + + +## Function `commitment_into_compressed_point` + +Moves the Commitment into a CompressedRistretto point. + + +
public fun commitment_into_compressed_point(c: ristretto255_pedersen::Commitment): ristretto255::CompressedRistretto
+
+ + + +
+Implementation + + +
public fun commitment_into_compressed_point(c: Commitment): CompressedRistretto {
+    point_compress(&c.point)
+}
+
+ + + +
+ + + +## Function `randomness_base_for_bulletproof` + +Returns the randomness base compatible with the Bulletproofs module. + +Recall that a Bulletproof range proof attests, in zero-knowledge, that a value v inside a Pedersen commitment +v * g + r * h is sufficiently "small" (e.g., is 32-bits wide). Here, h is referred to as the +"randomness base" of the commitment scheme. + +Bulletproof has a default choice for g and h and this function returns the default h as used in the +Bulletproofs Move module. + + +
public fun randomness_base_for_bulletproof(): ristretto255::RistrettoPoint
+
+ + + +
+Implementation + + +
public fun randomness_base_for_bulletproof(): RistrettoPoint {
+    std::option::extract(&mut ristretto255::new_point_from_bytes(BULLETPROOF_DEFAULT_PEDERSEN_RAND_BASE))
+}
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/secp256k1.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/secp256k1.md new file mode 100644 index 0000000000000..813c491081892 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/secp256k1.md @@ -0,0 +1,432 @@ + + + +# Module `0x1::secp256k1` + +This module implements ECDSA signatures based on the prime-order secp256k1 elliptic curve (i.e., cofactor is 1). + + +- [Struct `ECDSARawPublicKey`](#0x1_secp256k1_ECDSARawPublicKey) +- [Struct `ECDSASignature`](#0x1_secp256k1_ECDSASignature) +- [Constants](#@Constants_0) +- [Function `ecdsa_signature_from_bytes`](#0x1_secp256k1_ecdsa_signature_from_bytes) +- [Function `ecdsa_raw_public_key_from_64_bytes`](#0x1_secp256k1_ecdsa_raw_public_key_from_64_bytes) +- [Function `ecdsa_raw_public_key_to_bytes`](#0x1_secp256k1_ecdsa_raw_public_key_to_bytes) +- [Function `ecdsa_signature_to_bytes`](#0x1_secp256k1_ecdsa_signature_to_bytes) +- [Function `ecdsa_recover`](#0x1_secp256k1_ecdsa_recover) +- [Function `ecdsa_recover_internal`](#0x1_secp256k1_ecdsa_recover_internal) +- [Specification](#@Specification_1) + - [Function `ecdsa_signature_from_bytes`](#@Specification_1_ecdsa_signature_from_bytes) + - [Function `ecdsa_raw_public_key_from_64_bytes`](#@Specification_1_ecdsa_raw_public_key_from_64_bytes) + - [Function `ecdsa_raw_public_key_to_bytes`](#@Specification_1_ecdsa_raw_public_key_to_bytes) + - [Function `ecdsa_signature_to_bytes`](#@Specification_1_ecdsa_signature_to_bytes) + - [Function `ecdsa_recover`](#@Specification_1_ecdsa_recover) + - [Function `ecdsa_recover_internal`](#@Specification_1_ecdsa_recover_internal) + + +
use 0x1::error;
+use 0x1::option;
+
+ + + + + +## Struct `ECDSARawPublicKey` + +A 64-byte ECDSA public key. + + +
struct ECDSARawPublicKey has copy, drop, store
+
+ + + +
+Fields + + +
+
+bytes: vector<u8> +
+
+ +
+
+ + +
+ + + +## Struct `ECDSASignature` + +A 64-byte ECDSA signature. + + +
struct ECDSASignature has copy, drop, store
+
+ + + +
+Fields + + +
+
+bytes: vector<u8> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +The size of a secp256k1-based ECDSA signature, in bytes. + + +
const SIGNATURE_NUM_BYTES: u64 = 64;
+
+ + + + + +An error occurred while deserializing, for example due to wrong input size. + + +
const E_DESERIALIZE: u64 = 1;
+
+ + + + + +The size of a secp256k1-based ECDSA public key, in bytes. + + +
const RAW_PUBLIC_KEY_NUM_BYTES: u64 = 64;
+
+ + + + + +## Function `ecdsa_signature_from_bytes` + +Constructs an ECDSASignature struct from the given 64 bytes. + + +
public fun ecdsa_signature_from_bytes(bytes: vector<u8>): secp256k1::ECDSASignature
+
+ + + +
+Implementation + + +
public fun ecdsa_signature_from_bytes(bytes: vector<u8>): ECDSASignature {
+    assert!(std::vector::length(&bytes) == SIGNATURE_NUM_BYTES, std::error::invalid_argument(E_DESERIALIZE));
+    ECDSASignature { bytes }
+}
+
+ + + +
+ + + +## Function `ecdsa_raw_public_key_from_64_bytes` + +Constructs an ECDSARawPublicKey struct, given a 64-byte raw representation. + + +
public fun ecdsa_raw_public_key_from_64_bytes(bytes: vector<u8>): secp256k1::ECDSARawPublicKey
+
+ + + +
+Implementation + + +
public fun ecdsa_raw_public_key_from_64_bytes(bytes: vector<u8>): ECDSARawPublicKey {
+    assert!(std::vector::length(&bytes) == RAW_PUBLIC_KEY_NUM_BYTES, std::error::invalid_argument(E_DESERIALIZE));
+    ECDSARawPublicKey { bytes }
+}
+
+ + + +
+ + + +## Function `ecdsa_raw_public_key_to_bytes` + +Serializes an ECDSARawPublicKey struct to 64-bytes. + + +
public fun ecdsa_raw_public_key_to_bytes(pk: &secp256k1::ECDSARawPublicKey): vector<u8>
+
+ + + +
+Implementation + + +
public fun ecdsa_raw_public_key_to_bytes(pk: &ECDSARawPublicKey): vector<u8> {
+    pk.bytes
+}
+
+ + + +
+ + + +## Function `ecdsa_signature_to_bytes` + +Serializes an ECDSASignature struct to 64-bytes. + + +
public fun ecdsa_signature_to_bytes(sig: &secp256k1::ECDSASignature): vector<u8>
+
+ + + +
+Implementation + + +
public fun ecdsa_signature_to_bytes(sig: &ECDSASignature): vector<u8> {
+    sig.bytes
+}
+
+ + + +
+ + + +## Function `ecdsa_recover` + +Recovers the signer's raw (64-byte) public key from a secp256k1 ECDSA signature given the recovery_id and the signed +message (32 byte digest). + +Note that an invalid signature, or a signature from a different message, will result in the recovery of an +incorrect public key. This recovery algorithm can only be used to check validity of a signature if the signer's +public key (or its hash) is known beforehand. + + +
public fun ecdsa_recover(message: vector<u8>, recovery_id: u8, signature: &secp256k1::ECDSASignature): option::Option<secp256k1::ECDSARawPublicKey>
+
+ + + +
+Implementation + + +
public fun ecdsa_recover(
+    message: vector<u8>,
+    recovery_id: u8,
+    signature: &ECDSASignature,
+): Option<ECDSARawPublicKey> {
+    let (pk, success) = ecdsa_recover_internal(message, recovery_id, signature.bytes);
+    if (success) {
+        std::option::some(ecdsa_raw_public_key_from_64_bytes(pk))
+    } else {
+        std::option::none<ECDSARawPublicKey>()
+    }
+}
+
+ + + +
+ + + +## Function `ecdsa_recover_internal` + +Returns (public_key, true) if signature verifies on message under the recovered public_key +and returns ([], false) otherwise. + + +
fun ecdsa_recover_internal(message: vector<u8>, recovery_id: u8, signature: vector<u8>): (vector<u8>, bool)
+
+ + + +
+Implementation + + +
native fun ecdsa_recover_internal(
+    message: vector<u8>,
+    recovery_id: u8,
+    signature: vector<u8>
+): (vector<u8>, bool);
+
+ + + +
+ + + +## Specification + + + + +### Function `ecdsa_signature_from_bytes` + + +
public fun ecdsa_signature_from_bytes(bytes: vector<u8>): secp256k1::ECDSASignature
+
+ + + + +
aborts_if len(bytes) != SIGNATURE_NUM_BYTES;
+ensures result == ECDSASignature { bytes };
+
+ + + + + +### Function `ecdsa_raw_public_key_from_64_bytes` + + +
public fun ecdsa_raw_public_key_from_64_bytes(bytes: vector<u8>): secp256k1::ECDSARawPublicKey
+
+ + + + +
aborts_if len(bytes) != RAW_PUBLIC_KEY_NUM_BYTES;
+ensures result == ECDSARawPublicKey { bytes };
+
+ + + + + +### Function `ecdsa_raw_public_key_to_bytes` + + +
public fun ecdsa_raw_public_key_to_bytes(pk: &secp256k1::ECDSARawPublicKey): vector<u8>
+
+ + + + +
aborts_if false;
+ensures result == pk.bytes;
+
+ + + + + +### Function `ecdsa_signature_to_bytes` + + +
public fun ecdsa_signature_to_bytes(sig: &secp256k1::ECDSASignature): vector<u8>
+
+ + + + +
aborts_if false;
+ensures result == sig.bytes;
+
+ + + + + +### Function `ecdsa_recover` + + +
public fun ecdsa_recover(message: vector<u8>, recovery_id: u8, signature: &secp256k1::ECDSASignature): option::Option<secp256k1::ECDSARawPublicKey>
+
+ + + + +
aborts_if ecdsa_recover_internal_abort_condition(message, recovery_id, signature.bytes);
+let pk = spec_ecdsa_recover_internal_result_1(message, recovery_id, signature.bytes);
+let success = spec_ecdsa_recover_internal_result_2(message, recovery_id, signature.bytes);
+ensures success ==> result == std::option::spec_some(ecdsa_raw_public_key_from_64_bytes(pk));
+ensures !success ==> result == std::option::spec_none<ECDSARawPublicKey>();
+
+ + + + + +### Function `ecdsa_recover_internal` + + +
fun ecdsa_recover_internal(message: vector<u8>, recovery_id: u8, signature: vector<u8>): (vector<u8>, bool)
+
+ + + + +
pragma opaque;
+aborts_if ecdsa_recover_internal_abort_condition(message, recovery_id, signature);
+ensures result_1 == spec_ecdsa_recover_internal_result_1(message, recovery_id, signature);
+ensures result_2 == spec_ecdsa_recover_internal_result_2(message, recovery_id, signature);
+ensures len(result_1) == if (result_2) { RAW_PUBLIC_KEY_NUM_BYTES } else { 0 };
+
+ + + + + + + +
fun ecdsa_recover_internal_abort_condition(message: vector<u8>, recovery_id: u8, signature: vector<u8>): bool;
+
+ + + + + + + +
fun spec_ecdsa_recover_internal_result_1(message: vector<u8>, recovery_id: u8, signature: vector<u8>): vector<u8>;
+
+ + + + + + + +
fun spec_ecdsa_recover_internal_result_2(message: vector<u8>, recovery_id: u8, signature: vector<u8>): bool;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/simple_map.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/simple_map.md new file mode 100644 index 0000000000000..05603d3e773cf --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/simple_map.md @@ -0,0 +1,1035 @@ + + + +# Module `0x1::simple_map` + +This module provides a solution for unsorted maps, that is it has the properties that +1) Keys point to Values +2) Each Key must be unique +3) A Key can be found within O(N) time +4) The keys are unsorted. +5) Adds and removals take O(N) time + + +- [Struct `SimpleMap`](#0x1_simple_map_SimpleMap) +- [Struct `Element`](#0x1_simple_map_Element) +- [Constants](#@Constants_0) +- [Function `length`](#0x1_simple_map_length) +- [Function `new`](#0x1_simple_map_new) +- [Function `new_from`](#0x1_simple_map_new_from) +- [Function `create`](#0x1_simple_map_create) +- [Function `borrow`](#0x1_simple_map_borrow) +- [Function `borrow_mut`](#0x1_simple_map_borrow_mut) +- [Function `contains_key`](#0x1_simple_map_contains_key) +- [Function `destroy_empty`](#0x1_simple_map_destroy_empty) +- [Function `add`](#0x1_simple_map_add) +- [Function `add_all`](#0x1_simple_map_add_all) +- [Function `upsert`](#0x1_simple_map_upsert) +- [Function `keys`](#0x1_simple_map_keys) +- [Function `values`](#0x1_simple_map_values) +- [Function `to_vec_pair`](#0x1_simple_map_to_vec_pair) +- [Function `destroy`](#0x1_simple_map_destroy) +- [Function `remove`](#0x1_simple_map_remove) +- [Function `find`](#0x1_simple_map_find) +- [Specification](#@Specification_1) + - [Struct `SimpleMap`](#@Specification_1_SimpleMap) + - [Function `length`](#@Specification_1_length) + - [Function `new`](#@Specification_1_new) + - [Function `new_from`](#@Specification_1_new_from) + - [Function `create`](#@Specification_1_create) + - [Function `borrow`](#@Specification_1_borrow) + - [Function 
`borrow_mut`](#@Specification_1_borrow_mut) + - [Function `contains_key`](#@Specification_1_contains_key) + - [Function `destroy_empty`](#@Specification_1_destroy_empty) + - [Function `add`](#@Specification_1_add) + - [Function `add_all`](#@Specification_1_add_all) + - [Function `upsert`](#@Specification_1_upsert) + - [Function `keys`](#@Specification_1_keys) + - [Function `values`](#@Specification_1_values) + - [Function `to_vec_pair`](#@Specification_1_to_vec_pair) + - [Function `remove`](#@Specification_1_remove) + - [Function `find`](#@Specification_1_find) + + +
use 0x1::error;
+use 0x1::option;
+use 0x1::vector;
+
+ + + + + +## Struct `SimpleMap` + + + +
struct SimpleMap<Key, Value> has copy, drop, store
+
+ + + +
+Fields + + +
+
+data: vector<simple_map::Element<Key, Value>> +
+
+ +
+
+ + +
+ + + +## Struct `Element` + + + +
struct Element<Key, Value> has copy, drop, store
+
+ + + +
+Fields + + +
+
+key: Key +
+
+ +
+
+value: Value +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Map key already exists + + +
const EKEY_ALREADY_EXISTS: u64 = 1;
+
+ + + + + +Map key is not found + + +
const EKEY_NOT_FOUND: u64 = 2;
+
+ + + + + +## Function `length` + + + +
public fun length<Key: store, Value: store>(self: &simple_map::SimpleMap<Key, Value>): u64
+
+ + + +
+Implementation + + +
public fun length<Key: store, Value: store>(self: &SimpleMap<Key, Value>): u64 {
+    vector::length(&self.data)
+}
+
+ + + +
+ + + +## Function `new` + +Create an empty SimpleMap. + + +
public fun new<Key: store, Value: store>(): simple_map::SimpleMap<Key, Value>
+
+ + + +
+Implementation + + +
public fun new<Key: store, Value: store>(): SimpleMap<Key, Value> {
+    SimpleMap {
+        data: vector::empty(),
+    }
+}
+
+ + + +
+ + + +## Function `new_from` + +Create a SimpleMap from a vector of keys and values. The keys must be unique. + + +
public fun new_from<Key: store, Value: store>(keys: vector<Key>, values: vector<Value>): simple_map::SimpleMap<Key, Value>
+
+ + + +
+Implementation + + +
public fun new_from<Key: store, Value: store>(
+    keys: vector<Key>,
+    values: vector<Value>,
+): SimpleMap<Key, Value> {
+    let map = new();
+    add_all(&mut map, keys, values);
+    map
+}
+
+ + + +
+ + + +## Function `create` + +Create an empty SimpleMap. +This function is deprecated, use new instead. + + +
#[deprecated]
+public fun create<Key: store, Value: store>(): simple_map::SimpleMap<Key, Value>
+
+ + + +
+Implementation + + +
public fun create<Key: store, Value: store>(): SimpleMap<Key, Value> {
+    new()
+}
+
+ + + +
+ + + +## Function `borrow` + + + +
public fun borrow<Key: store, Value: store>(self: &simple_map::SimpleMap<Key, Value>, key: &Key): &Value
+
+ + + +
+Implementation + + +
public fun borrow<Key: store, Value: store>(
+    self: &SimpleMap<Key, Value>,
+    key: &Key,
+): &Value {
+    let maybe_idx = find(self, key);
+    assert!(option::is_some(&maybe_idx), error::invalid_argument(EKEY_NOT_FOUND));
+    let idx = option::extract(&mut maybe_idx);
+    &vector::borrow(&self.data, idx).value
+}
+
+ + + +
+ + + +## Function `borrow_mut` + + + +
public fun borrow_mut<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, key: &Key): &mut Value
+
+ + + +
+Implementation + + +
public fun borrow_mut<Key: store, Value: store>(
+    self: &mut SimpleMap<Key, Value>,
+    key: &Key,
+): &mut Value {
+    let maybe_idx = find(self, key);
+    assert!(option::is_some(&maybe_idx), error::invalid_argument(EKEY_NOT_FOUND));
+    let idx = option::extract(&mut maybe_idx);
+    &mut vector::borrow_mut(&mut self.data, idx).value
+}
+
+ + + +
+ + + +## Function `contains_key` + + + +
public fun contains_key<Key: store, Value: store>(self: &simple_map::SimpleMap<Key, Value>, key: &Key): bool
+
+ + + +
+Implementation + + +
public fun contains_key<Key: store, Value: store>(
+    self: &SimpleMap<Key, Value>,
+    key: &Key,
+): bool {
+    let maybe_idx = find(self, key);
+    option::is_some(&maybe_idx)
+}
+
+ + + +
+ + + +## Function `destroy_empty` + + + +
public fun destroy_empty<Key: store, Value: store>(self: simple_map::SimpleMap<Key, Value>)
+
+ + + +
+Implementation + + +
public fun destroy_empty<Key: store, Value: store>(self: SimpleMap<Key, Value>) {
+    let SimpleMap { data } = self;
+    vector::destroy_empty(data);
+}
+
+ + + +
+ + + +## Function `add` + +Add a key/value pair to the map. The key must not already exist. + + +
public fun add<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, key: Key, value: Value)
+
+ + + +
+Implementation + + +
public fun add<Key: store, Value: store>(
+    self: &mut SimpleMap<Key, Value>,
+    key: Key,
+    value: Value,
+) {
+    let maybe_idx = find(self, &key);
+    assert!(option::is_none(&maybe_idx), error::invalid_argument(EKEY_ALREADY_EXISTS));
+
+    vector::push_back(&mut self.data, Element { key, value });
+}
+
+ + + +
+ + + +## Function `add_all` + +Add multiple key/value pairs to the map. The keys must not already exist. + + +
public fun add_all<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, keys: vector<Key>, values: vector<Value>)
+
+ + + +
+Implementation + + +
public fun add_all<Key: store, Value: store>(
+    self: &mut SimpleMap<Key, Value>,
+    keys: vector<Key>,
+    values: vector<Value>,
+) {
+    vector::zip(keys, values, |key, value| {
+        add(self, key, value);
+    });
+}
+
+ + + +
+ + + +## Function `upsert` + +Insert key/value pair or update an existing key to a new value + + +
public fun upsert<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, key: Key, value: Value): (option::Option<Key>, option::Option<Value>)
+
+ + + +
+Implementation + + +
public fun upsert<Key: store, Value: store>(
+    self: &mut SimpleMap<Key, Value>,
+    key: Key,
+    value: Value
+): (std::option::Option<Key>, std::option::Option<Value>) {
+    let data = &mut self.data;
+    let len = vector::length(data);
+    let i = 0;
+    while (i < len) {
+        let element = vector::borrow(data, i);
+        if (&element.key == &key) {
+            vector::push_back(data, Element { key, value });
+            vector::swap(data, i, len);
+            let Element { key, value } = vector::pop_back(data);
+            return (std::option::some(key), std::option::some(value))
+        };
+        i = i + 1;
+    };
+    vector::push_back(&mut self.data, Element { key, value });
+    (std::option::none(), std::option::none())
+}
+
+ + + +
+ + + +## Function `keys` + +Return all keys in the map. This requires keys to be copyable. + + +
public fun keys<Key: copy, Value>(self: &simple_map::SimpleMap<Key, Value>): vector<Key>
+
+ + + +
+Implementation + + +
public fun keys<Key: copy, Value>(self: &SimpleMap<Key, Value>): vector<Key> {
+    vector::map_ref(&self.data, |e| {
+        let e: &Element<Key, Value> = e;
+        e.key
+    })
+}
+
+ + + +
+ + + +## Function `values` + +Return all values in the map. This requires values to be copyable. + + +
public fun values<Key, Value: copy>(self: &simple_map::SimpleMap<Key, Value>): vector<Value>
+
+ + + +
+Implementation + + +
public fun values<Key, Value: copy>(self: &SimpleMap<Key, Value>): vector<Value> {
+    vector::map_ref(&self.data, |e| {
+        let e: &Element<Key, Value> = e;
+        e.value
+    })
+}
+
+ + + +
+ + + +## Function `to_vec_pair` + +Transform the map into two vectors with the keys and values respectively +Primarily used to destroy a map + + +
public fun to_vec_pair<Key: store, Value: store>(self: simple_map::SimpleMap<Key, Value>): (vector<Key>, vector<Value>)
+
+ + + +
+Implementation + + +
public fun to_vec_pair<Key: store, Value: store>(
+    self: SimpleMap<Key, Value>): (vector<Key>, vector<Value>) {
+    let keys: vector<Key> = vector::empty();
+    let values: vector<Value> = vector::empty();
+    let SimpleMap { data } = self;
+    vector::for_each(data, |e| {
+        let Element { key, value } = e;
+        vector::push_back(&mut keys, key);
+        vector::push_back(&mut values, value);
+    });
+    (keys, values)
+}
+
+ + + +
+ + + +## Function `destroy` + +For maps that cannot be dropped this is a utility to destroy them +using lambdas to destroy the individual keys and values. + + +
public fun destroy<Key: store, Value: store>(self: simple_map::SimpleMap<Key, Value>, dk: |Key|, dv: |Value|)
+
+ + + +
+Implementation + + +
public inline fun destroy<Key: store, Value: store>(
+    self: SimpleMap<Key, Value>,
+    dk: |Key|,
+    dv: |Value|
+) {
+    let (keys, values) = to_vec_pair(self);
+    vector::destroy(keys, |_k| dk(_k));
+    vector::destroy(values, |_v| dv(_v));
+}
+
+ + + +
+ + + +## Function `remove` + +Remove a key/value pair from the map. The key must exist. + + +
public fun remove<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, key: &Key): (Key, Value)
+
+ + + +
+Implementation + + +
public fun remove<Key: store, Value: store>(
+    self: &mut SimpleMap<Key, Value>,
+    key: &Key,
+): (Key, Value) {
+    let maybe_idx = find(self, key);
+    assert!(option::is_some(&maybe_idx), error::invalid_argument(EKEY_NOT_FOUND));
+    let placement = option::extract(&mut maybe_idx);
+    let Element { key, value } = vector::swap_remove(&mut self.data, placement);
+    (key, value)
+}
+
+ + + +
+ + + +## Function `find` + + + +
fun find<Key: store, Value: store>(self: &simple_map::SimpleMap<Key, Value>, key: &Key): option::Option<u64>
+
+ + + +
+Implementation + + +
fun find<Key: store, Value: store>(
+    self: &SimpleMap<Key, Value>,
+    key: &Key,
+): option::Option<u64> {
+    let leng = vector::length(&self.data);
+    let i = 0;
+    while (i < leng) {
+        let element = vector::borrow(&self.data, i);
+        if (&element.key == key) {
+            return option::some(i)
+        };
+        i = i + 1;
+    };
+    option::none<u64>()
+}
+
+ + + +
+ + + +## Specification + + + + +### Struct `SimpleMap` + + +
struct SimpleMap<Key, Value> has copy, drop, store
+
+ + + +
+
+data: vector<simple_map::Element<Key, Value>> +
+
+ +
+
+ + + +
pragma intrinsic = map,
+    map_new = create,
+    map_len = length,
+    map_destroy_empty = destroy_empty,
+    map_has_key = contains_key,
+    map_add_no_override = add,
+    map_del_return_key = remove,
+    map_borrow = borrow,
+    map_borrow_mut = borrow_mut,
+    map_spec_get = spec_get,
+    map_spec_set = spec_set,
+    map_spec_del = spec_remove,
+    map_spec_len = spec_len,
+    map_spec_has_key = spec_contains_key;
+
+ + + + + +### Function `length` + + +
public fun length<Key: store, Value: store>(self: &simple_map::SimpleMap<Key, Value>): u64
+
+ + + + +
pragma intrinsic;
+
+ + + + + +### Function `new` + + +
public fun new<Key: store, Value: store>(): simple_map::SimpleMap<Key, Value>
+
+ + + + +
pragma intrinsic;
+pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] spec_len(result) == 0;
+ensures [abstract] forall k: Key: !spec_contains_key(result, k);
+
+ + + + + +### Function `new_from` + + +
public fun new_from<Key: store, Value: store>(keys: vector<Key>, values: vector<Value>): simple_map::SimpleMap<Key, Value>
+
+ + + + +
pragma intrinsic;
+pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] spec_len(result) == len(keys);
+ensures [abstract] forall k: Key: spec_contains_key(result, k) <==> vector::spec_contains(keys, k);
+ensures [abstract] forall i in 0..len(keys):
+    spec_get(result, vector::borrow(keys, i)) == vector::borrow(values, i);
+
+ + + + + +### Function `create` + + +
#[deprecated]
+public fun create<Key: store, Value: store>(): simple_map::SimpleMap<Key, Value>
+
+ + + + +
pragma intrinsic;
+
+ + + + + +### Function `borrow` + + +
public fun borrow<Key: store, Value: store>(self: &simple_map::SimpleMap<Key, Value>, key: &Key): &Value
+
+ + + + +
pragma intrinsic;
+
+ + + + + +### Function `borrow_mut` + + +
public fun borrow_mut<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, key: &Key): &mut Value
+
+ + + + +
pragma intrinsic;
+
+ + + + + +### Function `contains_key` + + +
public fun contains_key<Key: store, Value: store>(self: &simple_map::SimpleMap<Key, Value>, key: &Key): bool
+
+ + + + +
pragma intrinsic;
+
+ + + + + +### Function `destroy_empty` + + +
public fun destroy_empty<Key: store, Value: store>(self: simple_map::SimpleMap<Key, Value>)
+
+ + + + +
pragma intrinsic;
+
+ + + + + +### Function `add` + + +
public fun add<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, key: Key, value: Value)
+
+ + + + +
pragma intrinsic;
+
+ + + + + +### Function `add_all` + + +
public fun add_all<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, keys: vector<Key>, values: vector<Value>)
+
+ + + + +
pragma intrinsic;
+
+ + + + + +### Function `upsert` + + +
public fun upsert<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, key: Key, value: Value): (option::Option<Key>, option::Option<Value>)
+
+ + + + +
pragma intrinsic;
+pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] !spec_contains_key(old(self), key) ==> option::is_none(result_1);
+ensures [abstract] !spec_contains_key(old(self), key) ==> option::is_none(result_2);
+ensures [abstract] spec_contains_key(self, key);
+ensures [abstract] spec_get(self, key) == value;
+ensures [abstract] spec_contains_key(old(self), key) ==> ((option::is_some(result_1)) && (option::spec_borrow(result_1) == key));
+ensures [abstract] spec_contains_key(old(self), key) ==> ((option::is_some(result_2)) && (option::spec_borrow(result_2) == spec_get(old(
+    self
+), key)));
+
+ + + + + + + +
native fun spec_len<K, V>(t: SimpleMap<K, V>): num;
+
+ + + + + + + +
native fun spec_contains_key<K, V>(t: SimpleMap<K, V>, k: K): bool;
+
+ + + + + + + +
native fun spec_set<K, V>(t: SimpleMap<K, V>, k: K, v: V): SimpleMap<K, V>;
+
+ + + + + + + +
native fun spec_remove<K, V>(t: SimpleMap<K, V>, k: K): SimpleMap<K, V>;
+
+ + + + + + + +
native fun spec_get<K, V>(t: SimpleMap<K, V>, k: K): V;
+
+ + + + + +### Function `keys` + + +
public fun keys<Key: copy, Value>(self: &simple_map::SimpleMap<Key, Value>): vector<Key>
+
+ + + + +
pragma verify=false;
+
+ + + + + +### Function `values` + + +
public fun values<Key, Value: copy>(self: &simple_map::SimpleMap<Key, Value>): vector<Value>
+
+ + + + +
pragma verify=false;
+
+ + + + + +### Function `to_vec_pair` + + +
public fun to_vec_pair<Key: store, Value: store>(self: simple_map::SimpleMap<Key, Value>): (vector<Key>, vector<Value>)
+
+ + + + +
pragma intrinsic;
+pragma opaque;
+ensures [abstract]
+    forall k: Key: vector::spec_contains(result_1, k) <==>
+        spec_contains_key(self, k);
+ensures [abstract] forall i in 0..len(result_1):
+    spec_get(self, vector::borrow(result_1, i)) == vector::borrow(result_2, i);
+
+ + + + + +### Function `remove` + + +
public fun remove<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, key: &Key): (Key, Value)
+
+ + + + +
pragma intrinsic;
+
+ + + + + +### Function `find` + + +
fun find<Key: store, Value: store>(self: &simple_map::SimpleMap<Key, Value>, key: &Key): option::Option<u64>
+
+ + + + +
pragma verify=false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/smart_table.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/smart_table.md new file mode 100644 index 0000000000000..d6e9282f82899 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/smart_table.md @@ -0,0 +1,1786 @@ + + + +# Module `0x1::smart_table` + +A smart table implementation based on linear hashing. (https://en.wikipedia.org/wiki/Linear_hashing) +Compare to Table, it uses less storage slots but has higher chance of collision, a trade-off between space and time. +Compare to other dynamic hashing implementation, linear hashing splits one bucket a time instead of doubling buckets +when expanding to avoid unexpected gas cost. +SmartTable uses faster hash function SipHash instead of cryptographically secure hash functions like sha3-256 since +it tolerates collisions. + + +- [Struct `Entry`](#0x1_smart_table_Entry) +- [Struct `SmartTable`](#0x1_smart_table_SmartTable) +- [Constants](#@Constants_0) +- [Function `new`](#0x1_smart_table_new) +- [Function `new_with_config`](#0x1_smart_table_new_with_config) +- [Function `destroy_empty`](#0x1_smart_table_destroy_empty) +- [Function `destroy`](#0x1_smart_table_destroy) +- [Function `clear`](#0x1_smart_table_clear) +- [Function `add`](#0x1_smart_table_add) +- [Function `add_all`](#0x1_smart_table_add_all) +- [Function `unzip_entries`](#0x1_smart_table_unzip_entries) +- [Function `to_simple_map`](#0x1_smart_table_to_simple_map) +- [Function `keys`](#0x1_smart_table_keys) +- [Function `keys_paginated`](#0x1_smart_table_keys_paginated) +- [Function `split_one_bucket`](#0x1_smart_table_split_one_bucket) +- [Function `bucket_index`](#0x1_smart_table_bucket_index) +- [Function `borrow`](#0x1_smart_table_borrow) +- [Function `borrow_with_default`](#0x1_smart_table_borrow_with_default) +- [Function `borrow_mut`](#0x1_smart_table_borrow_mut) +- [Function 
`borrow_mut_with_default`](#0x1_smart_table_borrow_mut_with_default) +- [Function `contains`](#0x1_smart_table_contains) +- [Function `remove`](#0x1_smart_table_remove) +- [Function `upsert`](#0x1_smart_table_upsert) +- [Function `length`](#0x1_smart_table_length) +- [Function `load_factor`](#0x1_smart_table_load_factor) +- [Function `update_split_load_threshold`](#0x1_smart_table_update_split_load_threshold) +- [Function `update_target_bucket_size`](#0x1_smart_table_update_target_bucket_size) +- [Function `for_each_ref`](#0x1_smart_table_for_each_ref) +- [Function `for_each_mut`](#0x1_smart_table_for_each_mut) +- [Function `map_ref`](#0x1_smart_table_map_ref) +- [Function `any`](#0x1_smart_table_any) +- [Function `borrow_kv`](#0x1_smart_table_borrow_kv) +- [Function `borrow_kv_mut`](#0x1_smart_table_borrow_kv_mut) +- [Function `num_buckets`](#0x1_smart_table_num_buckets) +- [Function `borrow_buckets`](#0x1_smart_table_borrow_buckets) +- [Function `borrow_buckets_mut`](#0x1_smart_table_borrow_buckets_mut) +- [Specification](#@Specification_1) + - [Struct `SmartTable`](#@Specification_1_SmartTable) + - [Function `new_with_config`](#@Specification_1_new_with_config) + - [Function `destroy`](#@Specification_1_destroy) + - [Function `clear`](#@Specification_1_clear) + - [Function `add_all`](#@Specification_1_add_all) + - [Function `to_simple_map`](#@Specification_1_to_simple_map) + - [Function `keys`](#@Specification_1_keys) + - [Function `keys_paginated`](#@Specification_1_keys_paginated) + - [Function `split_one_bucket`](#@Specification_1_split_one_bucket) + - [Function `bucket_index`](#@Specification_1_bucket_index) + - [Function `borrow_with_default`](#@Specification_1_borrow_with_default) + - [Function `load_factor`](#@Specification_1_load_factor) + - [Function `update_split_load_threshold`](#@Specification_1_update_split_load_threshold) + - [Function `update_target_bucket_size`](#@Specification_1_update_target_bucket_size) + - [Function 
`borrow_kv`](#@Specification_1_borrow_kv) + - [Function `borrow_kv_mut`](#@Specification_1_borrow_kv_mut) + - [Function `num_buckets`](#@Specification_1_num_buckets) + - [Function `borrow_buckets`](#@Specification_1_borrow_buckets) + - [Function `borrow_buckets_mut`](#@Specification_1_borrow_buckets_mut) + + +
use 0x1::aptos_hash;
+use 0x1::error;
+use 0x1::math64;
+use 0x1::option;
+use 0x1::simple_map;
+use 0x1::table_with_length;
+use 0x1::type_info;
+use 0x1::vector;
+
+ + + + + +## Struct `Entry` + +SmartTable entry contains both the key and value. + + +
struct Entry<K, V> has copy, drop, store
+
+ + + +
+Fields + + +
+
+hash: u64 +
+
+ +
+
+key: K +
+
+ +
+
+value: V +
+
+ +
+
+ + +
+ + + +## Struct `SmartTable` + + + +
struct SmartTable<K, V> has store
+
+ + + +
+Fields + + +
+
+buckets: table_with_length::TableWithLength<u64, vector<smart_table::Entry<K, V>>> +
+
+ +
+
+num_buckets: u64 +
+
+ +
+
+level: u8 +
+
+ +
+
+size: u64 +
+
+ +
+
+split_load_threshold: u8 +
+
+ +
+
+target_bucket_size: u64 +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Cannot destroy non-empty hashmap + + +
const ENOT_EMPTY: u64 = 3;
+
+ + + + + +Key not found in the smart table + + +
const ENOT_FOUND: u64 = 1;
+
+ + + + + +Key already exists + + +
const EALREADY_EXIST: u64 = 4;
+
+ + + + + +Invalid target bucket size. + + +
const EEXCEED_MAX_BUCKET_SIZE: u64 = 7;
+
+ + + + + +Invalid bucket index. + + +
const EINVALID_BUCKET_INDEX: u64 = 8;
+
+ + + + + +Invalid load threshold percent to trigger split. + + +
const EINVALID_LOAD_THRESHOLD_PERCENT: u64 = 5;
+
+ + + + + +Invalid target bucket size. + + +
const EINVALID_TARGET_BUCKET_SIZE: u64 = 6;
+
+ + + + + +Invalid vector index within a bucket. + + +
const EINVALID_VECTOR_INDEX: u64 = 9;
+
+ + + + + +Smart table capacity must be larger than 0 + + +
const EZERO_CAPACITY: u64 = 2;
+
+ + + + + +## Function `new` + +Create an empty SmartTable with default configurations. + + +
public fun new<K: copy, drop, store, V: store>(): smart_table::SmartTable<K, V>
+
+ + + +
+Implementation + + +
public fun new<K: copy + drop + store, V: store>(): SmartTable<K, V> {
+    new_with_config<K, V>(0, 0, 0)
+}
+
+ + + +
+ + + +## Function `new_with_config` + +Create an empty SmartTable with customized configurations. +num_initial_buckets: The number of buckets on initialization. 0 means using default value. +split_load_threshold: The percent number which once reached, split will be triggered. 0 means using default +value. +target_bucket_size: The target number of entries per bucket, though not guaranteed. 0 means not set and will be +dynamically assigned by the contract code. + + +
public fun new_with_config<K: copy, drop, store, V: store>(num_initial_buckets: u64, split_load_threshold: u8, target_bucket_size: u64): smart_table::SmartTable<K, V>
+
+ + + +
+Implementation + + +
public fun new_with_config<K: copy + drop + store, V: store>(
+    num_initial_buckets: u64,
+    split_load_threshold: u8,
+    target_bucket_size: u64
+): SmartTable<K, V> {
+    assert!(split_load_threshold <= 100, error::invalid_argument(EINVALID_LOAD_THRESHOLD_PERCENT));
+    let buckets = table_with_length::new();
+    table_with_length::add(&mut buckets, 0, vector::empty());
+    let table = SmartTable {
+        buckets,
+        num_buckets: 1,
+        level: 0,
+        size: 0,
+        // The default split load threshold is 75%.
+        split_load_threshold: if (split_load_threshold == 0) { 75 } else { split_load_threshold },
+        target_bucket_size,
+    };
+    // The default number of initial buckets is 2.
+    if (num_initial_buckets == 0) {
+        num_initial_buckets = 2;
+    };
+    while (num_initial_buckets > 1) {
+        num_initial_buckets = num_initial_buckets - 1;
+        split_one_bucket(&mut table);
+    };
+    table
+}
+
+ + + +
+ + + +## Function `destroy_empty` + +Destroy empty table. +Aborts if it's not empty. + + +
public fun destroy_empty<K, V>(self: smart_table::SmartTable<K, V>)
+
+ + + +
+Implementation + + +
public fun destroy_empty<K, V>(self: SmartTable<K, V>) {
+    assert!(self.size == 0, error::invalid_argument(ENOT_EMPTY));
+    let i = 0;
+    while (i < self.num_buckets) {
+        vector::destroy_empty(table_with_length::remove(&mut self.buckets, i));
+        i = i + 1;
+    };
+    let SmartTable { buckets, num_buckets: _, level: _, size: _, split_load_threshold: _, target_bucket_size: _ } = self;
+    table_with_length::destroy_empty(buckets);
+}
+
+ + + +
+ + + +## Function `destroy` + +Destroy a table completely when V has drop. + + +
public fun destroy<K: drop, V: drop>(self: smart_table::SmartTable<K, V>)
+
+ + + +
+Implementation + + +
public fun destroy<K: drop, V: drop>(self: SmartTable<K, V>) {
+    clear(&mut self);
+    destroy_empty(self);
+}
+
+ + + +
+ + + +## Function `clear` + +Clear a table completely when T has drop. + + +
public fun clear<K: drop, V: drop>(self: &mut smart_table::SmartTable<K, V>)
+
+ + + +
+Implementation + + +
public fun clear<K: drop, V: drop>(self: &mut SmartTable<K, V>) {
+    *table_with_length::borrow_mut(&mut self.buckets, 0) = vector::empty();
+    let i = 1;
+    while (i < self.num_buckets) {
+        table_with_length::remove(&mut self.buckets, i);
+        i = i + 1;
+    };
+    self.num_buckets = 1;
+    self.level = 0;
+    self.size = 0;
+}
+
+ + + +
+ + + +## Function `add` + +Add (key, value) pair in the hash map, it may grow one bucket if current load factor exceeds the threshold. +Note it may not split the actual overflowed bucket. Instead, it was determined by num_buckets and level. +For standard linear hash algorithm, it is stored as a variable but num_buckets here could be leveraged. +Abort if key already exists. +Note: This method may occasionally cost much more gas when triggering bucket split. + + +
public fun add<K, V>(self: &mut smart_table::SmartTable<K, V>, key: K, value: V)
+
+ + + +
+Implementation + + +
public fun add<K, V>(self: &mut SmartTable<K, V>, key: K, value: V) {
+    let hash = sip_hash_from_value(&key);
+    let index = bucket_index(self.level, self.num_buckets, hash);
+    let bucket = table_with_length::borrow_mut(&mut self.buckets, index);
+    // We set a per-bucket limit here with an upper bound (10000) that nobody should normally reach.
+    assert!(vector::length(bucket) <= 10000, error::permission_denied(EEXCEED_MAX_BUCKET_SIZE));
+    assert!(vector::all(bucket, | entry | {
+        let e: &Entry<K, V> = entry;
+        &e.key != &key
+    }), error::invalid_argument(EALREADY_EXIST));
+    let e = Entry { hash, key, value };
+    if (self.target_bucket_size == 0) {
+        let estimated_entry_size = max(size_of_val(&e), 1);
+        self.target_bucket_size = max(1024 /* free_write_quota */ / estimated_entry_size, 1);
+    };
+    vector::push_back(bucket, e);
+    self.size = self.size + 1;
+
+    if (load_factor(self) >= (self.split_load_threshold as u64)) {
+        split_one_bucket(self);
+    }
+}
+
+ + + +
+ + + +## Function `add_all` + +Add multiple key/value pairs to the smart table. The keys must not already exist. + + +
public fun add_all<K, V>(self: &mut smart_table::SmartTable<K, V>, keys: vector<K>, values: vector<V>)
+
+ + + +
+Implementation + + +
public fun add_all<K, V>(self: &mut SmartTable<K, V>, keys: vector<K>, values: vector<V>) {
+    vector::zip(keys, values, |key, value| { add(self, key, value); });
+}
+
+ + + +
+ + + +## Function `unzip_entries` + + + +
fun unzip_entries<K: copy, V: copy>(entries: &vector<smart_table::Entry<K, V>>): (vector<K>, vector<V>)
+
+ + + +
+Implementation + + +
inline fun unzip_entries<K: copy, V: copy>(entries: &vector<Entry<K, V>>): (vector<K>, vector<V>) {
+    let keys = vector[];
+    let values = vector[];
+    vector::for_each_ref(entries, |e|{
+        let entry: &Entry<K, V> = e;
+        vector::push_back(&mut keys, entry.key);
+        vector::push_back(&mut values, entry.value);
+    });
+    (keys, values)
+}
+
+ + + +
+ + + +## Function `to_simple_map` + +Convert a smart table to a simple_map, which is supposed to be called mostly by view functions to get an atomic +view of the whole table. +Disclaimer: This function may be costly as the smart table may be huge in size. Use it at your own discretion. + + +
public fun to_simple_map<K: copy, drop, store, V: copy, store>(self: &smart_table::SmartTable<K, V>): simple_map::SimpleMap<K, V>
+
+ + + +
+Implementation + + +
public fun to_simple_map<K: store + copy + drop, V: store + copy>(
+    self: &SmartTable<K, V>,
+): SimpleMap<K, V> {
+    let i = 0;
+    let res = simple_map::new<K, V>();
+    while (i < self.num_buckets) {
+        let (keys, values) = unzip_entries(table_with_length::borrow(&self.buckets, i));
+        simple_map::add_all(&mut res, keys, values);
+        i = i + 1;
+    };
+    res
+}
+
+ + + +
+ + + +## Function `keys` + +Get all keys in a smart table. + +For a large enough smart table this function will fail due to execution gas limits, and +keys_paginated should be used instead. + + +
public fun keys<K: copy, drop, store, V: copy, store>(self: &smart_table::SmartTable<K, V>): vector<K>
+
+ + + +
+Implementation + + +
public fun keys<K: store + copy + drop, V: store + copy>(
+    self: &SmartTable<K, V>
+): vector<K> {
+    let (keys, _, _) = keys_paginated(self, 0, 0, length(self));
+    keys
+}
+
+ + + +
+ + + +## Function `keys_paginated` + +Get keys from a smart table, paginated. + +This function can be used to paginate all keys in a large smart table outside of runtime, +e.g. through chained view function calls. The maximum num_keys_to_get before hitting gas +limits depends on the data types in the smart table. + +When starting pagination, pass starting_bucket_index = starting_vector_index = 0. + +The function will then return a vector of keys, an optional bucket index, and an optional +vector index. The unpacked return indices can then be used as inputs to another pagination +call, which will return a vector of more keys. This process can be repeated until the +returned bucket index and vector index value options are both none, which means that +pagination is complete. For an example, see test_keys(). + + +
public fun keys_paginated<K: copy, drop, store, V: copy, store>(self: &smart_table::SmartTable<K, V>, starting_bucket_index: u64, starting_vector_index: u64, num_keys_to_get: u64): (vector<K>, option::Option<u64>, option::Option<u64>)
+
+ + + +
+Implementation + + +
public fun keys_paginated<K: store + copy + drop, V: store + copy>(
+    self: &SmartTable<K, V>,
+    starting_bucket_index: u64,
+    starting_vector_index: u64,
+    num_keys_to_get: u64,
+): (
+    vector<K>,
+    Option<u64>,
+    Option<u64>,
+) {
+    let num_buckets = self.num_buckets;
+    let buckets_ref = &self.buckets;
+    assert!(starting_bucket_index < num_buckets, EINVALID_BUCKET_INDEX);
+    let bucket_ref = table_with_length::borrow(buckets_ref, starting_bucket_index);
+    let bucket_length = vector::length(bucket_ref);
+    assert!(
+        // In the general case, starting vector index should never be equal to bucket length
+        // because then iteration will attempt to borrow a vector element that is out of bounds.
+        // However starting vector index can be equal to bucket length in the special case of
+        // starting iteration at the beginning of an empty bucket since buckets are never
+        // destroyed, only emptied.
+        starting_vector_index < bucket_length || starting_vector_index == 0,
+        EINVALID_VECTOR_INDEX
+    );
+    let keys = vector[];
+    if (num_keys_to_get == 0) return
+        (keys, option::some(starting_bucket_index), option::some(starting_vector_index));
+    for (bucket_index in starting_bucket_index..num_buckets) {
+        bucket_ref = table_with_length::borrow(buckets_ref, bucket_index);
+        bucket_length = vector::length(bucket_ref);
+        for (vector_index in starting_vector_index..bucket_length) {
+            vector::push_back(&mut keys, vector::borrow(bucket_ref, vector_index).key);
+            num_keys_to_get = num_keys_to_get - 1;
+            if (num_keys_to_get == 0) {
+                vector_index = vector_index + 1;
+                return if (vector_index == bucket_length) {
+                    bucket_index = bucket_index + 1;
+                    if (bucket_index < num_buckets) {
+                        (keys, option::some(bucket_index), option::some(0))
+                    } else {
+                        (keys, option::none(), option::none())
+                    }
+                } else {
+                    (keys, option::some(bucket_index), option::some(vector_index))
+                }
+            };
+        };
+        starting_vector_index = 0; // Start parsing the next bucket at vector index 0.
+    };
+    (keys, option::none(), option::none())
+}
+
+ + + +
+ + + +## Function `split_one_bucket` + +Decide which is the next bucket to split and split it into two with the elements inside the bucket. + + +
fun split_one_bucket<K, V>(self: &mut smart_table::SmartTable<K, V>)
+
+ + + +
+Implementation + + +
fun split_one_bucket<K, V>(self: &mut SmartTable<K, V>) {
+    let new_bucket_index = self.num_buckets;
+    // the next bucket to split is num_buckets without the most significant bit.
+    let to_split = new_bucket_index ^ (1 << self.level);
+    self.num_buckets = new_bucket_index + 1;
+    // if the whole level has been split once, bump the level.
+    if (to_split + 1 == 1 << self.level) {
+        self.level = self.level + 1;
+    };
+    let old_bucket = table_with_length::borrow_mut(&mut self.buckets, to_split);
+    // partition the bucket, [0..p) stays in old bucket, [p..len) goes to new bucket
+    let p = vector::partition(old_bucket, |e| {
+        let entry: &Entry<K, V> = e; // Explicit type to satisfy compiler
+        bucket_index(self.level, self.num_buckets, entry.hash) != new_bucket_index
+    });
+    let new_bucket = vector::trim_reverse(old_bucket, p);
+    table_with_length::add(&mut self.buckets, new_bucket_index, new_bucket);
+}
+
+ + + +
+ + + +## Function `bucket_index` + +Return the expected bucket index to find the hash. +Basically, it use different base 1 << level vs 1 << (level + 1) in modulo operation based on the target +bucket index compared to the index of the next bucket to split. + + +
fun bucket_index(level: u8, num_buckets: u64, hash: u64): u64
+
+ + + +
+Implementation + + +
fun bucket_index(level: u8, num_buckets: u64, hash: u64): u64 {
+    let index = hash % (1 << (level + 1));
+    if (index < num_buckets) {
+        // in existing bucket
+        index
+    } else {
+        // in unsplit bucket
+        index % (1 << level)
+    }
+}
+
+ + + +
+ + + +## Function `borrow` + +Acquire an immutable reference to the value which key maps to. +Aborts if there is no entry for key. + + +
public fun borrow<K: drop, V>(self: &smart_table::SmartTable<K, V>, key: K): &V
+
+ + + +
+Implementation + + +
public fun borrow<K: drop, V>(self: &SmartTable<K, V>, key: K): &V {
+    let index = bucket_index(self.level, self.num_buckets, sip_hash_from_value(&key));
+    let bucket = table_with_length::borrow(&self.buckets, index);
+    let i = 0;
+    let len = vector::length(bucket);
+    while (i < len) {
+        let entry = vector::borrow(bucket, i);
+        if (&entry.key == &key) {
+            return &entry.value
+        };
+        i = i + 1;
+    };
+    abort error::invalid_argument(ENOT_FOUND)
+}
+
+ + + +
+ + + +## Function `borrow_with_default` + +Acquire an immutable reference to the value which key maps to. +Returns specified default value if there is no entry for key. + + +
public fun borrow_with_default<K: copy, drop, V>(self: &smart_table::SmartTable<K, V>, key: K, default: &V): &V
+
+ + + +
+Implementation + + +
public fun borrow_with_default<K: copy + drop, V>(self: &SmartTable<K, V>, key: K, default: &V): &V {
+    if (!contains(self, copy key)) {
+        default
+    } else {
+        borrow(self, copy key)
+    }
+}
+
+ + + +
+ + + +## Function `borrow_mut` + +Acquire a mutable reference to the value which key maps to. +Aborts if there is no entry for key. + + +
public fun borrow_mut<K: drop, V>(self: &mut smart_table::SmartTable<K, V>, key: K): &mut V
+
+ + + +
+Implementation + + +
public fun borrow_mut<K: drop, V>(self: &mut SmartTable<K, V>, key: K): &mut V {
+    let index = bucket_index(self.level, self.num_buckets, sip_hash_from_value(&key));
+    let bucket = table_with_length::borrow_mut(&mut self.buckets, index);
+    let i = 0;
+    let len = vector::length(bucket);
+    while (i < len) {
+        let entry = vector::borrow_mut(bucket, i);
+        if (&entry.key == &key) {
+            return &mut entry.value
+        };
+        i = i + 1;
+    };
+    abort error::invalid_argument(ENOT_FOUND)
+}
+
+ + + +
+ + + +## Function `borrow_mut_with_default` + +Acquire a mutable reference to the value which key maps to. +Insert the pair (key, default) first if there is no entry for key. + + +
public fun borrow_mut_with_default<K: copy, drop, V: drop>(self: &mut smart_table::SmartTable<K, V>, key: K, default: V): &mut V
+
+ + + +
+Implementation + + +
public fun borrow_mut_with_default<K: copy + drop, V: drop>(
+    self: &mut SmartTable<K, V>,
+    key: K,
+    default: V
+): &mut V {
+    if (!contains(self, copy key)) {
+        add(self, copy key, default)
+    };
+    borrow_mut(self, key)
+}
+
+ + + +
+ + + +## Function `contains` + +Returns true iff table contains an entry for key. + + +
public fun contains<K: drop, V>(self: &smart_table::SmartTable<K, V>, key: K): bool
+
+ + + +
+Implementation + + +
public fun contains<K: drop, V>(self: &SmartTable<K, V>, key: K): bool {
+    let hash = sip_hash_from_value(&key);
+    let index = bucket_index(self.level, self.num_buckets, hash);
+    let bucket = table_with_length::borrow(&self.buckets, index);
+    vector::any(bucket, | entry | {
+        let e: &Entry<K, V> = entry;
+        e.hash == hash && &e.key == &key
+    })
+}
+
+ + + +
+ + + +## Function `remove` + +Remove from table and return the value which key maps to. +Aborts if there is no entry for key. + + +
public fun remove<K: copy, drop, V>(self: &mut smart_table::SmartTable<K, V>, key: K): V
+
+ + + +
+Implementation + + +
public fun remove<K: copy + drop, V>(self: &mut SmartTable<K, V>, key: K): V {
+    let index = bucket_index(self.level, self.num_buckets, sip_hash_from_value(&key));
+    let bucket = table_with_length::borrow_mut(&mut self.buckets, index);
+    let i = 0;
+    let len = vector::length(bucket);
+    while (i < len) {
+        let entry = vector::borrow(bucket, i);
+        if (&entry.key == &key) {
+            let Entry { hash: _, key: _, value } = vector::swap_remove(bucket, i);
+            self.size = self.size - 1;
+            return value
+        };
+        i = i + 1;
+    };
+    abort error::invalid_argument(ENOT_FOUND)
+}
+
+ + + +
+ + + +## Function `upsert` + +Insert the pair (key, value) if there is no entry for key. +update the value of the entry for key to value otherwise + + +
public fun upsert<K: copy, drop, V: drop>(self: &mut smart_table::SmartTable<K, V>, key: K, value: V)
+
+ + + +
+Implementation + + +
public fun upsert<K: copy + drop, V: drop>(self: &mut SmartTable<K, V>, key: K, value: V) {
+    if (!contains(self, copy key)) {
+        add(self, copy key, value)
+    } else {
+        let ref = borrow_mut(self, key);
+        *ref = value;
+    };
+}
+
+ + + +
+ + + +## Function `length` + +Returns the length of the table, i.e. the number of entries. + + +
public fun length<K, V>(self: &smart_table::SmartTable<K, V>): u64
+
+ + + +
+Implementation + + +
public fun length<K, V>(self: &SmartTable<K, V>): u64 {
+    self.size
+}
+
+ + + +
+ + + +## Function `load_factor` + +Return the load factor of the hashtable. + + +
public fun load_factor<K, V>(self: &smart_table::SmartTable<K, V>): u64
+
+ + + +
+Implementation + + +
public fun load_factor<K, V>(self: &SmartTable<K, V>): u64 {
+    self.size * 100 / self.num_buckets / self.target_bucket_size
+}
+
+ + + +
+ + + +## Function `update_split_load_threshold` + +Update split_load_threshold. + + +
public fun update_split_load_threshold<K, V>(self: &mut smart_table::SmartTable<K, V>, split_load_threshold: u8)
+
+ + + +
+Implementation + + +
public fun update_split_load_threshold<K, V>(self: &mut SmartTable<K, V>, split_load_threshold: u8) {
+    assert!(
+        split_load_threshold <= 100 && split_load_threshold > 0,
+        error::invalid_argument(EINVALID_LOAD_THRESHOLD_PERCENT)
+    );
+    self.split_load_threshold = split_load_threshold;
+}
+
+ + + +
+ + + +## Function `update_target_bucket_size` + +Update target_bucket_size. + + +
public fun update_target_bucket_size<K, V>(self: &mut smart_table::SmartTable<K, V>, target_bucket_size: u64)
+
+ + + +
+Implementation + + +
public fun update_target_bucket_size<K, V>(self: &mut SmartTable<K, V>, target_bucket_size: u64) {
+    assert!(target_bucket_size > 0, error::invalid_argument(EINVALID_TARGET_BUCKET_SIZE));
+    self.target_bucket_size = target_bucket_size;
+}
+
+ + + +
+ + + +## Function `for_each_ref` + +Apply the function to a reference of each key-value pair in the table. + + +
public fun for_each_ref<K, V>(self: &smart_table::SmartTable<K, V>, f: |(&K, &V)|)
+
+ + + +
+Implementation + + +
public inline fun for_each_ref<K, V>(self: &SmartTable<K, V>, f: |&K, &V|) {
+    let i = 0;
+    while (i < aptos_std::smart_table::num_buckets(self)) {
+        vector::for_each_ref(
+            aptos_std::table_with_length::borrow(aptos_std::smart_table::borrow_buckets(self), i),
+            |elem| {
+                let (key, value) = aptos_std::smart_table::borrow_kv(elem);
+                f(key, value)
+            }
+        );
+        i = i + 1;
+    }
+}
+
+ + + +
+ + + +## Function `for_each_mut` + +Apply the function to a mutable reference of each key-value pair in the table. + + +
public fun for_each_mut<K, V>(self: &mut smart_table::SmartTable<K, V>, f: |(&K, &mut V)|)
+
+ + + +
+Implementation + + +
public inline fun for_each_mut<K, V>(self: &mut SmartTable<K, V>, f: |&K, &mut V|) {
+    let i = 0;
+    while (i < aptos_std::smart_table::num_buckets(self)) {
+        vector::for_each_mut(
+            table_with_length::borrow_mut(aptos_std::smart_table::borrow_buckets_mut(self), i),
+            |elem| {
+                let (key, value) = aptos_std::smart_table::borrow_kv_mut(elem);
+                f(key, value)
+            }
+        );
+        i = i + 1;
+    };
+}
+
+ + + +
+ + + +## Function `map_ref` + +Map the function over the references of key-value pairs in the table without modifying it. + + +
public fun map_ref<K: copy, drop, store, V1, V2: store>(self: &smart_table::SmartTable<K, V1>, f: |&V1|V2): smart_table::SmartTable<K, V2>
+
+ + + +
+Implementation + + +
public inline fun map_ref<K: copy + drop + store, V1, V2: store>(
+    self: &SmartTable<K, V1>,
+    f: |&V1|V2
+): SmartTable<K, V2> {
+    let new_table = new<K, V2>();
+    for_each_ref(self, |key, value| add(&mut new_table, *key, f(value)));
+    new_table
+}
+
+ + + +
+ + + +## Function `any` + +Return true if any key-value pair in the table satisfies the predicate. + + +
public fun any<K, V>(self: &smart_table::SmartTable<K, V>, p: |(&K, &V)|bool): bool
+
+ + + +
+Implementation + + +
public inline fun any<K, V>(
+    self: &SmartTable<K, V>,
+    p: |&K, &V|bool
+): bool {
+    let found = false;
+    let i = 0;
+    while (i < aptos_std::smart_table::num_buckets(self)) {
+        found = vector::any(table_with_length::borrow(aptos_std::smart_table::borrow_buckets(self), i), |elem| {
+            let (key, value) = aptos_std::smart_table::borrow_kv(elem);
+            p(key, value)
+        });
+        if (found) break;
+        i = i + 1;
+    };
+    found
+}
+
+ + + +
+ + + +## Function `borrow_kv` + + + +
public fun borrow_kv<K, V>(self: &smart_table::Entry<K, V>): (&K, &V)
+
+ + + +
+Implementation + + +
public fun borrow_kv<K, V>(self: &Entry<K, V>): (&K, &V) {
+    (&self.key, &self.value)
+}
+
+ + + +
+ + + +## Function `borrow_kv_mut` + + + +
public fun borrow_kv_mut<K, V>(self: &mut smart_table::Entry<K, V>): (&mut K, &mut V)
+
+ + + +
+Implementation + + +
public fun borrow_kv_mut<K, V>(self: &mut Entry<K, V>): (&mut K, &mut V) {
+    (&mut self.key, &mut self.value)
+}
+
+ + + +
+ + + +## Function `num_buckets` + + + +
public fun num_buckets<K, V>(self: &smart_table::SmartTable<K, V>): u64
+
+ + + +
+Implementation + + +
public fun num_buckets<K, V>(self: &SmartTable<K, V>): u64 {
+    self.num_buckets
+}
+
+ + + +
+ + + +## Function `borrow_buckets` + + + +
public fun borrow_buckets<K, V>(self: &smart_table::SmartTable<K, V>): &table_with_length::TableWithLength<u64, vector<smart_table::Entry<K, V>>>
+
+ + + +
+Implementation + + +
public fun borrow_buckets<K, V>(self: &SmartTable<K, V>): &TableWithLength<u64, vector<Entry<K, V>>> {
+    &self.buckets
+}
+
+ + + +
+ + + +## Function `borrow_buckets_mut` + + + +
public fun borrow_buckets_mut<K, V>(self: &mut smart_table::SmartTable<K, V>): &mut table_with_length::TableWithLength<u64, vector<smart_table::Entry<K, V>>>
+
+ + + +
+Implementation + + +
public fun borrow_buckets_mut<K, V>(self: &mut SmartTable<K, V>): &mut TableWithLength<u64, vector<Entry<K, V>>> {
+    &mut self.buckets
+}
+
+ + + +
+ + + +## Specification + + + + +### Struct `SmartTable` + + +
struct SmartTable<K, V> has store
+
+ + + +
+
+buckets: table_with_length::TableWithLength<u64, vector<smart_table::Entry<K, V>>> +
+
+ +
+
+num_buckets: u64 +
+
+ +
+
+level: u8 +
+
+ +
+
+size: u64 +
+
+ +
+
+split_load_threshold: u8 +
+
+ +
+
+target_bucket_size: u64 +
+
+ +
+
+ + + +
pragma intrinsic = map,
+    map_new = new,
+    map_destroy_empty = destroy_empty,
+    map_len = length,
+    map_has_key = contains,
+    map_add_no_override = add,
+    map_add_override_if_exists = upsert,
+    map_del_must_exist = remove,
+    map_borrow = borrow,
+    map_borrow_mut = borrow_mut,
+    map_borrow_mut_with_default = borrow_mut_with_default,
+    map_spec_get = spec_get,
+    map_spec_set = spec_set,
+    map_spec_del = spec_remove,
+    map_spec_len = spec_len,
+map_spec_has_key = spec_contains;
+
+ + + + + +### Function `new_with_config` + + +
public fun new_with_config<K: copy, drop, store, V: store>(num_initial_buckets: u64, split_load_threshold: u8, target_bucket_size: u64): smart_table::SmartTable<K, V>
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `destroy` + + +
public fun destroy<K: drop, V: drop>(self: smart_table::SmartTable<K, V>)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `clear` + + +
public fun clear<K: drop, V: drop>(self: &mut smart_table::SmartTable<K, V>)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `add_all` + + +
public fun add_all<K, V>(self: &mut smart_table::SmartTable<K, V>, keys: vector<K>, values: vector<V>)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `to_simple_map` + + +
public fun to_simple_map<K: copy, drop, store, V: copy, store>(self: &smart_table::SmartTable<K, V>): simple_map::SimpleMap<K, V>
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `keys` + + +
public fun keys<K: copy, drop, store, V: copy, store>(self: &smart_table::SmartTable<K, V>): vector<K>
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `keys_paginated` + + +
public fun keys_paginated<K: copy, drop, store, V: copy, store>(self: &smart_table::SmartTable<K, V>, starting_bucket_index: u64, starting_vector_index: u64, num_keys_to_get: u64): (vector<K>, option::Option<u64>, option::Option<u64>)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `split_one_bucket` + + +
fun split_one_bucket<K, V>(self: &mut smart_table::SmartTable<K, V>)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `bucket_index` + + +
fun bucket_index(level: u8, num_buckets: u64, hash: u64): u64
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `borrow_with_default` + + +
public fun borrow_with_default<K: copy, drop, V>(self: &smart_table::SmartTable<K, V>, key: K, default: &V): &V
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `load_factor` + + +
public fun load_factor<K, V>(self: &smart_table::SmartTable<K, V>): u64
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `update_split_load_threshold` + + +
public fun update_split_load_threshold<K, V>(self: &mut smart_table::SmartTable<K, V>, split_load_threshold: u8)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `update_target_bucket_size` + + +
public fun update_target_bucket_size<K, V>(self: &mut smart_table::SmartTable<K, V>, target_bucket_size: u64)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `borrow_kv` + + +
public fun borrow_kv<K, V>(self: &smart_table::Entry<K, V>): (&K, &V)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `borrow_kv_mut` + + +
public fun borrow_kv_mut<K, V>(self: &mut smart_table::Entry<K, V>): (&mut K, &mut V)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `num_buckets` + + +
public fun num_buckets<K, V>(self: &smart_table::SmartTable<K, V>): u64
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `borrow_buckets` + + +
public fun borrow_buckets<K, V>(self: &smart_table::SmartTable<K, V>): &table_with_length::TableWithLength<u64, vector<smart_table::Entry<K, V>>>
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `borrow_buckets_mut` + + +
public fun borrow_buckets_mut<K, V>(self: &mut smart_table::SmartTable<K, V>): &mut table_with_length::TableWithLength<u64, vector<smart_table::Entry<K, V>>>
+
+ + + + +
pragma verify = false;
+
+ + + + + + + +
native fun spec_len<K, V>(t: SmartTable<K, V>): num;
+
+ + + + + + + +
native fun spec_contains<K, V>(t: SmartTable<K, V>, k: K): bool;
+
+ + + + + + + +
native fun spec_set<K, V>(t: SmartTable<K, V>, k: K, v: V): SmartTable<K, V>;
+
+ + + + + + + +
native fun spec_remove<K, V>(t: SmartTable<K, V>, k: K): SmartTable<K, V>;
+
+ + + + + + + +
native fun spec_get<K, V>(t: SmartTable<K, V>, k: K): V;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/smart_vector.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/smart_vector.md new file mode 100644 index 0000000000000..4097099654eca --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/smart_vector.md @@ -0,0 +1,1728 @@ + + + +# Module `0x1::smart_vector` + + + +- [Struct `SmartVector`](#0x1_smart_vector_SmartVector) +- [Constants](#@Constants_0) +- [Function `new`](#0x1_smart_vector_new) +- [Function `empty`](#0x1_smart_vector_empty) +- [Function `empty_with_config`](#0x1_smart_vector_empty_with_config) +- [Function `singleton`](#0x1_smart_vector_singleton) +- [Function `destroy_empty`](#0x1_smart_vector_destroy_empty) +- [Function `destroy`](#0x1_smart_vector_destroy) +- [Function `clear`](#0x1_smart_vector_clear) +- [Function `borrow`](#0x1_smart_vector_borrow) +- [Function `borrow_mut`](#0x1_smart_vector_borrow_mut) +- [Function `append`](#0x1_smart_vector_append) +- [Function `add_all`](#0x1_smart_vector_add_all) +- [Function `to_vector`](#0x1_smart_vector_to_vector) +- [Function `push_back`](#0x1_smart_vector_push_back) +- [Function `pop_back`](#0x1_smart_vector_pop_back) +- [Function `remove`](#0x1_smart_vector_remove) +- [Function `swap_remove`](#0x1_smart_vector_swap_remove) +- [Function `swap`](#0x1_smart_vector_swap) +- [Function `reverse`](#0x1_smart_vector_reverse) +- [Function `index_of`](#0x1_smart_vector_index_of) +- [Function `contains`](#0x1_smart_vector_contains) +- [Function `length`](#0x1_smart_vector_length) +- [Function `is_empty`](#0x1_smart_vector_is_empty) +- [Function `for_each`](#0x1_smart_vector_for_each) +- [Function `for_each_reverse`](#0x1_smart_vector_for_each_reverse) +- [Function `for_each_ref`](#0x1_smart_vector_for_each_ref) +- [Function `for_each_mut`](#0x1_smart_vector_for_each_mut) +- [Function `enumerate_ref`](#0x1_smart_vector_enumerate_ref) +- [Function 
`enumerate_mut`](#0x1_smart_vector_enumerate_mut) +- [Function `fold`](#0x1_smart_vector_fold) +- [Function `foldr`](#0x1_smart_vector_foldr) +- [Function `map_ref`](#0x1_smart_vector_map_ref) +- [Function `map`](#0x1_smart_vector_map) +- [Function `filter`](#0x1_smart_vector_filter) +- [Function `zip`](#0x1_smart_vector_zip) +- [Function `zip_reverse`](#0x1_smart_vector_zip_reverse) +- [Function `zip_ref`](#0x1_smart_vector_zip_ref) +- [Function `zip_mut`](#0x1_smart_vector_zip_mut) +- [Function `zip_map`](#0x1_smart_vector_zip_map) +- [Function `zip_map_ref`](#0x1_smart_vector_zip_map_ref) +- [Specification](#@Specification_1) + - [Struct `SmartVector`](#@Specification_1_SmartVector) + - [Function `empty`](#@Specification_1_empty) + - [Function `empty_with_config`](#@Specification_1_empty_with_config) + - [Function `destroy_empty`](#@Specification_1_destroy_empty) + - [Function `borrow`](#@Specification_1_borrow) + - [Function `append`](#@Specification_1_append) + - [Function `push_back`](#@Specification_1_push_back) + - [Function `pop_back`](#@Specification_1_pop_back) + - [Function `remove`](#@Specification_1_remove) + - [Function `swap_remove`](#@Specification_1_swap_remove) + - [Function `swap`](#@Specification_1_swap) + - [Function `length`](#@Specification_1_length) + + +
use 0x1::big_vector;
+use 0x1::error;
+use 0x1::math64;
+use 0x1::option;
+use 0x1::type_info;
+use 0x1::vector;
+
+ + + + + +## Struct `SmartVector` + +A scalable vector implementation based on tables. Ts are grouped into buckets with bucket_size. +The option wrapping BigVector saves space in the metadata associated with BigVector when smart_vector is +so small that the inline_vec vector can hold all the data. + + +
struct SmartVector<T> has store
+
+ + + +
+Fields + + +
+
+inline_vec: vector<T> +
+
+ +
+
+big_vec: option::Option<big_vector::BigVector<T>> +
+
+ +
+
+inline_capacity: option::Option<u64> +
+
+ +
+
+bucket_size: option::Option<u64> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Vector index is out of bounds + + +
const EINDEX_OUT_OF_BOUNDS: u64 = 1;
+
+ + + + + +Cannot pop back from an empty vector + + +
const EVECTOR_EMPTY: u64 = 3;
+
+ + + + + +Cannot destroy a non-empty vector + + +
const EVECTOR_NOT_EMPTY: u64 = 2;
+
+ + + + + +bucket_size cannot be 0 + + +
const EZERO_BUCKET_SIZE: u64 = 4;
+
+ + + + + +The length of the smart vectors are not equal. + + +
const ESMART_VECTORS_LENGTH_MISMATCH: u64 = 131077;
+
+ + + + + +## Function `new` + +Regular Vector API +Create an empty vector using default logic to estimate inline_capacity and bucket_size, which may be +inaccurate. +This is exactly the same as empty() but is more standardized as all other data structures have new(). + + +
public fun new<T: store>(): smart_vector::SmartVector<T>
+
+ + + +
+Implementation + + +
public fun new<T: store>(): SmartVector<T> {
+    empty()
+}
+
+ + + +
+ + + +## Function `empty` + +Create an empty vector using default logic to estimate inline_capacity and bucket_size, which may be +inaccurate. + + +
#[deprecated]
+public fun empty<T: store>(): smart_vector::SmartVector<T>
+
+ + + +
+Implementation + + +
public fun empty<T: store>(): SmartVector<T> {
+    SmartVector {
+        inline_vec: vector[],
+        big_vec: option::none(),
+        inline_capacity: option::none(),
+        bucket_size: option::none(),
+    }
+}
+
+ + + +
+ + + +## Function `empty_with_config` + +Create an empty vector with customized config. +When inline_capacity = 0, SmartVector degrades to a wrapper of BigVector. + + +
public fun empty_with_config<T: store>(inline_capacity: u64, bucket_size: u64): smart_vector::SmartVector<T>
+
+ + + +
+Implementation + + +
public fun empty_with_config<T: store>(inline_capacity: u64, bucket_size: u64): SmartVector<T> {
+    assert!(bucket_size > 0, error::invalid_argument(EZERO_BUCKET_SIZE));
+    SmartVector {
+        inline_vec: vector[],
+        big_vec: option::none(),
+        inline_capacity: option::some(inline_capacity),
+        bucket_size: option::some(bucket_size),
+    }
+}
+
+ + + +
+ + + +## Function `singleton` + +Create a vector of length 1 containing the passed in T. + + +
public fun singleton<T: store>(element: T): smart_vector::SmartVector<T>
+
+ + + +
+Implementation + + +
public fun singleton<T: store>(element: T): SmartVector<T> {
+    let v = empty();
+    push_back(&mut v, element);
+    v
+}
+
+ + + +
+ + + +## Function `destroy_empty` + +Destroy the vector self. +Aborts if self is not empty. + + +
public fun destroy_empty<T>(self: smart_vector::SmartVector<T>)
+
+ + + +
+Implementation + + +
public fun destroy_empty<T>(self: SmartVector<T>) {
+    assert!(is_empty(&self), error::invalid_argument(EVECTOR_NOT_EMPTY));
+    let SmartVector { inline_vec, big_vec, inline_capacity: _, bucket_size: _ } = self;
+    vector::destroy_empty(inline_vec);
+    option::destroy_none(big_vec);
+}
+
+ + + +
+ + + +## Function `destroy` + +Destroy a vector completely when T has drop. + + +
public fun destroy<T: drop>(self: smart_vector::SmartVector<T>)
+
+ + + +
+Implementation + + +
public fun destroy<T: drop>(self: SmartVector<T>) {
+    clear(&mut self);
+    destroy_empty(self);
+}
+
+ + + +
+ + + +## Function `clear` + +Clear a vector completely when T has drop. + + +
public fun clear<T: drop>(self: &mut smart_vector::SmartVector<T>)
+
+ + + +
+Implementation + + +
public fun clear<T: drop>(self: &mut SmartVector<T>) {
+    self.inline_vec = vector[];
+    if (option::is_some(&self.big_vec)) {
+        big_vector::destroy(option::extract(&mut self.big_vec));
+    }
+}
+
+ + + +
+ + + +## Function `borrow` + +Acquire an immutable reference to the ith T of the vector self. +Aborts if i is out of bounds. + + +
public fun borrow<T>(self: &smart_vector::SmartVector<T>, i: u64): &T
+
+ + + +
+Implementation + + +
public fun borrow<T>(self: &SmartVector<T>, i: u64): &T {
+    assert!(i < length(self), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
+    let inline_len = vector::length(&self.inline_vec);
+    if (i < inline_len) {
+        vector::borrow(&self.inline_vec, i)
+    } else {
+        big_vector::borrow(option::borrow(&self.big_vec), i - inline_len)
+    }
+}
+
+ + + +
+ + + +## Function `borrow_mut` + +Return a mutable reference to the ith T in the vector self. +Aborts if i is out of bounds. + + +
public fun borrow_mut<T>(self: &mut smart_vector::SmartVector<T>, i: u64): &mut T
+
+ + + +
+Implementation + + +
public fun borrow_mut<T>(self: &mut SmartVector<T>, i: u64): &mut T {
+    assert!(i < length(self), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
+    let inline_len = vector::length(&self.inline_vec);
+    if (i < inline_len) {
+        vector::borrow_mut(&mut self.inline_vec, i)
+    } else {
+        big_vector::borrow_mut(option::borrow_mut(&mut self.big_vec), i - inline_len)
+    }
+}
+
+ + + +
+ + + +## Function `append` + +Empty and destroy the other vector, and push each of the Ts in the other vector onto the self vector in the +same order as they occurred in other. +Disclaimer: This function may be costly. Use it at your own discretion. + + +
public fun append<T: store>(self: &mut smart_vector::SmartVector<T>, other: smart_vector::SmartVector<T>)
+
+ + + +
+Implementation + + +
public fun append<T: store>(self: &mut SmartVector<T>, other: SmartVector<T>) {
+    let other_len = length(&other);
+    let half_other_len = other_len / 2;
+    let i = 0;
+    while (i < half_other_len) {
+        push_back(self, swap_remove(&mut other, i));
+        i = i + 1;
+    };
+    while (i < other_len) {
+        push_back(self, pop_back(&mut other));
+        i = i + 1;
+    };
+    destroy_empty(other);
+}
+
+ + + +
+ + + +## Function `add_all` + +Add multiple values to the vector at once. + + +
public fun add_all<T: store>(self: &mut smart_vector::SmartVector<T>, vals: vector<T>)
+
+ + + +
+Implementation + + +
public fun add_all<T: store>(self: &mut SmartVector<T>, vals: vector<T>) {
+    vector::for_each(vals, |val| { push_back(self, val); })
+}
+
+ + + +
+ + + +## Function `to_vector` + +Convert a smart vector to a native vector, which is supposed to be called mostly by view functions to get an +atomic view of the whole vector. +Disclaimer: This function may be costly as the smart vector may be huge in size. Use it at your own discretion. + + +
public fun to_vector<T: copy, store>(self: &smart_vector::SmartVector<T>): vector<T>
+
+ + + +
+Implementation + + +
public fun to_vector<T: store + copy>(self: &SmartVector<T>): vector<T> {
+    let res = self.inline_vec;
+    if (option::is_some(&self.big_vec)) {
+        let big_vec = option::borrow(&self.big_vec);
+        vector::append(&mut res, big_vector::to_vector(big_vec));
+    };
+    res
+}
+
+ + + +
+ + + +## Function `push_back` + +Add T val to the end of the vector self. It grows the buckets when the current buckets are full. +This operation will cost more gas when it adds a new bucket. + + +
public fun push_back<T: store>(self: &mut smart_vector::SmartVector<T>, val: T)
+
+ + + +
+Implementation + + +
public fun push_back<T: store>(self: &mut SmartVector<T>, val: T) {
+    let len = length(self);
+    let inline_len = vector::length(&self.inline_vec);
+    if (len == inline_len) {
+        let bucket_size = if (option::is_some(&self.inline_capacity)) {
+            if (len < *option::borrow(&self.inline_capacity)) {
+                vector::push_back(&mut self.inline_vec, val);
+                return
+            };
+            *option::borrow(&self.bucket_size)
+        } else {
+            let val_size = size_of_val(&val);
+            if (val_size * (inline_len + 1) < 150 /* magic number */) {
+                vector::push_back(&mut self.inline_vec, val);
+                return
+            };
+            let estimated_avg_size = max((size_of_val(&self.inline_vec) + val_size) / (inline_len + 1), 1);
+            max(1024 /* free_write_quota */ / estimated_avg_size, 1)
+        };
+        option::fill(&mut self.big_vec, big_vector::empty(bucket_size));
+    };
+    big_vector::push_back(option::borrow_mut(&mut self.big_vec), val);
+}
+
+ + + +
+ + + +## Function `pop_back` + +Pop a T from the end of vector self. It does shrink the buckets if they're empty. +Aborts if self is empty. + + +
public fun pop_back<T>(self: &mut smart_vector::SmartVector<T>): T
+
+ + + +
+Implementation + + +
public fun pop_back<T>(self: &mut SmartVector<T>): T {
+    assert!(!is_empty(self), error::invalid_state(EVECTOR_EMPTY));
+    let big_vec_wrapper = &mut self.big_vec;
+    if (option::is_some(big_vec_wrapper)) {
+        let big_vec = option::extract(big_vec_wrapper);
+        let val = big_vector::pop_back(&mut big_vec);
+        if (big_vector::is_empty(&big_vec)) {
+            big_vector::destroy_empty(big_vec)
+        } else {
+            option::fill(big_vec_wrapper, big_vec);
+        };
+        val
+    } else {
+        vector::pop_back(&mut self.inline_vec)
+    }
+}
+
+ + + +
+ + + +## Function `remove` + +Remove the T at index i in the vector self and return the owned value that was previously stored at i in self. +All Ts occurring at indices greater than i will be shifted down by 1. Will abort if i is out of bounds. +Disclaimer: This function may be costly. Use it at your own discretion. + + +
public fun remove<T>(self: &mut smart_vector::SmartVector<T>, i: u64): T
+
+ + + +
+Implementation + + +
public fun remove<T>(self: &mut SmartVector<T>, i: u64): T {
+    let len = length(self);
+    assert!(i < len, error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
+    let inline_len = vector::length(&self.inline_vec);
+    if (i < inline_len) {
+        vector::remove(&mut self.inline_vec, i)
+    } else {
+        let big_vec_wrapper = &mut self.big_vec;
+        let big_vec = option::extract(big_vec_wrapper);
+        let val = big_vector::remove(&mut big_vec, i - inline_len);
+        if (big_vector::is_empty(&big_vec)) {
+            big_vector::destroy_empty(big_vec)
+        } else {
+            option::fill(big_vec_wrapper, big_vec);
+        };
+        val
+    }
+}
+
+ + + +
+ + + +## Function `swap_remove` + +Swap the ith T of the vector self with the last T and then pop the vector. +This is O(1), but does not preserve ordering of Ts in the vector. +Aborts if i is out of bounds. + + +
public fun swap_remove<T>(self: &mut smart_vector::SmartVector<T>, i: u64): T
+
+ + + +
+Implementation + + +
public fun swap_remove<T>(self: &mut SmartVector<T>, i: u64): T {
+    let len = length(self);
+    assert!(i < len, error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
+    let inline_len = vector::length(&self.inline_vec);
+    let big_vec_wrapper = &mut self.big_vec;
+    let inline_vec = &mut self.inline_vec;
+    if (i >= inline_len) {
+        let big_vec = option::extract(big_vec_wrapper);
+        let val = big_vector::swap_remove(&mut big_vec, i - inline_len);
+        if (big_vector::is_empty(&big_vec)) {
+            big_vector::destroy_empty(big_vec)
+        } else {
+            option::fill(big_vec_wrapper, big_vec);
+        };
+        val
+    } else {
+        if (inline_len < len) {
+            let big_vec = option::extract(big_vec_wrapper);
+            let last_from_big_vec = big_vector::pop_back(&mut big_vec);
+            if (big_vector::is_empty(&big_vec)) {
+                big_vector::destroy_empty(big_vec)
+            } else {
+                option::fill(big_vec_wrapper, big_vec);
+            };
+            vector::push_back(inline_vec, last_from_big_vec);
+        };
+        vector::swap_remove(inline_vec, i)
+    }
+}
+
+ + + +
+ + + +## Function `swap` + +Swap the Ts at the i'th and j'th indices in the vector self. Will abort if either of i or j is out of bounds +for self. + + +
public fun swap<T: store>(self: &mut smart_vector::SmartVector<T>, i: u64, j: u64)
+
+ + + +
+Implementation + + +
public fun swap<T: store>(self: &mut SmartVector<T>, i: u64, j: u64) {
+    if (i > j) {
+        return swap(self, j, i)
+    };
+    let len = length(self);
+    assert!(j < len, error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
+    let inline_len = vector::length(&self.inline_vec);
+    if (i >= inline_len) {
+        big_vector::swap(option::borrow_mut(&mut self.big_vec), i - inline_len, j - inline_len);
+    } else if (j < inline_len) {
+        vector::swap(&mut self.inline_vec, i, j);
+    } else {
+        let big_vec = option::borrow_mut(&mut self.big_vec);
+        let inline_vec = &mut self.inline_vec;
+        let element_i = vector::swap_remove(inline_vec, i);
+        let element_j = big_vector::swap_remove(big_vec, j - inline_len);
+        vector::push_back(inline_vec, element_j);
+        vector::swap(inline_vec, i, inline_len - 1);
+        big_vector::push_back(big_vec, element_i);
+        big_vector::swap(big_vec, j - inline_len, len - inline_len - 1);
+    }
+}
+
+ + + +
+ + + +## Function `reverse` + +Reverse the order of the Ts in the vector self in-place. +Disclaimer: This function may be costly. Use it at your own discretion. + + +
public fun reverse<T: store>(self: &mut smart_vector::SmartVector<T>)
+
+ + + +
+Implementation + + +
public fun reverse<T: store>(self: &mut SmartVector<T>) {
+    let inline_len = vector::length(&self.inline_vec);
+    let i = 0;
+    let new_inline_vec = vector[];
+    // Push the last `inline_len` Ts into a temp vector.
+    while (i < inline_len) {
+        vector::push_back(&mut new_inline_vec, pop_back(self));
+        i = i + 1;
+    };
+    vector::reverse(&mut new_inline_vec);
+    // Reverse the big_vector left if exists.
+    if (option::is_some(&self.big_vec)) {
+        big_vector::reverse(option::borrow_mut(&mut self.big_vec));
+    };
+    // Mem::swap the two vectors.
+    let temp_vec = vector[];
+    while (!vector::is_empty(&mut self.inline_vec)) {
+        vector::push_back(&mut temp_vec, vector::pop_back(&mut self.inline_vec));
+    };
+    vector::reverse(&mut temp_vec);
+    while (!vector::is_empty(&mut new_inline_vec)) {
+        vector::push_back(&mut self.inline_vec, vector::pop_back(&mut new_inline_vec));
+    };
+    vector::destroy_empty(new_inline_vec);
+    // Push the rest Ts originally left in inline_vector back to the end of the smart vector.
+    while (!vector::is_empty(&mut temp_vec)) {
+        push_back(self, vector::pop_back(&mut temp_vec));
+    };
+    vector::destroy_empty(temp_vec);
+}
+
+ + + +
+ + + +## Function `index_of` + +Return (true, i) if val is in the vector self at index i. +Otherwise, returns (false, 0). +Disclaimer: This function may be costly. Use it at your own discretion. + + +
public fun index_of<T>(self: &smart_vector::SmartVector<T>, val: &T): (bool, u64)
+
+ + + +
+Implementation + + +
public fun index_of<T>(self: &SmartVector<T>, val: &T): (bool, u64) {
+    let (found, i) = vector::index_of(&self.inline_vec, val);
+    if (found) {
+        (true, i)
+    } else if (option::is_some(&self.big_vec)) {
+        let (found, i) = big_vector::index_of(option::borrow(&self.big_vec), val);
+        (found, i + vector::length(&self.inline_vec))
+    } else {
+        (false, 0)
+    }
+}
+
+ + + +
+ + + +## Function `contains` + +Return true if val is in the vector self. +Disclaimer: This function may be costly. Use it at your own discretion. + + +
public fun contains<T>(self: &smart_vector::SmartVector<T>, val: &T): bool
+
+ + + +
+Implementation + + +
public fun contains<T>(self: &SmartVector<T>, val: &T): bool {
+    if (is_empty(self)) return false;
+    let (exist, _) = index_of(self, val);
+    exist
+}
+
+ + + +
+ + + +## Function `length` + +Return the length of the vector. + + +
public fun length<T>(self: &smart_vector::SmartVector<T>): u64
+
+ + + +
+Implementation + + +
public fun length<T>(self: &SmartVector<T>): u64 {
+    vector::length(&self.inline_vec) + if (option::is_none(&self.big_vec)) {
+        0
+    } else {
+        big_vector::length(option::borrow(&self.big_vec))
+    }
+}
+
+ + + +
+ + + +## Function `is_empty` + +Return true if the vector self has no Ts and false otherwise. + + +
public fun is_empty<T>(self: &smart_vector::SmartVector<T>): bool
+
+ + + +
+Implementation + + +
public fun is_empty<T>(self: &SmartVector<T>): bool {
+    length(self) == 0
+}
+
+ + + +
+ + + +## Function `for_each` + +Apply the function to each T in the vector, consuming it. + + +
public fun for_each<T: store>(self: smart_vector::SmartVector<T>, f: |T|)
+
+ + + +
+Implementation + + +
public inline fun for_each<T: store>(self: SmartVector<T>, f: |T|) {
+    aptos_std::smart_vector::reverse(&mut self); // We need to reverse the vector to consume it efficiently
+    aptos_std::smart_vector::for_each_reverse(self, |e| f(e));
+}
+
+ + + +
+ + + +## Function `for_each_reverse` + +Apply the function to each T in the vector, consuming it. + + +
public fun for_each_reverse<T>(self: smart_vector::SmartVector<T>, f: |T|)
+
+ + + +
+Implementation + + +
public inline fun for_each_reverse<T>(self: SmartVector<T>, f: |T|) {
+    let len = aptos_std::smart_vector::length(&self);
+    while (len > 0) {
+        f(aptos_std::smart_vector::pop_back(&mut self));
+        len = len - 1;
+    };
+    aptos_std::smart_vector::destroy_empty(self)
+}
+
+ + + +
+ + + +## Function `for_each_ref` + +Apply the function to a reference of each T in the vector. + + +
public fun for_each_ref<T>(self: &smart_vector::SmartVector<T>, f: |&T|)
+
+ + + +
+Implementation + + +
public inline fun for_each_ref<T>(self: &SmartVector<T>, f: |&T|) {
+    let i = 0;
+    let len = aptos_std::smart_vector::length(self);
+    while (i < len) {
+        f(aptos_std::smart_vector::borrow(self, i));
+        i = i + 1
+    }
+}
+
+ + + +
+ + + +## Function `for_each_mut` + +Apply the function to a mutable reference to each T in the vector. + + +
public fun for_each_mut<T>(self: &mut smart_vector::SmartVector<T>, f: |&mut T|)
+
+ + + +
+Implementation + + +
public inline fun for_each_mut<T>(self: &mut SmartVector<T>, f: |&mut T|) {
+    let i = 0;
+    let len = aptos_std::smart_vector::length(self);
+    while (i < len) {
+        f(aptos_std::smart_vector::borrow_mut(self, i));
+        i = i + 1
+    }
+}
+
+ + + +
+ + + +## Function `enumerate_ref` + +Apply the function to a reference of each T in the vector with its index. + + +
public fun enumerate_ref<T>(self: &smart_vector::SmartVector<T>, f: |(u64, &T)|)
+
+ + + +
+Implementation + + +
public inline fun enumerate_ref<T>(self: &SmartVector<T>, f: |u64, &T|) {
+    let i = 0;
+    let len = aptos_std::smart_vector::length(self);
+    while (i < len) {
+        f(i, aptos_std::smart_vector::borrow(self, i));
+        i = i + 1;
+    };
+}
+
+ + + +
+ + + +## Function `enumerate_mut` + +Apply the function to a mutable reference of each T in the vector with its index. + + +
public fun enumerate_mut<T>(self: &mut smart_vector::SmartVector<T>, f: |(u64, &mut T)|)
+
+ + + +
+Implementation + + +
public inline fun enumerate_mut<T>(self: &mut SmartVector<T>, f: |u64, &mut T|) {
+    let i = 0;
+    let len = length(self);
+    while (i < len) {
+        f(i, borrow_mut(self, i));
+        i = i + 1;
+    };
+}
+
+ + + +
+ + + +## Function `fold` + +Fold the function over the Ts. For example, fold(vector[1,2,3], 0, f) will execute +f(f(f(0, 1), 2), 3) + + +
public fun fold<Accumulator, T: store>(self: smart_vector::SmartVector<T>, init: Accumulator, f: |(Accumulator, T)|Accumulator): Accumulator
+
+ + + +
+Implementation + + +
public inline fun fold<Accumulator, T: store>(
+    self: SmartVector<T>,
+    init: Accumulator,
+    f: |Accumulator, T|Accumulator
+): Accumulator {
+    let accu = init;
+    aptos_std::smart_vector::for_each(self, |elem| accu = f(accu, elem));
+    accu
+}
+
+ + + +
+ + + +## Function `foldr` + +Fold right like fold above but working right to left. For example, foldr(vector[1,2,3], 0, f) will execute +f(1, f(2, f(3, 0))) + + +
public fun foldr<Accumulator, T>(self: smart_vector::SmartVector<T>, init: Accumulator, f: |(T, Accumulator)|Accumulator): Accumulator
+
+ + + +
+Implementation + + +
public inline fun foldr<Accumulator, T>(
+    self: SmartVector<T>,
+    init: Accumulator,
+    f: |T, Accumulator|Accumulator
+): Accumulator {
+    let accu = init;
+    aptos_std::smart_vector::for_each_reverse(self, |elem| accu = f(elem, accu));
+    accu
+}
+
+ + + +
+ + + +## Function `map_ref` + +Map the function over the references of the Ts of the vector, producing a new vector without modifying the +original vector. + + +
public fun map_ref<T1, T2: store>(self: &smart_vector::SmartVector<T1>, f: |&T1|T2): smart_vector::SmartVector<T2>
+
+ + + +
+Implementation + + +
public inline fun map_ref<T1, T2: store>(
+    self: &SmartVector<T1>,
+    f: |&T1|T2
+): SmartVector<T2> {
+    let result = aptos_std::smart_vector::new<T2>();
+    aptos_std::smart_vector::for_each_ref(self, |elem| aptos_std::smart_vector::push_back(&mut result, f(elem)));
+    result
+}
+
+ + + +
+ + + +## Function `map` + +Map the function over the Ts of the vector, producing a new vector. + + +
public fun map<T1: store, T2: store>(self: smart_vector::SmartVector<T1>, f: |T1|T2): smart_vector::SmartVector<T2>
+
+ + + +
+Implementation + + +
public inline fun map<T1: store, T2: store>(
+    self: SmartVector<T1>,
+    f: |T1|T2
+): SmartVector<T2> {
+    let result = aptos_std::smart_vector::new<T2>();
+    aptos_std::smart_vector::for_each(self, |elem| push_back(&mut result, f(elem)));
+    result
+}
+
+ + + +
+ + + +## Function `filter` + +Filter the vector using the boolean function, removing all Ts for which p(e) is not true. + + +
public fun filter<T: drop, store>(self: smart_vector::SmartVector<T>, p: |&T|bool): smart_vector::SmartVector<T>
+
+ + + +
+Implementation + + +
public inline fun filter<T: store + drop>(
+    self: SmartVector<T>,
+    p: |&T|bool
+): SmartVector<T> {
+    let result = aptos_std::smart_vector::new<T>();
+    aptos_std::smart_vector::for_each(self, |elem| {
+        if (p(&elem)) aptos_std::smart_vector::push_back(&mut result, elem);
+    });
+    result
+}
+
+ + + +
+ + + +## Function `zip` + + + +
public fun zip<T1: store, T2: store>(self: smart_vector::SmartVector<T1>, v2: smart_vector::SmartVector<T2>, f: |(T1, T2)|)
+
+ + + +
+Implementation + + +
public inline fun zip<T1: store, T2: store>(self: SmartVector<T1>, v2: SmartVector<T2>, f: |T1, T2|) {
+    // We need to reverse the vectors to consume it efficiently
+    aptos_std::smart_vector::reverse(&mut self);
+    aptos_std::smart_vector::reverse(&mut v2);
+    aptos_std::smart_vector::zip_reverse(self, v2, |e1, e2| f(e1, e2));
+}
+
+ + + +
+ + + +## Function `zip_reverse` + +Apply the function to each pair of elements in the two given vectors in the reverse order, consuming them. +This errors out if the vectors are not of the same length. + + +
public fun zip_reverse<T1, T2>(self: smart_vector::SmartVector<T1>, v2: smart_vector::SmartVector<T2>, f: |(T1, T2)|)
+
+ + + +
+Implementation + + +
public inline fun zip_reverse<T1, T2>(
+    self: SmartVector<T1>,
+    v2: SmartVector<T2>,
+    f: |T1, T2|,
+) {
+    let len = aptos_std::smart_vector::length(&self);
+    // We can't use the constant ESMART_VECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
+    // due to how inline functions work.
+    assert!(len == aptos_std::smart_vector::length(&v2), 0x20005);
+    while (len > 0) {
+        f(aptos_std::smart_vector::pop_back(&mut self), aptos_std::smart_vector::pop_back(&mut v2));
+        len = len - 1;
+    };
+    aptos_std::smart_vector::destroy_empty(self);
+    aptos_std::smart_vector::destroy_empty(v2);
+}
+
+ + + +
+ + + +## Function `zip_ref` + +Apply the function to the references of each pair of elements in the two given vectors. +This errors out if the vectors are not of the same length. + + +
public fun zip_ref<T1, T2>(self: &smart_vector::SmartVector<T1>, v2: &smart_vector::SmartVector<T2>, f: |(&T1, &T2)|)
+
+ + + +
+Implementation + + +
public inline fun zip_ref<T1, T2>(
+    self: &SmartVector<T1>,
+    v2: &SmartVector<T2>,
+    f: |&T1, &T2|,
+) {
+    let len = aptos_std::smart_vector::length(self);
+    // We can't use the constant ESMART_VECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
+    // due to how inline functions work.
+    assert!(len == aptos_std::smart_vector::length(v2), 0x20005);
+    let i = 0;
+    while (i < len) {
+        f(aptos_std::smart_vector::borrow(self, i), aptos_std::smart_vector::borrow(v2, i));
+        i = i + 1
+    }
+}
+
+ + + +
+ + + +## Function `zip_mut` + +Apply the function to mutable references to each pair of elements in the two given vectors. +This errors out if the vectors are not of the same length. + + +
public fun zip_mut<T1, T2>(self: &mut smart_vector::SmartVector<T1>, v2: &mut smart_vector::SmartVector<T2>, f: |(&mut T1, &mut T2)|)
+
+ + + +
+Implementation + + +
public inline fun zip_mut<T1, T2>(
+    self: &mut SmartVector<T1>,
+    v2: &mut SmartVector<T2>,
+    f: |&mut T1, &mut T2|,
+) {
+    let i = 0;
+    let len = aptos_std::smart_vector::length(self);
+    // We can't use the constant ESMART_VECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
+    // due to how inline functions work.
+    assert!(len == aptos_std::smart_vector::length(v2), 0x20005);
+    while (i < len) {
+        f(aptos_std::smart_vector::borrow_mut(self, i), aptos_std::smart_vector::borrow_mut(v2, i));
+        i = i + 1
+    }
+}
+
+ + + +
+ + + +## Function `zip_map` + +Map the function over the element pairs of the two vectors, producing a new vector. + + +
public fun zip_map<T1: store, T2: store, NewT: store>(self: smart_vector::SmartVector<T1>, v2: smart_vector::SmartVector<T2>, f: |(T1, T2)|NewT): smart_vector::SmartVector<NewT>
+
+ + + +
+Implementation + + +
public inline fun zip_map<T1: store, T2: store, NewT: store>(
+    self: SmartVector<T1>,
+    v2: SmartVector<T2>,
+    f: |T1, T2|NewT
+): SmartVector<NewT> {
+    // We can't use the constant ESMART_VECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
+    // due to how inline functions work.
+    assert!(aptos_std::smart_vector::length(&self) == aptos_std::smart_vector::length(&v2), 0x20005);
+
+    let result = aptos_std::smart_vector::new<NewT>();
+    aptos_std::smart_vector::zip(self, v2, |e1, e2| push_back(&mut result, f(e1, e2)));
+    result
+}
+
+ + + +
+ + + +## Function `zip_map_ref` + +Map the function over the references of the element pairs of two vectors, producing a new vector from the return +values without modifying the original vectors. + + +
public fun zip_map_ref<T1, T2, NewT: store>(self: &smart_vector::SmartVector<T1>, v2: &smart_vector::SmartVector<T2>, f: |(&T1, &T2)|NewT): smart_vector::SmartVector<NewT>
+
+ + + +
+Implementation + + +
public inline fun zip_map_ref<T1, T2, NewT: store>(
+    self: &SmartVector<T1>,
+    v2: &SmartVector<T2>,
+    f: |&T1, &T2|NewT
+): SmartVector<NewT> {
+    // We can't use the constant ESMART_VECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
+    // due to how inline functions work.
+    assert!(aptos_std::smart_vector::length(self) == aptos_std::smart_vector::length(v2), 0x20005);
+
+    let result = aptos_std::smart_vector::new<NewT>();
+    aptos_std::smart_vector::zip_ref(self, v2, |e1, e2| push_back(&mut result, f(e1, e2)));
+    result
+}
+
+ + + +
+ + + +## Specification + + + + +### Struct `SmartVector` + + +
struct SmartVector<T> has store
+
+ + + +
+
+inline_vec: vector<T> +
+
+ +
+
+big_vec: option::Option<big_vector::BigVector<T>> +
+
+ +
+
+inline_capacity: option::Option<u64> +
+
+ +
+
+bucket_size: option::Option<u64> +
+
+ +
+
+ + + +
invariant option::is_none(bucket_size)
+    || (option::is_some(bucket_size) && option::borrow(bucket_size) != 0);
+invariant option::is_none(inline_capacity)
+    || (len(inline_vec) <= option::borrow(inline_capacity));
+invariant (option::is_none(inline_capacity) && option::is_none(bucket_size))
+    || (option::is_some(inline_capacity) && option::is_some(bucket_size));
+
+ + + + + +### Function `empty` + + +
#[deprecated]
+public fun empty<T: store>(): smart_vector::SmartVector<T>
+
+ + + + +
aborts_if false;
+
+ + + + + +### Function `empty_with_config` + + +
public fun empty_with_config<T: store>(inline_capacity: u64, bucket_size: u64): smart_vector::SmartVector<T>
+
+ + + + +
aborts_if bucket_size == 0;
+
+ + + + + +### Function `destroy_empty` + + +
public fun destroy_empty<T>(self: smart_vector::SmartVector<T>)
+
+ + + + +
aborts_if !(is_empty(self));
+aborts_if len(self.inline_vec) != 0
+    || option::is_some(self.big_vec);
+
+ + + + + +### Function `borrow` + + +
public fun borrow<T>(self: &smart_vector::SmartVector<T>, i: u64): &T
+
+ + + + +
aborts_if i >= length(self);
+aborts_if option::is_some(self.big_vec) && (
+    (len(self.inline_vec) + big_vector::length<T>(option::borrow(self.big_vec))) > MAX_U64
+);
+
+ + + + + +### Function `append` + + +
public fun append<T: store>(self: &mut smart_vector::SmartVector<T>, other: smart_vector::SmartVector<T>)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `push_back` + + +
public fun push_back<T: store>(self: &mut smart_vector::SmartVector<T>, val: T)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `pop_back` + + +
public fun pop_back<T>(self: &mut smart_vector::SmartVector<T>): T
+
+ + + + +
pragma verify_duration_estimate = 120;
+aborts_if  option::is_some(self.big_vec)
+    &&
+    (table_with_length::spec_len(option::borrow(self.big_vec).buckets) == 0);
+aborts_if is_empty(self);
+aborts_if option::is_some(self.big_vec) && (
+    (len(self.inline_vec) + big_vector::length<T>(option::borrow(self.big_vec))) > MAX_U64
+);
+ensures length(self) == length(old(self)) - 1;
+
+ + + + + +### Function `remove` + + +
public fun remove<T>(self: &mut smart_vector::SmartVector<T>, i: u64): T
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `swap_remove` + + +
public fun swap_remove<T>(self: &mut smart_vector::SmartVector<T>, i: u64): T
+
+ + + + +
pragma verify = false;
+aborts_if i >= length(self);
+aborts_if option::is_some(self.big_vec) && (
+    (len(self.inline_vec) + big_vector::length<T>(option::borrow(self.big_vec))) > MAX_U64
+);
+ensures length(self) == length(old(self)) - 1;
+
+ + + + + +### Function `swap` + + +
public fun swap<T: store>(self: &mut smart_vector::SmartVector<T>, i: u64, j: u64)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `length` + + +
public fun length<T>(self: &smart_vector::SmartVector<T>): u64
+
+ + + + +
aborts_if option::is_some(self.big_vec) && len(self.inline_vec) + big_vector::length(option::spec_borrow(
+    self.big_vec)) > MAX_U64;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/string_utils.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/string_utils.md new file mode 100644 index 0000000000000..396d1c84fb50c --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/string_utils.md @@ -0,0 +1,763 @@ + + + +# Module `0x1::string_utils` + +A module for formatting move values as strings. + + +- [Struct `Cons`](#0x1_string_utils_Cons) +- [Struct `NIL`](#0x1_string_utils_NIL) +- [Struct `FakeCons`](#0x1_string_utils_FakeCons) + - [[test_only]](#@[test_only]_0) +- [Constants](#@Constants_1) +- [Function `to_string`](#0x1_string_utils_to_string) +- [Function `to_string_with_canonical_addresses`](#0x1_string_utils_to_string_with_canonical_addresses) +- [Function `to_string_with_integer_types`](#0x1_string_utils_to_string_with_integer_types) +- [Function `debug_string`](#0x1_string_utils_debug_string) +- [Function `format1`](#0x1_string_utils_format1) +- [Function `format2`](#0x1_string_utils_format2) +- [Function `format3`](#0x1_string_utils_format3) +- [Function `format4`](#0x1_string_utils_format4) +- [Function `cons`](#0x1_string_utils_cons) +- [Function `nil`](#0x1_string_utils_nil) +- [Function `list1`](#0x1_string_utils_list1) +- [Function `list2`](#0x1_string_utils_list2) +- [Function `list3`](#0x1_string_utils_list3) +- [Function `list4`](#0x1_string_utils_list4) +- [Function `native_format`](#0x1_string_utils_native_format) +- [Function `native_format_list`](#0x1_string_utils_native_format_list) +- [Specification](#@Specification_2) + - [Function `to_string`](#@Specification_2_to_string) + - [Function `to_string_with_canonical_addresses`](#@Specification_2_to_string_with_canonical_addresses) + - [Function `to_string_with_integer_types`](#@Specification_2_to_string_with_integer_types) + - [Function `debug_string`](#@Specification_2_debug_string) + - [Function 
`format1`](#@Specification_2_format1) + - [Function `format2`](#@Specification_2_format2) + - [Function `format3`](#@Specification_2_format3) + - [Function `format4`](#@Specification_2_format4) + - [Function `native_format`](#@Specification_2_native_format) + - [Function `native_format_list`](#@Specification_2_native_format_list) + + +
use 0x1::string;
+
+ + + + + +## Struct `Cons` + + + +
struct Cons<T, N> has copy, drop, store
+
+ + + +
+Fields + + +
+
+car: T +
+
+ +
+
+cdr: N +
+
+ +
+
+ + +
+ + + +## Struct `NIL` + + + +
struct NIL has copy, drop, store
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `FakeCons` + + + + +### [test_only] + + + +
struct FakeCons<T, N> has copy, drop, store
+
+ + + +
+Fields + + +
+
+car: T +
+
+ +
+
+cdr: N +
+
+ +
+
+ + +
+ + + +## Constants + + + + +The number of values in the list does not match the number of "{}" in the format string. + + +
const EARGS_MISMATCH: u64 = 1;
+
+ + + + + +The format string is not valid. + + +
const EINVALID_FORMAT: u64 = 2;
+
+ + + + + +Formatting is not possible because the value contains delayed fields such as aggregators. + + +
const EUNABLE_TO_FORMAT_DELAYED_FIELD: u64 = 3;
+
+ + + + + +## Function `to_string` + +Format a move value as a human readable string, +eg. to_string(&1u64) == "1", to_string(&false) == "false", to_string(&@0x1) == "@0x1". +For vectors and structs the format is similar to rust, eg. +to_string(&cons(1,2)) == "Cons { car: 1, cdr: 2 }" and to_string(&vector[1, 2, 3]) == "[ 1, 2, 3 ]" +For vectors of u8 the output is hex encoded, eg. to_string(&vector[1u8, 2u8, 3u8]) == "0x010203" +For std::string::String the output is the string itself including quotes, eg. +to_string(&std::string::utf8(b"My string")) == "\"My string\"" + + +
public fun to_string<T>(s: &T): string::String
+
+ + + +
+Implementation + + +
public fun to_string<T>(s: &T): String {
+    native_format(s, false, false, true, false)
+}
+
+ + + +
+ + + +## Function `to_string_with_canonical_addresses` + +Format addresses as 64 zero-padded hexadecimals. + + +
public fun to_string_with_canonical_addresses<T>(s: &T): string::String
+
+ + + +
+Implementation + + +
public fun to_string_with_canonical_addresses<T>(s: &T): String {
+    native_format(s, false, true, true, false)
+}
+
+ + + +
+ + + +## Function `to_string_with_integer_types` + +Format emitting integers with types ie. 6u8 or 128u32. + + +
public fun to_string_with_integer_types<T>(s: &T): string::String
+
+ + + +
+Implementation + + +
public fun to_string_with_integer_types<T>(s: &T): String {
+    native_format(s, false, true, true, false)
+}
+
+ + + +
+ + + +## Function `debug_string` + +Format vectors and structs with newlines and indentation. + + +
public fun debug_string<T>(s: &T): string::String
+
+ + + +
+Implementation + + +
public fun debug_string<T>(s: &T): String {
+    native_format(s, true, false, false, false)
+}
+
+ + + +
+ + + +## Function `format1` + +Formatting with a rust-like format string, eg. format2(&b"a = {}, b = {}", 1, 2) == "a = 1, b = 2". + + +
public fun format1<T0: drop>(fmt: &vector<u8>, a: T0): string::String
+
+ + + +
+Implementation + + +
public fun format1<T0: drop>(fmt: &vector<u8>, a: T0): String {
+    native_format_list(fmt, &list1(a))
+}
+
+ + + +
+ + + +## Function `format2` + + + +
public fun format2<T0: drop, T1: drop>(fmt: &vector<u8>, a: T0, b: T1): string::String
+
+ + + +
+Implementation + + +
public fun format2<T0: drop, T1: drop>(fmt: &vector<u8>, a: T0, b: T1): String {
+    native_format_list(fmt, &list2(a, b))
+}
+
+ + + +
+ + + +## Function `format3` + + + +
public fun format3<T0: drop, T1: drop, T2: drop>(fmt: &vector<u8>, a: T0, b: T1, c: T2): string::String
+
+ + + +
+Implementation + + +
public fun format3<T0: drop, T1: drop, T2: drop>(fmt: &vector<u8>, a: T0, b: T1, c: T2): String {
+    native_format_list(fmt, &list3(a, b, c))
+}
+
+ + + +
+ + + +## Function `format4` + + + +
public fun format4<T0: drop, T1: drop, T2: drop, T3: drop>(fmt: &vector<u8>, a: T0, b: T1, c: T2, d: T3): string::String
+
+ + + +
+Implementation + + +
public fun format4<T0: drop, T1: drop, T2: drop, T3: drop>(fmt: &vector<u8>, a: T0, b: T1, c: T2, d: T3): String {
+    native_format_list(fmt, &list4(a, b, c, d))
+}
+
+ + + +
+ + + +## Function `cons` + + + +
fun cons<T, N>(car: T, cdr: N): string_utils::Cons<T, N>
+
+ + + +
+Implementation + + +
fun cons<T, N>(car: T, cdr: N): Cons<T, N> { Cons { car, cdr } }
+
+ + + +
+ + + +## Function `nil` + + + +
fun nil(): string_utils::NIL
+
+ + + +
+Implementation + + +
fun nil(): NIL { NIL {} }
+
+ + + +
+ + + +## Function `list1` + + + +
fun list1<T0>(a: T0): string_utils::Cons<T0, string_utils::NIL>
+
+ + + +
+Implementation + + +
inline fun list1<T0>(a: T0): Cons<T0, NIL> { cons(a, nil()) }
+
+ + + +
+ + + +## Function `list2` + + + +
fun list2<T0, T1>(a: T0, b: T1): string_utils::Cons<T0, string_utils::Cons<T1, string_utils::NIL>>
+
+ + + +
+Implementation + + +
inline fun list2<T0, T1>(a: T0, b: T1): Cons<T0, Cons<T1, NIL>> { cons(a, list1(b)) }
+
+ + + +
+ + + +## Function `list3` + + + +
fun list3<T0, T1, T2>(a: T0, b: T1, c: T2): string_utils::Cons<T0, string_utils::Cons<T1, string_utils::Cons<T2, string_utils::NIL>>>
+
+ + + +
+Implementation + + +
inline fun list3<T0, T1, T2>(a: T0, b: T1, c: T2): Cons<T0, Cons<T1, Cons<T2, NIL>>> { cons(a, list2(b, c)) }
+
+ + + +
+ + + +## Function `list4` + + + +
fun list4<T0, T1, T2, T3>(a: T0, b: T1, c: T2, d: T3): string_utils::Cons<T0, string_utils::Cons<T1, string_utils::Cons<T2, string_utils::Cons<T3, string_utils::NIL>>>>
+
+ + + +
+Implementation + + +
inline fun list4<T0, T1, T2, T3>(a: T0, b: T1, c: T2, d: T3): Cons<T0, Cons<T1, Cons<T2, Cons<T3, NIL>>>> { cons(a, list3(b, c, d)) }
+
+ + + +
+ + + +## Function `native_format` + + + +
fun native_format<T>(s: &T, type_tag: bool, canonicalize: bool, single_line: bool, include_int_types: bool): string::String
+
+ + + +
+Implementation + + +
native fun native_format<T>(s: &T, type_tag: bool, canonicalize: bool, single_line: bool, include_int_types: bool): String;
+
+ + + +
+ + + +## Function `native_format_list` + + + +
fun native_format_list<T>(fmt: &vector<u8>, val: &T): string::String
+
+ + + +
+Implementation + + +
native fun native_format_list<T>(fmt: &vector<u8>, val: &T): String;
+
+ + + +
+ + + +## Specification + + + + +### Function `to_string` + + +
public fun to_string<T>(s: &T): string::String
+
+ + + + +
aborts_if false;
+ensures result == spec_native_format(s, false, false, true, false);
+
+ + + + + +### Function `to_string_with_canonical_addresses` + + +
public fun to_string_with_canonical_addresses<T>(s: &T): string::String
+
+ + + + +
aborts_if false;
+ensures result == spec_native_format(s, false, true, true, false);
+
+ + + + + +### Function `to_string_with_integer_types` + + +
public fun to_string_with_integer_types<T>(s: &T): string::String
+
+ + + + +
aborts_if false;
+ensures result == spec_native_format(s, false, true, true, false);
+
+ + + + + +### Function `debug_string` + + +
public fun debug_string<T>(s: &T): string::String
+
+ + + + +
aborts_if false;
+ensures result == spec_native_format(s, true, false, false, false);
+
+ + + + + +### Function `format1` + + +
public fun format1<T0: drop>(fmt: &vector<u8>, a: T0): string::String
+
+ + + + +
aborts_if args_mismatch_or_invalid_format(fmt, list1(a));
+ensures result == spec_native_format_list(fmt, list1(a));
+
+ + + + + +### Function `format2` + + +
public fun format2<T0: drop, T1: drop>(fmt: &vector<u8>, a: T0, b: T1): string::String
+
+ + + + +
aborts_if args_mismatch_or_invalid_format(fmt, list2(a, b));
+ensures result == spec_native_format_list(fmt, list2(a, b));
+
+ + + + + +### Function `format3` + + +
public fun format3<T0: drop, T1: drop, T2: drop>(fmt: &vector<u8>, a: T0, b: T1, c: T2): string::String
+
+ + + + +
aborts_if args_mismatch_or_invalid_format(fmt, list3(a, b, c));
+ensures result == spec_native_format_list(fmt, list3(a, b, c));
+
+ + + + + +### Function `format4` + + +
public fun format4<T0: drop, T1: drop, T2: drop, T3: drop>(fmt: &vector<u8>, a: T0, b: T1, c: T2, d: T3): string::String
+
+ + + + +
aborts_if args_mismatch_or_invalid_format(fmt, list4(a, b, c, d));
+ensures result == spec_native_format_list(fmt, list4(a, b, c, d));
+
+ + + + + +### Function `native_format` + + +
fun native_format<T>(s: &T, type_tag: bool, canonicalize: bool, single_line: bool, include_int_types: bool): string::String
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == spec_native_format(s, type_tag, canonicalize, single_line, include_int_types);
+
+ + + + + +### Function `native_format_list` + + +
fun native_format_list<T>(fmt: &vector<u8>, val: &T): string::String
+
+ + + + +
pragma opaque;
+aborts_if args_mismatch_or_invalid_format(fmt, val);
+ensures result == spec_native_format_list(fmt, val);
+
+ + + + + + + +
fun spec_native_format<T>(s: T, type_tag: bool, canonicalize: bool, single_line: bool, include_int_types: bool): String;
+
+ + + + + + + +
fun spec_native_format_list<T>(fmt: vector<u8>, val: T): String;
+
+ + + + + + + +
fun args_mismatch_or_invalid_format<T>(fmt: vector<u8>, val: T): bool;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/table.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/table.md new file mode 100644 index 0000000000000..44f2e5627b7d3 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/table.md @@ -0,0 +1,779 @@ + + + +# Module `0x1::table` + +Type of large-scale storage tables. +source: https://github.com/move-language/move/blob/1b6b7513dcc1a5c866f178ca5c1e74beb2ce181e/language/extensions/move-table-extension/sources/Table.move#L1 + +It implements the Table type which supports individual table items to be represented by +separate global state items. The number of items and a unique handle are tracked on the table +struct itself, while the operations are implemented as native functions. No traversal is provided. + + +- [Struct `Table`](#0x1_table_Table) +- [Resource `Box`](#0x1_table_Box) +- [Function `new`](#0x1_table_new) +- [Function `add`](#0x1_table_add) +- [Function `borrow`](#0x1_table_borrow) +- [Function `borrow_with_default`](#0x1_table_borrow_with_default) +- [Function `borrow_mut`](#0x1_table_borrow_mut) +- [Function `borrow_mut_with_default`](#0x1_table_borrow_mut_with_default) +- [Function `upsert`](#0x1_table_upsert) +- [Function `remove`](#0x1_table_remove) +- [Function `contains`](#0x1_table_contains) +- [Function `destroy`](#0x1_table_destroy) +- [Function `new_table_handle`](#0x1_table_new_table_handle) +- [Function `add_box`](#0x1_table_add_box) +- [Function `borrow_box`](#0x1_table_borrow_box) +- [Function `borrow_box_mut`](#0x1_table_borrow_box_mut) +- [Function `contains_box`](#0x1_table_contains_box) +- [Function `remove_box`](#0x1_table_remove_box) +- [Function `destroy_empty_box`](#0x1_table_destroy_empty_box) +- [Function `drop_unchecked_box`](#0x1_table_drop_unchecked_box) +- [Specification](#@Specification_0) + - [Struct `Table`](#@Specification_0_Table) + - [Function 
`new`](#@Specification_0_new) + - [Function `add`](#@Specification_0_add) + - [Function `borrow`](#@Specification_0_borrow) + - [Function `borrow_mut`](#@Specification_0_borrow_mut) + - [Function `borrow_mut_with_default`](#@Specification_0_borrow_mut_with_default) + - [Function `upsert`](#@Specification_0_upsert) + - [Function `remove`](#@Specification_0_remove) + - [Function `contains`](#@Specification_0_contains) + - [Function `destroy`](#@Specification_0_destroy) + + +
+ + + + + +## Struct `Table` + +Type of tables + + +
struct Table<K: copy, drop, V> has store
+
+ + + +
+Fields + + +
+
+handle: address +
+
+ +
+
+ + +
+ + + +## Resource `Box` + +Wrapper for values. Required for making values appear as resources in the implementation. + + +
struct Box<V> has drop, store, key
+
+ + + +
+Fields + + +
+
+val: V +
+
+ +
+
+ + +
+ + + +## Function `new` + +Create a new Table. + + +
public fun new<K: copy, drop, V: store>(): table::Table<K, V>
+
+ + + +
+Implementation + + +
public fun new<K: copy + drop, V: store>(): Table<K, V> {
+    Table {
+        handle: new_table_handle<K, V>(),
+    }
+}
+
+ + + +
+ + + +## Function `add` + +Add a new entry to the table. Aborts if an entry for this +key already exists. The entry itself is not stored in the +table, and cannot be discovered from it. + + +
public fun add<K: copy, drop, V>(self: &mut table::Table<K, V>, key: K, val: V)
+
+ + + +
+Implementation + + +
public fun add<K: copy + drop, V>(self: &mut Table<K, V>, key: K, val: V) {
+    add_box<K, V, Box<V>>(self, key, Box { val })
+}
+
+ + + +
+ + + +## Function `borrow` + +Acquire an immutable reference to the value which key maps to. +Aborts if there is no entry for key. + + +
public fun borrow<K: copy, drop, V>(self: &table::Table<K, V>, key: K): &V
+
+ + + +
+Implementation + + +
public fun borrow<K: copy + drop, V>(self: &Table<K, V>, key: K): &V {
+    &borrow_box<K, V, Box<V>>(self, key).val
+}
+
+ + + +
+ + + +## Function `borrow_with_default` + +Acquire an immutable reference to the value which key maps to. +Returns specified default value if there is no entry for key. + + +
public fun borrow_with_default<K: copy, drop, V>(self: &table::Table<K, V>, key: K, default: &V): &V
+
+ + + +
+Implementation + + +
public fun borrow_with_default<K: copy + drop, V>(self: &Table<K, V>, key: K, default: &V): &V {
+    if (!contains(self, copy key)) {
+        default
+    } else {
+        borrow(self, copy key)
+    }
+}
+
+ + + +
+ + + +## Function `borrow_mut` + +Acquire a mutable reference to the value which key maps to. +Aborts if there is no entry for key. + + +
public fun borrow_mut<K: copy, drop, V>(self: &mut table::Table<K, V>, key: K): &mut V
+
+ + + +
+Implementation + + +
public fun borrow_mut<K: copy + drop, V>(self: &mut Table<K, V>, key: K): &mut V {
+    &mut borrow_box_mut<K, V, Box<V>>(self, key).val
+}
+
+ + + +
+ + + +## Function `borrow_mut_with_default` + +Acquire a mutable reference to the value which key maps to. +Insert the pair (key, default) first if there is no entry for key. + + +
public fun borrow_mut_with_default<K: copy, drop, V: drop>(self: &mut table::Table<K, V>, key: K, default: V): &mut V
+
+ + + +
+Implementation + + +
public fun borrow_mut_with_default<K: copy + drop, V: drop>(self: &mut Table<K, V>, key: K, default: V): &mut V {
+    if (!contains(self, copy key)) {
+        add(self, copy key, default)
+    };
+    borrow_mut(self, key)
+}
+
+ + + +
+ + + +## Function `upsert` + +Insert the pair (key, value) if there is no entry for key. +Update the value of the entry for key to value otherwise. + + +
public fun upsert<K: copy, drop, V: drop>(self: &mut table::Table<K, V>, key: K, value: V)
+
+ + + +
+Implementation + + +
public fun upsert<K: copy + drop, V: drop>(self: &mut Table<K, V>, key: K, value: V) {
+    if (!contains(self, copy key)) {
+        add(self, copy key, value)
+    } else {
+        let ref = borrow_mut(self, key);
+        *ref = value;
+    };
+}
+
+ + + +
+ + + +## Function `remove` + +Remove from self and return the value which key maps to. +Aborts if there is no entry for key. + + +
public fun remove<K: copy, drop, V>(self: &mut table::Table<K, V>, key: K): V
+
+ + + +
+Implementation + + +
public fun remove<K: copy + drop, V>(self: &mut Table<K, V>, key: K): V {
+    let Box { val } = remove_box<K, V, Box<V>>(self, key);
+    val
+}
+
+ + + +
+ + + +## Function `contains` + +Returns true iff self contains an entry for key. + + +
public fun contains<K: copy, drop, V>(self: &table::Table<K, V>, key: K): bool
+
+ + + +
+Implementation + + +
public fun contains<K: copy + drop, V>(self: &Table<K, V>, key: K): bool {
+    contains_box<K, V, Box<V>>(self, key)
+}
+
+ + + +
+ + + +## Function `destroy` + + + +
public(friend) fun destroy<K: copy, drop, V>(self: table::Table<K, V>)
+
+ + + +
+Implementation + + +
public(friend) fun destroy<K: copy + drop, V>(self: Table<K, V>) {
+    destroy_empty_box<K, V, Box<V>>(&self);
+    drop_unchecked_box<K, V, Box<V>>(self)
+}
+
+ + + +
+ + + +## Function `new_table_handle` + + + +
fun new_table_handle<K, V>(): address
+
+ + + +
+Implementation + + +
native fun new_table_handle<K, V>(): address;
+
+ + + +
+ + + +## Function `add_box` + + + +
fun add_box<K: copy, drop, V, B>(table: &mut table::Table<K, V>, key: K, val: table::Box<V>)
+
+ + + +
+Implementation + + +
native fun add_box<K: copy + drop, V, B>(table: &mut Table<K, V>, key: K, val: Box<V>);
+
+ + + +
+ + + +## Function `borrow_box` + + + +
fun borrow_box<K: copy, drop, V, B>(table: &table::Table<K, V>, key: K): &table::Box<V>
+
+ + + +
+Implementation + + +
native fun borrow_box<K: copy + drop, V, B>(table: &Table<K, V>, key: K): &Box<V>;
+
+ + + +
+ + + +## Function `borrow_box_mut` + + + +
fun borrow_box_mut<K: copy, drop, V, B>(table: &mut table::Table<K, V>, key: K): &mut table::Box<V>
+
+ + + +
+Implementation + + +
native fun borrow_box_mut<K: copy + drop, V, B>(table: &mut Table<K, V>, key: K): &mut Box<V>;
+
+ + + +
+ + + +## Function `contains_box` + + + +
fun contains_box<K: copy, drop, V, B>(table: &table::Table<K, V>, key: K): bool
+
+ + + +
+Implementation + + +
native fun contains_box<K: copy + drop, V, B>(table: &Table<K, V>, key: K): bool;
+
+ + + +
+ + + +## Function `remove_box` + + + +
fun remove_box<K: copy, drop, V, B>(table: &mut table::Table<K, V>, key: K): table::Box<V>
+
+ + + +
+Implementation + + +
native fun remove_box<K: copy + drop, V, B>(table: &mut Table<K, V>, key: K): Box<V>;
+
+ + + +
+ + + +## Function `destroy_empty_box` + + + +
fun destroy_empty_box<K: copy, drop, V, B>(table: &table::Table<K, V>)
+
+ + + +
+Implementation + + +
native fun destroy_empty_box<K: copy + drop, V, B>(table: &Table<K, V>);
+
+ + + +
+ + + +## Function `drop_unchecked_box` + + + +
fun drop_unchecked_box<K: copy, drop, V, B>(table: table::Table<K, V>)
+
+ + + +
+Implementation + + +
native fun drop_unchecked_box<K: copy + drop, V, B>(table: Table<K, V>);
+
+ + + +
+ + + +## Specification + + + + +### Struct `Table` + + +
struct Table<K: copy, drop, V> has store
+
+ + + +
+
+handle: address +
+
+ +
+
+ + + +
pragma intrinsic = map,
+    map_new = new,
+    map_destroy_empty = destroy,
+    map_has_key = contains,
+    map_add_no_override = add,
+    map_add_override_if_exists = upsert,
+    map_del_must_exist = remove,
+    map_borrow = borrow,
+    map_borrow_mut = borrow_mut,
+    map_borrow_mut_with_default = borrow_mut_with_default,
+    map_spec_get = spec_get,
+    map_spec_set = spec_set,
+    map_spec_del = spec_remove,
+    map_spec_has_key = spec_contains;
+
+ + + + + +### Function `new` + + +
public fun new<K: copy, drop, V: store>(): table::Table<K, V>
+
+ + + + +
pragma intrinsic;
+
+ + + + + +### Function `add` + + +
public fun add<K: copy, drop, V>(self: &mut table::Table<K, V>, key: K, val: V)
+
+ + + + +
pragma intrinsic;
+
+ + + + + +### Function `borrow` + + +
public fun borrow<K: copy, drop, V>(self: &table::Table<K, V>, key: K): &V
+
+ + + + +
pragma intrinsic;
+
+ + + + + +### Function `borrow_mut` + + +
public fun borrow_mut<K: copy, drop, V>(self: &mut table::Table<K, V>, key: K): &mut V
+
+ + + + +
pragma intrinsic;
+
+ + + + + +### Function `borrow_mut_with_default` + + +
public fun borrow_mut_with_default<K: copy, drop, V: drop>(self: &mut table::Table<K, V>, key: K, default: V): &mut V
+
+ + + + +
pragma intrinsic;
+
+ + + + + +### Function `upsert` + + +
public fun upsert<K: copy, drop, V: drop>(self: &mut table::Table<K, V>, key: K, value: V)
+
+ + + + +
pragma intrinsic;
+
+ + + + + +### Function `remove` + + +
public fun remove<K: copy, drop, V>(self: &mut table::Table<K, V>, key: K): V
+
+ + + + +
pragma intrinsic;
+
+ + + + + +### Function `contains` + + +
public fun contains<K: copy, drop, V>(self: &table::Table<K, V>, key: K): bool
+
+ + + + +
pragma intrinsic;
+
+ + + + + + + +
native fun spec_contains<K, V>(t: Table<K, V>, k: K): bool;
+
+ + + + + + + +
native fun spec_remove<K, V>(t: Table<K, V>, k: K): Table<K, V>;
+
+ + + + + + + +
native fun spec_set<K, V>(t: Table<K, V>, k: K, v: V): Table<K, V>;
+
+ + + + + + + +
native fun spec_get<K, V>(t: Table<K, V>, k: K): V;
+
+ + + + + +### Function `destroy` + + +
public(friend) fun destroy<K: copy, drop, V>(self: table::Table<K, V>)
+
+ + + + +
pragma intrinsic;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/table_with_length.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/table_with_length.md new file mode 100644 index 0000000000000..3fa48d8881edf --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/table_with_length.md @@ -0,0 +1,684 @@ + + + +# Module `0x1::table_with_length` + +Extends Table and provides functions such as length and the ability to be destroyed + + +- [Struct `TableWithLength`](#0x1_table_with_length_TableWithLength) +- [Constants](#@Constants_0) +- [Function `new`](#0x1_table_with_length_new) +- [Function `destroy_empty`](#0x1_table_with_length_destroy_empty) +- [Function `add`](#0x1_table_with_length_add) +- [Function `borrow`](#0x1_table_with_length_borrow) +- [Function `borrow_mut`](#0x1_table_with_length_borrow_mut) +- [Function `length`](#0x1_table_with_length_length) +- [Function `empty`](#0x1_table_with_length_empty) +- [Function `borrow_mut_with_default`](#0x1_table_with_length_borrow_mut_with_default) +- [Function `upsert`](#0x1_table_with_length_upsert) +- [Function `remove`](#0x1_table_with_length_remove) +- [Function `contains`](#0x1_table_with_length_contains) +- [Specification](#@Specification_1) + - [Struct `TableWithLength`](#@Specification_1_TableWithLength) + - [Function `new`](#@Specification_1_new) + - [Function `destroy_empty`](#@Specification_1_destroy_empty) + - [Function `add`](#@Specification_1_add) + - [Function `borrow`](#@Specification_1_borrow) + - [Function `borrow_mut`](#@Specification_1_borrow_mut) + - [Function `length`](#@Specification_1_length) + - [Function `empty`](#@Specification_1_empty) + - [Function `borrow_mut_with_default`](#@Specification_1_borrow_mut_with_default) + - [Function `upsert`](#@Specification_1_upsert) + - [Function `remove`](#@Specification_1_remove) + - [Function `contains`](#@Specification_1_contains) + + +
use 0x1::error;
+use 0x1::table;
+
+ + + + + +## Struct `TableWithLength` + +Type of tables + + +
struct TableWithLength<K: copy, drop, V> has store
+
+ + + +
+Fields + + +
+
+inner: table::Table<K, V> +
+
+ +
+
+length: u64 +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const EALREADY_EXISTS: u64 = 100;
+
+ + + + + + + +
const ENOT_EMPTY: u64 = 102;
+
+ + + + + + + +
const ENOT_FOUND: u64 = 101;
+
+ + + + + +## Function `new` + +Create a new Table. + + +
public fun new<K: copy, drop, V: store>(): table_with_length::TableWithLength<K, V>
+
+ + + +
+Implementation + + +
public fun new<K: copy + drop, V: store>(): TableWithLength<K, V> {
+    TableWithLength {
+        inner: table::new<K, V>(),
+        length: 0,
+    }
+}
+
+ + + +
+ + + +## Function `destroy_empty` + +Destroy a table. The table must be empty to succeed. + + +
public fun destroy_empty<K: copy, drop, V>(self: table_with_length::TableWithLength<K, V>)
+
+ + + +
+Implementation + + +
public fun destroy_empty<K: copy + drop, V>(self: TableWithLength<K, V>) {
+    assert!(self.length == 0, error::invalid_state(ENOT_EMPTY));
+    let TableWithLength { inner, length: _ } = self;
+    table::destroy(inner)
+}
+
+ + + +
+ + + +## Function `add` + +Add a new entry to the table. Aborts if an entry for this +key already exists. The entry itself is not stored in the +table, and cannot be discovered from it. + + +
public fun add<K: copy, drop, V>(self: &mut table_with_length::TableWithLength<K, V>, key: K, val: V)
+
+ + + +
+Implementation + + +
public fun add<K: copy + drop, V>(self: &mut TableWithLength<K, V>, key: K, val: V) {
+    table::add(&mut self.inner, key, val);
+    self.length = self.length + 1;
+}
+
+ + + +
+ + + +## Function `borrow` + +Acquire an immutable reference to the value which key maps to. +Aborts if there is no entry for key. + + +
public fun borrow<K: copy, drop, V>(self: &table_with_length::TableWithLength<K, V>, key: K): &V
+
+ + + +
+Implementation + + +
public fun borrow<K: copy + drop, V>(self: &TableWithLength<K, V>, key: K): &V {
+    table::borrow(&self.inner, key)
+}
+
+ + + +
+ + + +## Function `borrow_mut` + +Acquire a mutable reference to the value which key maps to. +Aborts if there is no entry for key. + + +
public fun borrow_mut<K: copy, drop, V>(self: &mut table_with_length::TableWithLength<K, V>, key: K): &mut V
+
+ + + +
+Implementation + + +
public fun borrow_mut<K: copy + drop, V>(self: &mut TableWithLength<K, V>, key: K): &mut V {
+    table::borrow_mut(&mut self.inner, key)
+}
+
+ + + +
+ + + +## Function `length` + +Returns the length of the table, i.e. the number of entries. + + +
public fun length<K: copy, drop, V>(self: &table_with_length::TableWithLength<K, V>): u64
+
+ + + +
+Implementation + + +
public fun length<K: copy + drop, V>(self: &TableWithLength<K, V>): u64 {
+    self.length
+}
+
+ + + +
+ + + +## Function `empty` + +Returns true if this table is empty. + + +
public fun empty<K: copy, drop, V>(self: &table_with_length::TableWithLength<K, V>): bool
+
+ + + +
+Implementation + + +
public fun empty<K: copy + drop, V>(self: &TableWithLength<K, V>): bool {
+    self.length == 0
+}
+
+ + + +
+ + + +## Function `borrow_mut_with_default` + +Acquire a mutable reference to the value which key maps to. +Insert the pair (key, default) first if there is no entry for key. + + +
public fun borrow_mut_with_default<K: copy, drop, V: drop>(self: &mut table_with_length::TableWithLength<K, V>, key: K, default: V): &mut V
+
+ + + +
+Implementation + + +
public fun borrow_mut_with_default<K: copy + drop, V: drop>(self: &mut TableWithLength<K, V>, key: K, default: V): &mut V {
+    if (table::contains(&self.inner, key)) {
+        table::borrow_mut(&mut self.inner, key)
+    } else {
+        table::add(&mut self.inner, key, default);
+        self.length = self.length + 1;
+        table::borrow_mut(&mut self.inner, key)
+    }
+}
+
+ + + +
+ + + +## Function `upsert` + +Insert the pair (key, value) if there is no entry for key. +Update the value of the entry for key to value otherwise. + +
public fun upsert<K: copy, drop, V: drop>(self: &mut table_with_length::TableWithLength<K, V>, key: K, value: V)
+
+ + + +
+Implementation + + +
public fun upsert<K: copy + drop, V: drop>(self: &mut TableWithLength<K, V>, key: K, value: V) {
+    if (!table::contains(&self.inner, key)) {
+        add(self, copy key, value)
+    } else {
+        let ref = table::borrow_mut(&mut self.inner, key);
+        *ref = value;
+    };
+}
+
+ + + +
+ + + +## Function `remove` + +Remove from table and return the value which key maps to. +Aborts if there is no entry for key. + + +
public fun remove<K: copy, drop, V>(self: &mut table_with_length::TableWithLength<K, V>, key: K): V
+
+ + + +
+Implementation + + +
public fun remove<K: copy + drop, V>(self: &mut TableWithLength<K, V>, key: K): V {
+    let val = table::remove(&mut self.inner, key);
+    self.length = self.length - 1;
+    val
+}
+
+ + + +
+ + + +## Function `contains` + +Returns true iff table contains an entry for key. + + +
public fun contains<K: copy, drop, V>(self: &table_with_length::TableWithLength<K, V>, key: K): bool
+
+ + + +
+Implementation + + +
public fun contains<K: copy + drop, V>(self: &TableWithLength<K, V>, key: K): bool {
+    table::contains(&self.inner, key)
+}
+
+ + + +
+ + + +## Specification + + + + +### Struct `TableWithLength` + + +
struct TableWithLength<K: copy, drop, V> has store
+
+ + + +
+
+inner: table::Table<K, V> +
+
+ +
+
+length: u64 +
+
+ +
+
+ + + +
pragma intrinsic = map,
+    map_new = new,
+    map_destroy_empty = destroy_empty,
+    map_len = length,
+    map_is_empty = empty,
+    map_has_key = contains,
+    map_add_no_override = add,
+    map_add_override_if_exists = upsert,
+    map_del_must_exist = remove,
+    map_borrow = borrow,
+    map_borrow_mut = borrow_mut,
+    map_borrow_mut_with_default = borrow_mut_with_default,
+    map_spec_get = spec_get,
+    map_spec_set = spec_set,
+    map_spec_del = spec_remove,
+    map_spec_len = spec_len,
+    map_spec_has_key = spec_contains;
+
+ + + + + +### Function `new` + + +
public fun new<K: copy, drop, V: store>(): table_with_length::TableWithLength<K, V>
+
+ + + + +
pragma intrinsic;
+
+ + + + + +### Function `destroy_empty` + + +
public fun destroy_empty<K: copy, drop, V>(self: table_with_length::TableWithLength<K, V>)
+
+ + + + +
pragma intrinsic;
+
+ + + + + +### Function `add` + + +
public fun add<K: copy, drop, V>(self: &mut table_with_length::TableWithLength<K, V>, key: K, val: V)
+
+ + + + +
pragma intrinsic;
+
+ + + + + +### Function `borrow` + + +
public fun borrow<K: copy, drop, V>(self: &table_with_length::TableWithLength<K, V>, key: K): &V
+
+ + + + +
pragma intrinsic;
+
+ + + + + +### Function `borrow_mut` + + +
public fun borrow_mut<K: copy, drop, V>(self: &mut table_with_length::TableWithLength<K, V>, key: K): &mut V
+
+ + + + +
pragma intrinsic;
+
+ + + + + +### Function `length` + + +
public fun length<K: copy, drop, V>(self: &table_with_length::TableWithLength<K, V>): u64
+
+ + + + +
pragma intrinsic;
+
+ + + + + +### Function `empty` + + +
public fun empty<K: copy, drop, V>(self: &table_with_length::TableWithLength<K, V>): bool
+
+ + + + +
pragma intrinsic;
+
+ + + + + +### Function `borrow_mut_with_default` + + +
public fun borrow_mut_with_default<K: copy, drop, V: drop>(self: &mut table_with_length::TableWithLength<K, V>, key: K, default: V): &mut V
+
+ + + + +
aborts_if false;
+pragma intrinsic;
+
+ + + + + +### Function `upsert` + + +
public fun upsert<K: copy, drop, V: drop>(self: &mut table_with_length::TableWithLength<K, V>, key: K, value: V)
+
+ + + + +
pragma intrinsic;
+
+ + + + + +### Function `remove` + + +
public fun remove<K: copy, drop, V>(self: &mut table_with_length::TableWithLength<K, V>, key: K): V
+
+ + + + +
pragma intrinsic;
+
+ + + + + +### Function `contains` + + +
public fun contains<K: copy, drop, V>(self: &table_with_length::TableWithLength<K, V>, key: K): bool
+
+ + + + +
pragma intrinsic;
+
+ + + + + + + +
native fun spec_len<K, V>(t: TableWithLength<K, V>): num;
+
+ + + + + + + +
native fun spec_contains<K, V>(t: TableWithLength<K, V>, k: K): bool;
+
+ + + + + + + +
native fun spec_set<K, V>(t: TableWithLength<K, V>, k: K, v: V): TableWithLength<K, V>;
+
+ + + + + + + +
native fun spec_remove<K, V>(t: TableWithLength<K, V>, k: K): TableWithLength<K, V>;
+
+ + + + + + + +
native fun spec_get<K, V>(t: TableWithLength<K, V>, k: K): V;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/type_info.md b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/type_info.md new file mode 100644 index 0000000000000..5bcdfa31025a0 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/compiler-v2-doc/type_info.md @@ -0,0 +1,397 @@ + + + +# Module `0x1::type_info` + + + +- [Struct `TypeInfo`](#0x1_type_info_TypeInfo) +- [Constants](#@Constants_0) +- [Function `account_address`](#0x1_type_info_account_address) +- [Function `module_name`](#0x1_type_info_module_name) +- [Function `struct_name`](#0x1_type_info_struct_name) +- [Function `chain_id`](#0x1_type_info_chain_id) +- [Function `type_of`](#0x1_type_info_type_of) +- [Function `type_name`](#0x1_type_info_type_name) +- [Function `chain_id_internal`](#0x1_type_info_chain_id_internal) +- [Function `size_of_val`](#0x1_type_info_size_of_val) +- [Specification](#@Specification_1) + - [Function `chain_id`](#@Specification_1_chain_id) + - [Function `type_of`](#@Specification_1_type_of) + - [Function `type_name`](#@Specification_1_type_name) + - [Function `chain_id_internal`](#@Specification_1_chain_id_internal) + - [Function `size_of_val`](#@Specification_1_size_of_val) + + +
use 0x1::bcs;
+use 0x1::error;
+use 0x1::features;
+use 0x1::string;
+
+ + + + + +## Struct `TypeInfo` + + + +
struct TypeInfo has copy, drop, store
+
+ + + +
+Fields + + +
+
+account_address: address +
+
+ +
+
+module_name: vector<u8> +
+
+ +
+
+struct_name: vector<u8> +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const E_NATIVE_FUN_NOT_AVAILABLE: u64 = 1;
+
+ + + + + +## Function `account_address` + + + +
public fun account_address(self: &type_info::TypeInfo): address
+
+ + + +
+Implementation + + +
public fun account_address(self: &TypeInfo): address {
+    self.account_address
+}
+
+ + + +
+ + + +## Function `module_name` + + + +
public fun module_name(self: &type_info::TypeInfo): vector<u8>
+
+ + + +
+Implementation + + +
public fun module_name(self: &TypeInfo): vector<u8> {
+    self.module_name
+}
+
+ + + +
+ + + +## Function `struct_name` + + + +
public fun struct_name(self: &type_info::TypeInfo): vector<u8>
+
+ + + +
+Implementation + + +
public fun struct_name(self: &TypeInfo): vector<u8> {
+    self.struct_name
+}
+
+ + + +
+ + + +## Function `chain_id` + +Returns the current chain ID, mirroring what aptos_framework::chain_id::get() would return, except in #[test] +functions, where this will always return 4u8 as the chain ID, whereas aptos_framework::chain_id::get() will +return whichever ID was passed to aptos_framework::chain_id::initialize_for_test(). + + +
public fun chain_id(): u8
+
+ + + +
+Implementation + + +
public fun chain_id(): u8 {
+    if (!features::aptos_stdlib_chain_id_enabled()) {
+        abort(std::error::invalid_state(E_NATIVE_FUN_NOT_AVAILABLE))
+    };
+
+    chain_id_internal()
+}
+
+ + + +
+ + + +## Function `type_of` + +Return the TypeInfo struct for the type T. + +
public fun type_of<T>(): type_info::TypeInfo
+
+ + + +
+Implementation + + +
public native fun type_of<T>(): TypeInfo;
+
+ + + +
+ + + +## Function `type_name` + +Return the human readable string for the type, including the address, module name, and any type arguments. +Example: 0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin> +Or: 0x1::table::Table<0x1::string::String, 0x1::string::String> + + +
public fun type_name<T>(): string::String
+
+ + + +
+Implementation + + +
public native fun type_name<T>(): String;
+
+ + + +
+ + + +## Function `chain_id_internal` + + + +
fun chain_id_internal(): u8
+
+ + + +
+Implementation + + +
native fun chain_id_internal(): u8;
+
+ + + +
+ + + +## Function `size_of_val` + +Return the BCS size, in bytes, of value at val_ref. + +See the [BCS spec](https://github.com/diem/bcs) + +See test_size_of_val() for an analysis of common types and +nesting patterns, as well as test_size_of_val_vectors() for an +analysis of vector size dynamism. + + +
public fun size_of_val<T>(val_ref: &T): u64
+
+ + + +
+Implementation + + +
public fun size_of_val<T>(val_ref: &T): u64 {
+    bcs::serialized_size(val_ref)
+}
+
+ + + +
+ + + +## Specification + + + + + + +
native fun spec_is_struct<T>(): bool;
+
+ + + + + +### Function `chain_id` + + +
public fun chain_id(): u8
+
+ + + + +
aborts_if !features::spec_is_enabled(features::APTOS_STD_CHAIN_ID_NATIVES);
+ensures result == spec_chain_id_internal();
+
+ + + + + +### Function `type_of` + + +
public fun type_of<T>(): type_info::TypeInfo
+
+ + + + + + +### Function `type_name` + + +
public fun type_name<T>(): string::String
+
+ + + + + + +### Function `chain_id_internal` + + +
fun chain_id_internal(): u8
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == spec_chain_id_internal();
+
+ + + + + + + +
fun spec_chain_id_internal(): u8;
+
+ + + + + + + +
fun spec_size_of_val<T>(val_ref: T): u64 {
+   len(std::bcs::serialize(val_ref))
+}
+
+ + + + + +### Function `size_of_val` + + +
public fun size_of_val<T>(val_ref: &T): u64
+
+ + + + +
ensures result == spec_size_of_val<T>(val_ref);
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-token-objects/doc/collection.md b/aptos-move/framework/aptos-token-objects/doc/collection.md index 3cb95eb99eef3..72376c6f6c0e5 100644 --- a/aptos-move/framework/aptos-token-objects/doc/collection.md +++ b/aptos-move/framework/aptos-token-objects/doc/collection.md @@ -39,9 +39,12 @@ require adding the field original_name. - [Struct `SetMaxSupply`](#0x4_collection_SetMaxSupply) - [Constants](#@Constants_0) - [Function `create_fixed_collection`](#0x4_collection_create_fixed_collection) +- [Function `create_fixed_collection_as_owner`](#0x4_collection_create_fixed_collection_as_owner) - [Function `create_unlimited_collection`](#0x4_collection_create_unlimited_collection) +- [Function `create_unlimited_collection_as_owner`](#0x4_collection_create_unlimited_collection_as_owner) - [Function `create_untracked_collection`](#0x4_collection_create_untracked_collection) - [Function `create_collection_internal`](#0x4_collection_create_collection_internal) +- [Function `enable_ungated_transfer`](#0x4_collection_enable_ungated_transfer) - [Function `create_collection_address`](#0x4_collection_create_collection_address) - [Function `create_collection_seed`](#0x4_collection_create_collection_seed) - [Function `increment_supply`](#0x4_collection_increment_supply) @@ -708,6 +711,16 @@ The collection name is over the maximum length + + +The collection owner feature is not supported + + +
const ECOLLECTION_OWNER_NOT_SUPPORTED: u64 = 11;
+
+ + + The collection has reached its supply and no more tokens can be minted, unless some are burned @@ -793,7 +806,7 @@ The collection does not have a max supply Creates a fixed-sized collection, or a collection that supports a fixed amount of tokens. This is useful to create a guaranteed, limited supply on-chain digital asset. For example, a collection 1111 vicious vipers. Note, creating restrictions such as upward limits results -in data structures that prevent Supra from parallelizing mints of this collection type. +in data structures that prevent Aptos from parallelizing mints of this collection type. Beyond that, it adds supply tracking with events. @@ -837,6 +850,51 @@ Beyond that, it adds supply tracking with events. + + + + +## Function `create_fixed_collection_as_owner` + +Same functionality as create_fixed_collection, but the caller is the owner of the collection. +This means that the caller can transfer the collection to another address. +This transfers ownership and minting permissions to the new address. + + +
public fun create_fixed_collection_as_owner(creator: &signer, description: string::String, max_supply: u64, name: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_fixed_collection_as_owner(
+    creator: &signer,
+    description: String,
+    max_supply: u64,
+    name: String,
+    royalty: Option<Royalty>,
+    uri: String,
+): ConstructorRef {
+    assert!(features::is_collection_owner_enabled(), error::unavailable(ECOLLECTION_OWNER_NOT_SUPPORTED));
+
+    let constructor_ref = create_fixed_collection(
+        creator,
+        description,
+        max_supply,
+        name,
+        royalty,
+        uri,
+    );
+    enable_ungated_transfer(&constructor_ref);
+    constructor_ref
+}
+
+ + +
@@ -885,6 +943,49 @@ the supply of tokens. + + + + +## Function `create_unlimited_collection_as_owner` + +Same functionality as create_unlimited_collection, but the caller is the owner of the collection. +This means that the caller can transfer the collection to another address. +This transfers ownership and minting permissions to the new address. + + +
public fun create_unlimited_collection_as_owner(creator: &signer, description: string::String, name: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_unlimited_collection_as_owner(
+    creator: &signer,
+    description: String,
+    name: String,
+    royalty: Option<Royalty>,
+    uri: String,
+): ConstructorRef {
+    assert!(features::is_collection_owner_enabled(), error::unavailable(ECOLLECTION_OWNER_NOT_SUPPORTED));
+
+    let constructor_ref = create_unlimited_collection(
+        creator,
+        description,
+        name,
+        royalty,
+        uri,
+    );
+    enable_ungated_transfer(&constructor_ref);
+    constructor_ref
+}
+
+ + +
@@ -892,7 +993,7 @@ the supply of tokens. ## Function `create_untracked_collection` Creates an untracked collection, or a collection that supports an arbitrary amount of -tokens. This is useful for mass airdrops that fully leverage Supra parallelization. +tokens. This is useful for mass airdrops that fully leverage Aptos parallelization. TODO: Hide this until we bring back meaningful way to enforce burns @@ -989,6 +1090,31 @@ TODO: Hide this until we bring back meaningful way to enforce burns + + + + +## Function `enable_ungated_transfer` + + + +
fun enable_ungated_transfer(constructor_ref: &object::ConstructorRef)
+
+ + + +
+Implementation + + +
inline fun enable_ungated_transfer(constructor_ref: &ConstructorRef) {
+    let transfer_ref = object::generate_transfer_ref(constructor_ref);
+    object::enable_ungated_transfer(&transfer_ref);
+}
+
+ + +
diff --git a/aptos-move/framework/aptos-token-objects/doc/token.md b/aptos-move/framework/aptos-token-objects/doc/token.md index 4f405dbc13d3c..0e27044a8ef8b 100644 --- a/aptos-move/framework/aptos-token-objects/doc/token.md +++ b/aptos-move/framework/aptos-token-objects/doc/token.md @@ -3,7 +3,7 @@ # Module `0x4::token` -This defines an object-based Token. The key differentiating features from the Supra standard +This defines an object-based Token. The key differentiating features from the Aptos standard token are: * Decoupled token ownership from token data. * Explicit data model for token metadata via adjacent resources @@ -20,13 +20,19 @@ token are: - [Constants](#@Constants_0) - [Function `create_common`](#0x4_token_create_common) - [Function `create_common_with_collection`](#0x4_token_create_common_with_collection) +- [Function `create_common_with_collection_as_owner`](#0x4_token_create_common_with_collection_as_owner) +- [Function `create_common_with_collection_internal`](#0x4_token_create_common_with_collection_internal) - [Function `create_token`](#0x4_token_create_token) - [Function `create`](#0x4_token_create) +- [Function `create_token_as_collection_owner`](#0x4_token_create_token_as_collection_owner) - [Function `create_numbered_token_object`](#0x4_token_create_numbered_token_object) - [Function `create_numbered_token`](#0x4_token_create_numbered_token) +- [Function `create_numbered_token_as_collection_owner`](#0x4_token_create_numbered_token_as_collection_owner) - [Function `create_named_token_object`](#0x4_token_create_named_token_object) - [Function `create_named_token`](#0x4_token_create_named_token) +- [Function `create_named_token_as_collection_owner`](#0x4_token_create_named_token_as_collection_owner) - [Function `create_named_token_from_seed`](#0x4_token_create_named_token_from_seed) +- [Function `create_named_token_from_seed_as_collection_owner`](#0x4_token_create_named_token_from_seed_as_collection_owner) - [Function 
`create_from_account`](#0x4_token_create_from_account) - [Function `create_token_address`](#0x4_token_create_token_address) - [Function `create_token_address_with_seed`](#0x4_token_create_token_address_with_seed) @@ -113,7 +119,7 @@ Represents the common fields to all tokens. Was populated until concurrent_token_v2_enabled feature flag was enabled. The name of the token, which should be unique within the collection; the length of name - should be smaller than 128, characters, eg: "Supra Animal #1234" + should be smaller than 128, characters, eg: "Aptos Animal #1234"
uri: string::String @@ -163,7 +169,7 @@ Started being populated once aggregator_v2_api_enabled was enabled.
The name of the token, which should be unique within the collection; the length of name - should be smaller than 128, characters, eg: "Supra Animal #1234" + should be smaller than 128, characters, eg: "Aptos Animal #1234"
@@ -380,6 +386,26 @@ The URI is over the maximum length + + +The calling signer is not the owner + + +
const ENOT_OWNER: u64 = 8;
+
+ + + + + +The collection owner feature is not supported + + +
const ECOLLECTION_OWNER_NOT_SUPPORTED: u64 = 9;
+
+ + + The description is over the maximum length @@ -544,6 +570,94 @@ The token name is over the maximum length ) { assert!(collection::creator(collection) == signer::address_of(creator), error::unauthenticated(ENOT_CREATOR)); + create_common_with_collection_internal( + constructor_ref, + collection, + description, + name_prefix, + name_with_index_suffix, + royalty, + uri + ); +} +
+ + + + + + + +## Function `create_common_with_collection_as_owner` + + + +
fun create_common_with_collection_as_owner(owner: &signer, constructor_ref: &object::ConstructorRef, collection: object::Object<collection::Collection>, description: string::String, name_prefix: string::String, name_with_index_suffix: option::Option<string::String>, royalty: option::Option<royalty::Royalty>, uri: string::String)
+
+ + + +
+Implementation + + +
inline fun create_common_with_collection_as_owner(
+    owner: &signer,
+    constructor_ref: &ConstructorRef,
+    collection: Object<Collection>,
+    description: String,
+    name_prefix: String,
+    // If option::some, numbered token is created - i.e. index is appended to the name.
+    // If option::none, name_prefix is the full name of the token.
+    name_with_index_suffix: Option<String>,
+    royalty: Option<Royalty>,
+    uri: String,
+) {
+    assert!(features::is_collection_owner_enabled(), error::unavailable(ECOLLECTION_OWNER_NOT_SUPPORTED));
+    assert!(object::owner(collection) == signer::address_of(owner), error::unauthenticated(ENOT_OWNER));
+
+    create_common_with_collection_internal(
+        constructor_ref,
+        collection,
+        description,
+        name_prefix,
+        name_with_index_suffix,
+        royalty,
+        uri
+    );
+}
+
+ + + +
+ + + +## Function `create_common_with_collection_internal` + + + +
fun create_common_with_collection_internal(constructor_ref: &object::ConstructorRef, collection: object::Object<collection::Collection>, description: string::String, name_prefix: string::String, name_with_index_suffix: option::Option<string::String>, royalty: option::Option<royalty::Royalty>, uri: string::String)
+
+ + + +
+Implementation + + +
inline fun create_common_with_collection_internal(
+    constructor_ref: &ConstructorRef,
+    collection: Object<Collection>,
+    description: String,
+    name_prefix: String,
+    // If option::some, numbered token is created - i.e. index is appended to the name.
+    // If option::none, name_prefix is the full name of the token.
+    name_with_index_suffix: Option<String>,
+    royalty: Option<Royalty>,
+    uri: String,
+) {
     if (option::is_some(&name_with_index_suffix)) {
         // Be conservative, as we don't know what length the index will be, and assume worst case (20 chars in MAX_U64)
         assert!(
@@ -691,6 +805,50 @@ for additional specialization.
 
 
 
+
+ + + +## Function `create_token_as_collection_owner` + +Same functionality as create_token, but the token can only be created by the collection owner. + + +
public fun create_token_as_collection_owner(creator: &signer, collection: object::Object<collection::Collection>, description: string::String, name: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_token_as_collection_owner(
+    creator: &signer,
+    collection: Object<Collection>,
+    description: String,
+    name: String,
+    royalty: Option<Royalty>,
+    uri: String,
+): ConstructorRef {
+    let creator_address = signer::address_of(creator);
+    let constructor_ref = object::create_object(creator_address);
+    create_common_with_collection_as_owner(
+        creator,
+        &constructor_ref,
+        collection,
+        description,
+        name,
+        option::none(),
+        royalty,
+        uri
+    );
+    constructor_ref
+}
+
+ + +
@@ -792,6 +950,51 @@ while providing sequential names. + + + + +## Function `create_numbered_token_as_collection_owner` + +Same functionality as create_numbered_token_object, but the token can only be created by the collection owner. + + +
public fun create_numbered_token_as_collection_owner(creator: &signer, collection: object::Object<collection::Collection>, description: string::String, name_with_index_prefix: string::String, name_with_index_suffix: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_numbered_token_as_collection_owner(
+    creator: &signer,
+    collection: Object<Collection>,
+    description: String,
+    name_with_index_prefix: String,
+    name_with_index_suffix: String,
+    royalty: Option<Royalty>,
+    uri: String,
+): ConstructorRef {
+    let creator_address = signer::address_of(creator);
+    let constructor_ref = object::create_object(creator_address);
+    create_common_with_collection_as_owner(
+        creator,
+        &constructor_ref,
+        collection,
+        description,
+        name_with_index_prefix,
+        option::some(name_with_index_suffix),
+        royalty,
+        uri
+    );
+    constructor_ref
+}
+
+ + +
@@ -884,6 +1087,50 @@ additional specialization. + + + + +## Function `create_named_token_as_collection_owner` + +Same functionality as create_named_token_object, but the token can only be created by the collection owner. + + +
public fun create_named_token_as_collection_owner(creator: &signer, collection: object::Object<collection::Collection>, description: string::String, name: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_named_token_as_collection_owner(
+    creator: &signer,
+    collection: Object<Collection>,
+    description: String,
+    name: String,
+    royalty: Option<Royalty>,
+    uri: String,
+): ConstructorRef {
+    let seed = create_token_seed(&collection::name(collection), &name);
+    let constructor_ref = object::create_named_object(creator, seed);
+    create_common_with_collection_as_owner(
+        creator,
+        &constructor_ref,
+        collection,
+        description,
+        name,
+        option::none(),
+        royalty,
+        uri
+    );
+    constructor_ref
+}
+
+ + +
@@ -922,6 +1169,51 @@ This function must be called if the collection name has been previously changed. + + + + +## Function `create_named_token_from_seed_as_collection_owner` + +Same functionality as create_named_token_from_seed, but the token can only be created by the collection owner. + + +
public fun create_named_token_from_seed_as_collection_owner(creator: &signer, collection: object::Object<collection::Collection>, description: string::String, name: string::String, seed: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_named_token_from_seed_as_collection_owner(
+    creator: &signer,
+    collection: Object<Collection>,
+    description: String,
+    name: String,
+    seed: String,
+    royalty: Option<Royalty>,
+    uri: String,
+): ConstructorRef {
+    let seed = create_token_name_with_seed(&collection::name(collection), &name, &seed);
+    let constructor_ref = object::create_named_object(creator, seed);
+    create_common_with_collection_as_owner(
+        creator,
+        &constructor_ref,
+        collection,
+        description,
+        name,
+        option::none(),
+        royalty,
+        uri
+    );
+    constructor_ref
+}
+
+ + +
diff --git a/aptos-move/framework/aptos-token-objects/sources/collection.move b/aptos-move/framework/aptos-token-objects/sources/collection.move index 7fd1eac1b559a..86b0930466acd 100644 --- a/aptos-move/framework/aptos-token-objects/sources/collection.move +++ b/aptos-move/framework/aptos-token-objects/sources/collection.move @@ -18,6 +18,7 @@ /// * Add aggregator support when added to framework module aptos_token_objects::collection { use std::error; + use std::features; use std::option::{Self, Option}; use std::signer; use std::string::{Self, String}; @@ -49,6 +50,8 @@ module aptos_token_objects::collection { const EINVALID_MAX_SUPPLY: u64 = 9; /// The collection does not have a max supply const ENO_MAX_SUPPLY_IN_COLLECTION: u64 = 10; + /// The collection owner feature is not supported + const ECOLLECTION_OWNER_NOT_SUPPORTED: u64 = 11; const MAX_COLLECTION_NAME_LENGTH: u64 = 128; const MAX_URI_LENGTH: u64 = 512; @@ -210,6 +213,31 @@ module aptos_token_objects::collection { ) } + /// Same functionality as `create_fixed_collection`, but the caller is the owner of the collection. + /// This means that the caller can transfer the collection to another address. + /// This transfers ownership and minting permissions to the new address. + public fun create_fixed_collection_as_owner( + creator: &signer, + description: String, + max_supply: u64, + name: String, + royalty: Option, + uri: String, + ): ConstructorRef { + assert!(features::is_collection_owner_enabled(), error::unavailable(ECOLLECTION_OWNER_NOT_SUPPORTED)); + + let constructor_ref = create_fixed_collection( + creator, + description, + max_supply, + name, + royalty, + uri, + ); + enable_ungated_transfer(&constructor_ref); + constructor_ref + } + /// Creates an unlimited collection. This has support for supply tracking but does not limit /// the supply of tokens. 
public fun create_unlimited_collection( @@ -238,6 +266,29 @@ module aptos_token_objects::collection { ) } + /// Same functionality as `create_unlimited_collection`, but the caller is the owner of the collection. + /// This means that the caller can transfer the collection to another address. + /// This transfers ownership and minting permissions to the new address. + public fun create_unlimited_collection_as_owner( + creator: &signer, + description: String, + name: String, + royalty: Option, + uri: String, + ): ConstructorRef { + assert!(features::is_collection_owner_enabled(), error::unavailable(ECOLLECTION_OWNER_NOT_SUPPORTED)); + + let constructor_ref = create_unlimited_collection( + creator, + description, + name, + royalty, + uri, + ); + enable_ungated_transfer(&constructor_ref); + constructor_ref + } + /// Creates an untracked collection, or a collection that supports an arbitrary amount of /// tokens. This is useful for mass airdrops that fully leverage Aptos parallelization. /// TODO: Hide this until we bring back meaningful way to enforce burns @@ -302,6 +353,11 @@ module aptos_token_objects::collection { constructor_ref } + inline fun enable_ungated_transfer(constructor_ref: &ConstructorRef) { + let transfer_ref = object::generate_transfer_ref(constructor_ref); + object::enable_ungated_transfer(&transfer_ref); + } + /// Generates the collections address based upon the creators address and the collection's name public fun create_collection_address(creator: &address, name: &String): address { object::create_object_address(creator, create_collection_seed(name)) @@ -753,6 +809,7 @@ module aptos_token_objects::collection { #[expected_failure(abort_code = 0x50003, location = supra_framework::object)] entry fun test_create_and_transfer(creator: &signer, trader: &signer) { let creator_address = signer::address_of(creator); + let trader_address = signer::address_of(trader); let collection_name = string::utf8(b"collection name"); create_collection_helper(creator, 
collection_name); @@ -760,7 +817,24 @@ module aptos_token_objects::collection { create_collection_address(&creator_address, &collection_name), ); assert!(object::owner(collection) == creator_address, 1); - object::transfer(creator, collection, signer::address_of(trader)); + object::transfer(creator, collection, trader_address); + } + + #[test(creator = @0x123, trader = @0x456, supra_framework = @supra_framework)] + entry fun test_create_and_transfer_as_owner(creator: &signer, trader: &signer, supra_framework: &signer) { + features::change_feature_flags_for_testing(supra_framework, vector[features::get_collection_owner_feature()], vector[]); + let creator_address = signer::address_of(creator); + let trader_address = signer::address_of(trader); + let collection_name = string::utf8(b"collection name"); + create_unlimited_collection_as_owner_helper(creator, collection_name); + + let collection = object::address_to_object( + create_collection_address(&creator_address, &collection_name), + ); + assert!(object::owner(collection) == creator_address, 1); + // Transferring owned collections are allowed + object::transfer(creator, collection, trader_address); + assert!(object::owner(collection) == trader_address, 1); } #[test(creator = @0x123)] @@ -945,6 +1019,17 @@ module aptos_token_objects::collection { ) } + #[test_only] + fun create_unlimited_collection_as_owner_helper(creator: &signer, name: String): ConstructorRef { + create_unlimited_collection_as_owner( + creator, + string::utf8(b"description"), + name, + option::none(), + string::utf8(b"uri"), + ) + } + #[test_only] /// Create a token as we cannot create a dependency cycle between collection and token modules. 
fun create_token(creator: &signer): signer { diff --git a/aptos-move/framework/aptos-token-objects/sources/token.move b/aptos-move/framework/aptos-token-objects/sources/token.move index 06bd1d44066e4..0d7c31bafa6ee 100644 --- a/aptos-move/framework/aptos-token-objects/sources/token.move +++ b/aptos-move/framework/aptos-token-objects/sources/token.move @@ -6,6 +6,7 @@ /// module aptos_token_objects::token { use std::error; + use std::features; use std::option::{Self, Option}; use std::string::{Self, String}; use std::signer; @@ -33,6 +34,10 @@ module aptos_token_objects::token { const EDESCRIPTION_TOO_LONG: u64 = 6; /// The seed is over the maximum length const ESEED_TOO_LONG: u64 = 7; + /// The calling signer is not the owner + const ENOT_OWNER: u64 = 8; + /// The collection owner feature is not supported + const ECOLLECTION_OWNER_NOT_SUPPORTED: u64 = 9; const MAX_TOKEN_NAME_LENGTH: u64 = 128; const MAX_TOKEN_SEED_LENGTH: u64 = 128; @@ -156,6 +161,54 @@ module aptos_token_objects::token { ) { assert!(collection::creator(collection) == signer::address_of(creator), error::unauthenticated(ENOT_CREATOR)); + create_common_with_collection_internal( + constructor_ref, + collection, + description, + name_prefix, + name_with_index_suffix, + royalty, + uri + ); + } + + inline fun create_common_with_collection_as_owner( + owner: &signer, + constructor_ref: &ConstructorRef, + collection: Object, + description: String, + name_prefix: String, + // If option::some, numbered token is created - i.e. index is appended to the name. + // If option::none, name_prefix is the full name of the token. 
+ name_with_index_suffix: Option, + royalty: Option, + uri: String, + ) { + assert!(features::is_collection_owner_enabled(), error::unavailable(ECOLLECTION_OWNER_NOT_SUPPORTED)); + assert!(object::owner(collection) == signer::address_of(owner), error::unauthenticated(ENOT_OWNER)); + + create_common_with_collection_internal( + constructor_ref, + collection, + description, + name_prefix, + name_with_index_suffix, + royalty, + uri + ); + } + + inline fun create_common_with_collection_internal( + constructor_ref: &ConstructorRef, + collection: Object, + description: String, + name_prefix: String, + // If option::some, numbered token is created - i.e. index is appended to the name. + // If option::none, name_prefix is the full name of the token. + name_with_index_suffix: Option, + royalty: Option, + uri: String, + ) { if (option::is_some(&name_with_index_suffix)) { // Be conservative, as we don't know what length the index will be, and assume worst case (20 chars in MAX_U64) assert!( @@ -260,6 +313,30 @@ module aptos_token_objects::token { constructor_ref } + /// Same functionality as `create_token`, but the token can only be created by the collection owner. + public fun create_token_as_collection_owner( + creator: &signer, + collection: Object, + description: String, + name: String, + royalty: Option, + uri: String, + ): ConstructorRef { + let creator_address = signer::address_of(creator); + let constructor_ref = object::create_object(creator_address); + create_common_with_collection_as_owner( + creator, + &constructor_ref, + collection, + description, + name, + option::none(), + royalty, + uri + ); + constructor_ref + } + /// Creates a new token object with a unique address and returns the ConstructorRef /// for additional specialization. /// The name is created by concatenating the (name_prefix, index, name_suffix). 
@@ -321,6 +398,31 @@ module aptos_token_objects::token { constructor_ref } + /// Same functionality as `create_numbered_token_object`, but the token can only be created by the collection owner. + public fun create_numbered_token_as_collection_owner( + creator: &signer, + collection: Object, + description: String, + name_with_index_prefix: String, + name_with_index_suffix: String, + royalty: Option, + uri: String, + ): ConstructorRef { + let creator_address = signer::address_of(creator); + let constructor_ref = object::create_object(creator_address); + create_common_with_collection_as_owner( + creator, + &constructor_ref, + collection, + description, + name_with_index_prefix, + option::some(name_with_index_suffix), + royalty, + uri + ); + constructor_ref + } + /// Creates a new token object from a token name and returns the ConstructorRef for /// additional specialization. /// This function must be called if the collection name has been previously changed. @@ -373,6 +475,30 @@ module aptos_token_objects::token { constructor_ref } + /// Same functionality as `create_named_token_object`, but the token can only be created by the collection owner. + public fun create_named_token_as_collection_owner( + creator: &signer, + collection: Object, + description: String, + name: String, + royalty: Option, + uri: String, + ): ConstructorRef { + let seed = create_token_seed(&collection::name(collection), &name); + let constructor_ref = object::create_named_object(creator, seed); + create_common_with_collection_as_owner( + creator, + &constructor_ref, + collection, + description, + name, + option::none(), + royalty, + uri + ); + constructor_ref + } + /// Creates a new token object from a token name and seed. /// Returns the ConstructorRef for additional specialization. /// This function must be called if the collection name has been previously changed. 
@@ -391,6 +517,31 @@ module aptos_token_objects::token { constructor_ref } + /// Same functionality as `create_named_token_from_seed`, but the token can only be created by the collection owner. + public fun create_named_token_from_seed_as_collection_owner( + creator: &signer, + collection: Object, + description: String, + name: String, + seed: String, + royalty: Option, + uri: String, + ): ConstructorRef { + let seed = create_token_name_with_seed(&collection::name(collection), &name, &seed); + let constructor_ref = object::create_named_object(creator, seed); + create_common_with_collection_as_owner( + creator, + &constructor_ref, + collection, + description, + name, + option::none(), + royalty, + uri + ); + constructor_ref + } + #[deprecated] /// DEPRECATED: Use `create` instead for identical behavior. /// @@ -721,6 +872,27 @@ module aptos_token_objects::token { assert!(option::some(expected_royalty) == royalty(token), 2); } + #[test(creator = @0x123, trader = @0x456, supra_framework = @supra_framework)] + fun test_create_and_transfer_token_as_collection_owner(creator: &signer, trader: &signer, supra_framework: &signer) acquires Token { + features::change_feature_flags_for_testing(supra_framework, vector[features::get_collection_owner_feature()], vector[]); + let collection_name = string::utf8(b"collection name"); + let token_name = string::utf8(b"token name"); + + let extend_ref = create_collection_as_collection_owner_helper(creator, collection_name, 1); + let collection = get_collection_from_ref(&extend_ref); + create_named_token_as_collection_owner_helper(creator, collection, token_name); + + let creator_address = signer::address_of(creator); + let token_addr = create_token_address(&creator_address, &collection_name, &token_name); + let token = object::address_to_object(token_addr); + assert!(object::owner(token) == creator_address, 1); + object::transfer(creator, token, signer::address_of(trader)); + assert!(object::owner(token) == signer::address_of(trader), 
1); + + let expected_royalty = royalty::create(25, 10000, creator_address); + assert!(option::some(expected_royalty) == royalty(token), 2); + } + #[test(creator = @0x123, trader = @0x456)] #[expected_failure(abort_code = 0x40002, location = aptos_token_objects::token)] fun test_create_token_non_creator(creator: &signer, trader: &signer) { @@ -732,6 +904,18 @@ module aptos_token_objects::token { ); } + #[test(creator = @0x123, trader = @0x456, supra_framework = @supra_framework)] + #[expected_failure(abort_code = 0x40008, location = aptos_token_objects::token)] + fun test_create_token_non_collection_owner(creator: &signer, trader: &signer, supra_framework: &signer) { + features::change_feature_flags_for_testing(supra_framework, vector[features::get_collection_owner_feature()], vector[]); + let constructor_ref = &create_fixed_collection_as_collection_owner(creator, string::utf8(b"collection name"), 5); + let collection = get_collection_from_ref(&object::generate_extend_ref(constructor_ref)); + create_token_as_collection_owner( + trader, collection, string::utf8(b"token description"), string::utf8(b"token name"), + option::some(royalty::create(25, 10000, signer::address_of(creator))), string::utf8(b"uri"), + ); + } + #[test(creator = @0x123, trader = @0x456)] #[expected_failure(abort_code = 0x40002, location = aptos_token_objects::token)] fun test_create_named_token_non_creator(creator: &signer, trader: &signer) { @@ -740,6 +924,15 @@ module aptos_token_objects::token { create_token_with_collection_helper(trader, collection, string::utf8(b"token name")); } + #[test(creator = @0x123, trader = @0x456, supra_framework = @supra_framework)] + #[expected_failure(abort_code = 0x40008, location = aptos_token_objects::token)] + fun test_create_named_token_non_collection_owner(creator: &signer, trader: &signer, supra_framework: &signer) { + features::change_feature_flags_for_testing(supra_framework, vector[features::get_collection_owner_feature()], vector[]); + let 
constructor_ref = &create_fixed_collection_as_collection_owner(creator, string::utf8(b"collection name"), 5); + let collection = get_collection_from_ref(&object::generate_extend_ref(constructor_ref)); + create_named_token_as_collection_owner_helper(trader, collection, string::utf8(b"token name")); + } + #[test(creator = @0x123, trader = @0x456)] #[expected_failure(abort_code = 0x40002, location = aptos_token_objects::token)] fun test_create_named_token_object_non_creator(creator: &signer, trader: &signer) { @@ -762,6 +955,18 @@ module aptos_token_objects::token { ); } + #[test(creator = @0x123, trader = @0x456, supra_framework = @supra_framework)] + #[expected_failure(abort_code = 0x40008, location = aptos_token_objects::token)] + fun test_create_named_token_from_seed_non_collection_owner(creator: &signer, trader: &signer, supra_framework: &signer) { + features::change_feature_flags_for_testing(supra_framework, vector[features::get_collection_owner_feature()], vector[]); + let constructor_ref = &create_fixed_collection_as_collection_owner(creator, string::utf8(b"collection name"), 5); + let collection = get_collection_from_ref(&object::generate_extend_ref(constructor_ref)); + create_named_token_as_collection_owner( + trader, collection, string::utf8(b"token description"), string::utf8(b"token name"), + option::some(royalty::create(25, 10000, signer::address_of(creator))), string::utf8(b"uri"), + ); + } + #[test(creator = @0x123, trader = @0x456)] fun test_create_and_transfer_token_with_seed(creator: &signer, trader: &signer) acquires Token { let collection_name = string::utf8(b"collection name"); @@ -784,6 +989,45 @@ module aptos_token_objects::token { assert!(option::some(expected_royalty) == royalty(token), 2); } + #[test(creator = @0x123, trader = @0x456, supra_framework = @supra_framework)] + #[expected_failure(abort_code = 0x40008, location = aptos_token_objects::token)] + fun test_create_token_after_transferring_collection(creator: &signer, trader: &signer, 
supra_framework: &signer) { + features::change_feature_flags_for_testing(supra_framework, vector[features::get_collection_owner_feature()], vector[]); + let constructor_ref = &create_fixed_collection_as_collection_owner(creator, string::utf8(b"collection name"), 5); + let collection = get_collection_from_ref(&object::generate_extend_ref(constructor_ref)); + create_token_as_collection_owner( + creator, collection, string::utf8(b"token description"), string::utf8(b"token name"), + option::some(royalty::create(25, 10000, signer::address_of(creator))), string::utf8(b"uri"), + ); + + object::transfer(creator, collection, signer::address_of(trader)); + + // This should fail as the collection is no longer owned by the creator. + create_token_as_collection_owner( + creator, collection, string::utf8(b"token description"), string::utf8(b"token name"), + option::some(royalty::create(25, 10000, signer::address_of(creator))), string::utf8(b"uri"), + ); + } + + #[test(creator = @0x123, trader = @0x456, supra_framework = @supra_framework)] + fun create_token_works_with_new_collection_owner(creator: &signer, trader: &signer, supra_framework: &signer) { + features::change_feature_flags_for_testing(supra_framework, vector[features::get_collection_owner_feature()], vector[]); + let constructor_ref = &create_fixed_collection_as_collection_owner(creator, string::utf8(b"collection name"), 5); + let collection = get_collection_from_ref(&object::generate_extend_ref(constructor_ref)); + create_token_as_collection_owner( + creator, collection, string::utf8(b"token description"), string::utf8(b"token name"), + option::some(royalty::create(25, 10000, signer::address_of(creator))), string::utf8(b"uri"), + ); + + object::transfer(creator, collection, signer::address_of(trader)); + + // This should pass as `trader` is the new collection owner + create_token_as_collection_owner( + trader, collection, string::utf8(b"token description"), string::utf8(b"token name"), + 
option::some(royalty::create(25, 10000, signer::address_of(creator))), string::utf8(b"uri"), + ); + } + #[test(creator = @0x123)] fun test_collection_royalty(creator: &signer) acquires Token { let collection_name = string::utf8(b"collection name"); @@ -1059,6 +1303,12 @@ module aptos_token_objects::token { object::generate_extend_ref(&constructor_ref) } + #[test_only] + fun create_collection_as_collection_owner_helper(creator: &signer, collection_name: String, max_supply: u64): ExtendRef { + let constructor_ref = create_fixed_collection_as_collection_owner(creator, collection_name, max_supply); + object::generate_extend_ref(&constructor_ref) + } + #[test_only] fun create_fixed_collection(creator: &signer, collection_name: String, max_supply: u64): ConstructorRef { collection::create_fixed_collection( @@ -1071,6 +1321,22 @@ module aptos_token_objects::token { ) } + #[test_only] + fun create_fixed_collection_as_collection_owner( + creator: &signer, + collection_name: String, + max_supply: u64, + ): ConstructorRef { + collection::create_fixed_collection_as_owner( + creator, + string::utf8(b"collection description as owner"), + max_supply, + collection_name, + option::none(), + string::utf8(b"collection uri as owner"), + ) + } + #[test_only] fun create_token_helper(creator: &signer, collection_name: String, token_name: String): ConstructorRef { create_named_token( @@ -1084,7 +1350,11 @@ module aptos_token_objects::token { } #[test_only] - fun create_token_with_collection_helper(creator: &signer, collection: Object, token_name: String): ConstructorRef { + fun create_token_with_collection_helper( + creator: &signer, + collection: Object, + token_name: String + ): ConstructorRef { create_named_token_object( creator, collection, @@ -1096,7 +1366,28 @@ module aptos_token_objects::token { } #[test_only] - fun create_token_object_with_seed_helper(creator: &signer, collection: Object, token_name: String, seed: String): ConstructorRef { + fun 
create_named_token_as_collection_owner_helper( + creator: &signer, + collection: Object, + token_name: String + ): ConstructorRef { + create_named_token_as_collection_owner( + creator, + collection, + string::utf8(b"token description"), + token_name, + option::some(royalty::create(25, 10000, signer::address_of(creator))), + string::utf8(b"uri"), + ) + } + + #[test_only] + fun create_token_object_with_seed_helper( + creator: &signer, + collection: Object, + token_name: String, + seed: String + ): ConstructorRef { create_named_token_from_seed( creator, collection, @@ -1109,7 +1400,11 @@ module aptos_token_objects::token { } #[test_only] - fun create_numbered_token_helper(creator: &signer, collection: Object, token_prefix: String): ConstructorRef { + fun create_numbered_token_helper( + creator: &signer, + collection: Object, + token_prefix: String + ): ConstructorRef { create_numbered_token_object( creator, collection, diff --git a/aptos-move/framework/aptos-token-objects/tests/compiler-v2-doc/aptos_token.md b/aptos-move/framework/aptos-token-objects/tests/compiler-v2-doc/aptos_token.md new file mode 100644 index 0000000000000..c7c9f64e1dfca --- /dev/null +++ b/aptos-move/framework/aptos-token-objects/tests/compiler-v2-doc/aptos_token.md @@ -0,0 +1,1703 @@ + + + +# Module `0x4::aptos_token` + +This defines a minimally viable token for no-code solutions akin to the original token at +0x3::token module. 
+The key features are: +* Base token and collection features +* Creator definable mutability for tokens +* Creator-based freezing of tokens +* Standard object-based transfer and events +* Metadata property type + + +- [Resource `AptosCollection`](#0x4_aptos_token_AptosCollection) +- [Resource `AptosToken`](#0x4_aptos_token_AptosToken) +- [Constants](#@Constants_0) +- [Function `create_collection`](#0x4_aptos_token_create_collection) +- [Function `create_collection_object`](#0x4_aptos_token_create_collection_object) +- [Function `mint`](#0x4_aptos_token_mint) +- [Function `mint_token_object`](#0x4_aptos_token_mint_token_object) +- [Function `mint_soul_bound`](#0x4_aptos_token_mint_soul_bound) +- [Function `mint_soul_bound_token_object`](#0x4_aptos_token_mint_soul_bound_token_object) +- [Function `mint_internal`](#0x4_aptos_token_mint_internal) +- [Function `borrow`](#0x4_aptos_token_borrow) +- [Function `are_properties_mutable`](#0x4_aptos_token_are_properties_mutable) +- [Function `is_burnable`](#0x4_aptos_token_is_burnable) +- [Function `is_freezable_by_creator`](#0x4_aptos_token_is_freezable_by_creator) +- [Function `is_mutable_description`](#0x4_aptos_token_is_mutable_description) +- [Function `is_mutable_name`](#0x4_aptos_token_is_mutable_name) +- [Function `is_mutable_uri`](#0x4_aptos_token_is_mutable_uri) +- [Function `authorized_borrow`](#0x4_aptos_token_authorized_borrow) +- [Function `burn`](#0x4_aptos_token_burn) +- [Function `freeze_transfer`](#0x4_aptos_token_freeze_transfer) +- [Function `unfreeze_transfer`](#0x4_aptos_token_unfreeze_transfer) +- [Function `set_description`](#0x4_aptos_token_set_description) +- [Function `set_name`](#0x4_aptos_token_set_name) +- [Function `set_uri`](#0x4_aptos_token_set_uri) +- [Function `add_property`](#0x4_aptos_token_add_property) +- [Function `add_typed_property`](#0x4_aptos_token_add_typed_property) +- [Function `remove_property`](#0x4_aptos_token_remove_property) +- [Function 
`update_property`](#0x4_aptos_token_update_property) +- [Function `update_typed_property`](#0x4_aptos_token_update_typed_property) +- [Function `collection_object`](#0x4_aptos_token_collection_object) +- [Function `borrow_collection`](#0x4_aptos_token_borrow_collection) +- [Function `is_mutable_collection_description`](#0x4_aptos_token_is_mutable_collection_description) +- [Function `is_mutable_collection_royalty`](#0x4_aptos_token_is_mutable_collection_royalty) +- [Function `is_mutable_collection_uri`](#0x4_aptos_token_is_mutable_collection_uri) +- [Function `is_mutable_collection_token_description`](#0x4_aptos_token_is_mutable_collection_token_description) +- [Function `is_mutable_collection_token_name`](#0x4_aptos_token_is_mutable_collection_token_name) +- [Function `is_mutable_collection_token_uri`](#0x4_aptos_token_is_mutable_collection_token_uri) +- [Function `is_mutable_collection_token_properties`](#0x4_aptos_token_is_mutable_collection_token_properties) +- [Function `are_collection_tokens_burnable`](#0x4_aptos_token_are_collection_tokens_burnable) +- [Function `are_collection_tokens_freezable`](#0x4_aptos_token_are_collection_tokens_freezable) +- [Function `authorized_borrow_collection`](#0x4_aptos_token_authorized_borrow_collection) +- [Function `set_collection_description`](#0x4_aptos_token_set_collection_description) +- [Function `set_collection_royalties`](#0x4_aptos_token_set_collection_royalties) +- [Function `set_collection_royalties_call`](#0x4_aptos_token_set_collection_royalties_call) +- [Function `set_collection_uri`](#0x4_aptos_token_set_collection_uri) + + +
use 0x1::error;
+use 0x1::object;
+use 0x1::option;
+use 0x1::signer;
+use 0x1::string;
+use 0x4::collection;
+use 0x4::property_map;
+use 0x4::royalty;
+use 0x4::token;
+
+ + + + + +## Resource `AptosCollection` + +Storage state for managing the no-code Collection. + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct AptosCollection has key
+
+ + + +
+Fields + + +
+
+mutator_ref: option::Option<collection::MutatorRef> +
+
+ Used to mutate collection fields +
+
+royalty_mutator_ref: option::Option<royalty::MutatorRef> +
+
+ Used to mutate royalties +
+
+mutable_description: bool +
+
+ Determines if the creator can mutate the collection's description +
+
+mutable_uri: bool +
+
+ Determines if the creator can mutate the collection's uri +
+
+mutable_token_description: bool +
+
+ Determines if the creator can mutate token descriptions +
+
+mutable_token_name: bool +
+
+ Determines if the creator can mutate token names +
+
+mutable_token_properties: bool +
+
+ Determines if the creator can mutate token properties +
+
+mutable_token_uri: bool +
+
+ Determines if the creator can mutate token uris +
+
+tokens_burnable_by_creator: bool +
+
+ Determines if the creator can burn tokens +
+
+tokens_freezable_by_creator: bool +
+
+ Determines if the creator can freeze tokens +
+
+ + +
+ + + +## Resource `AptosToken` + +Storage state for managing the no-code Token. + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct AptosToken has key
+
+ + + +
+Fields + + +
+
+burn_ref: option::Option<token::BurnRef> +
+
+ Used to burn. +
+
+transfer_ref: option::Option<object::TransferRef> +
+
+ Used to control freeze. +
+
+mutator_ref: option::Option<token::MutatorRef> +
+
+ Used to mutate fields +
+
+property_mutator_ref: property_map::MutatorRef +
+
+ Used to mutate properties +
+
+ + +
+ + + +## Constants + + + + +The collection does not exist + + +
const ECOLLECTION_DOES_NOT_EXIST: u64 = 1;
+
+ + + + + +The field being changed is not mutable + + +
const EFIELD_NOT_MUTABLE: u64 = 4;
+
+ + + + + +The provided signer is not the creator + + +
const ENOT_CREATOR: u64 = 3;
+
+ + + + + +The token does not exist + + +
const ETOKEN_DOES_NOT_EXIST: u64 = 2;
+
+ + + + + +The property map being mutated is not mutable + + +
const EPROPERTIES_NOT_MUTABLE: u64 = 6;
+
+ + + + + +The token being burned is not burnable + + +
const ETOKEN_NOT_BURNABLE: u64 = 5;
+
+ + + + + +## Function `create_collection` + +Create a new collection + + +
public entry fun create_collection(creator: &signer, description: string::String, max_supply: u64, name: string::String, uri: string::String, mutable_description: bool, mutable_royalty: bool, mutable_uri: bool, mutable_token_description: bool, mutable_token_name: bool, mutable_token_properties: bool, mutable_token_uri: bool, tokens_burnable_by_creator: bool, tokens_freezable_by_creator: bool, royalty_numerator: u64, royalty_denominator: u64)
+
+ + + +
+Implementation + + +
public entry fun create_collection(
+    creator: &signer,
+    description: String,
+    max_supply: u64,
+    name: String,
+    uri: String,
+    mutable_description: bool,
+    mutable_royalty: bool,
+    mutable_uri: bool,
+    mutable_token_description: bool,
+    mutable_token_name: bool,
+    mutable_token_properties: bool,
+    mutable_token_uri: bool,
+    tokens_burnable_by_creator: bool,
+    tokens_freezable_by_creator: bool,
+    royalty_numerator: u64,
+    royalty_denominator: u64,
+) {
+    create_collection_object(
+        creator,
+        description,
+        max_supply,
+        name,
+        uri,
+        mutable_description,
+        mutable_royalty,
+        mutable_uri,
+        mutable_token_description,
+        mutable_token_name,
+        mutable_token_properties,
+        mutable_token_uri,
+        tokens_burnable_by_creator,
+        tokens_freezable_by_creator,
+        royalty_numerator,
+        royalty_denominator
+    );
+}
+
+ + + +
+ + + +## Function `create_collection_object` + + + +
public fun create_collection_object(creator: &signer, description: string::String, max_supply: u64, name: string::String, uri: string::String, mutable_description: bool, mutable_royalty: bool, mutable_uri: bool, mutable_token_description: bool, mutable_token_name: bool, mutable_token_properties: bool, mutable_token_uri: bool, tokens_burnable_by_creator: bool, tokens_freezable_by_creator: bool, royalty_numerator: u64, royalty_denominator: u64): object::Object<aptos_token::AptosCollection>
+
+ + + +
+Implementation + + +
public fun create_collection_object(
+    creator: &signer,
+    description: String,
+    max_supply: u64,
+    name: String,
+    uri: String,
+    mutable_description: bool,
+    mutable_royalty: bool,
+    mutable_uri: bool,
+    mutable_token_description: bool,
+    mutable_token_name: bool,
+    mutable_token_properties: bool,
+    mutable_token_uri: bool,
+    tokens_burnable_by_creator: bool,
+    tokens_freezable_by_creator: bool,
+    royalty_numerator: u64,
+    royalty_denominator: u64,
+): Object<AptosCollection> {
+    let creator_addr = signer::address_of(creator);
+    let royalty = royalty::create(royalty_numerator, royalty_denominator, creator_addr);
+    let constructor_ref = collection::create_fixed_collection(
+        creator,
+        description,
+        max_supply,
+        name,
+        option::some(royalty),
+        uri,
+    );
+
+    let object_signer = object::generate_signer(&constructor_ref);
+    let mutator_ref = if (mutable_description || mutable_uri) {
+        option::some(collection::generate_mutator_ref(&constructor_ref))
+    } else {
+        option::none()
+    };
+
+    let royalty_mutator_ref = if (mutable_royalty) {
+        option::some(royalty::generate_mutator_ref(object::generate_extend_ref(&constructor_ref)))
+    } else {
+        option::none()
+    };
+
+    let aptos_collection = AptosCollection {
+        mutator_ref,
+        royalty_mutator_ref,
+        mutable_description,
+        mutable_uri,
+        mutable_token_description,
+        mutable_token_name,
+        mutable_token_properties,
+        mutable_token_uri,
+        tokens_burnable_by_creator,
+        tokens_freezable_by_creator,
+    };
+    move_to(&object_signer, aptos_collection);
+    object::object_from_constructor_ref(&constructor_ref)
+}
+
+ + + +
+ + + +## Function `mint` + +With an existing collection, directly mint a viable token into the creators account. + + +
public entry fun mint(creator: &signer, collection: string::String, description: string::String, name: string::String, uri: string::String, property_keys: vector<string::String>, property_types: vector<string::String>, property_values: vector<vector<u8>>)
+
+ + + +
+Implementation + + +
public entry fun mint(
+    creator: &signer,
+    collection: String,
+    description: String,
+    name: String,
+    uri: String,
+    property_keys: vector<String>,
+    property_types: vector<String>,
+    property_values: vector<vector<u8>>,
+) acquires AptosCollection, AptosToken {
+    mint_token_object(creator, collection, description, name, uri, property_keys, property_types, property_values);
+}
+
+ + + +
+ + + +## Function `mint_token_object` + +Mint a token into an existing collection, and retrieve the object / address of the token. + + +
public fun mint_token_object(creator: &signer, collection: string::String, description: string::String, name: string::String, uri: string::String, property_keys: vector<string::String>, property_types: vector<string::String>, property_values: vector<vector<u8>>): object::Object<aptos_token::AptosToken>
+
+ + + +
+Implementation + + +
public fun mint_token_object(
+    creator: &signer,
+    collection: String,
+    description: String,
+    name: String,
+    uri: String,
+    property_keys: vector<String>,
+    property_types: vector<String>,
+    property_values: vector<vector<u8>>,
+): Object<AptosToken> acquires AptosCollection, AptosToken {
+    let constructor_ref = mint_internal(
+        creator,
+        collection,
+        description,
+        name,
+        uri,
+        property_keys,
+        property_types,
+        property_values,
+    );
+
+    let collection = collection_object(creator, &collection);
+
+    // If tokens are freezable, add a transfer ref to be able to freeze transfers
+    let freezable_by_creator = are_collection_tokens_freezable(collection);
+    if (freezable_by_creator) {
+        let aptos_token_addr = object::address_from_constructor_ref(&constructor_ref);
+        let aptos_token = borrow_global_mut<AptosToken>(aptos_token_addr);
+        let transfer_ref = object::generate_transfer_ref(&constructor_ref);
+        option::fill(&mut aptos_token.transfer_ref, transfer_ref);
+    };
+
+    object::object_from_constructor_ref(&constructor_ref)
+}
+
+ + + +
+ + + +## Function `mint_soul_bound` + +With an existing collection, directly mint a soul bound token into the recipient's account. + + +
public entry fun mint_soul_bound(creator: &signer, collection: string::String, description: string::String, name: string::String, uri: string::String, property_keys: vector<string::String>, property_types: vector<string::String>, property_values: vector<vector<u8>>, soul_bound_to: address)
+
+ + + +
+Implementation + + +
public entry fun mint_soul_bound(
+    creator: &signer,
+    collection: String,
+    description: String,
+    name: String,
+    uri: String,
+    property_keys: vector<String>,
+    property_types: vector<String>,
+    property_values: vector<vector<u8>>,
+    soul_bound_to: address,
+) acquires AptosCollection {
+    mint_soul_bound_token_object(
+        creator,
+        collection,
+        description,
+        name,
+        uri,
+        property_keys,
+        property_types,
+        property_values,
+        soul_bound_to
+    );
+}
+
+ + + +
+ + + +## Function `mint_soul_bound_token_object` + +With an existing collection, directly mint a soul bound token into the recipient's account. + + +
public fun mint_soul_bound_token_object(creator: &signer, collection: string::String, description: string::String, name: string::String, uri: string::String, property_keys: vector<string::String>, property_types: vector<string::String>, property_values: vector<vector<u8>>, soul_bound_to: address): object::Object<aptos_token::AptosToken>
+
+ + + +
+Implementation + + +
public fun mint_soul_bound_token_object(
+    creator: &signer,
+    collection: String,
+    description: String,
+    name: String,
+    uri: String,
+    property_keys: vector<String>,
+    property_types: vector<String>,
+    property_values: vector<vector<u8>>,
+    soul_bound_to: address,
+): Object<AptosToken> acquires AptosCollection {
+    let constructor_ref = mint_internal(
+        creator,
+        collection,
+        description,
+        name,
+        uri,
+        property_keys,
+        property_types,
+        property_values,
+    );
+
+    let transfer_ref = object::generate_transfer_ref(&constructor_ref);
+    let linear_transfer_ref = object::generate_linear_transfer_ref(&transfer_ref);
+    object::transfer_with_ref(linear_transfer_ref, soul_bound_to);
+    object::disable_ungated_transfer(&transfer_ref);
+
+    object::object_from_constructor_ref(&constructor_ref)
+}
+
+ + + +
+ + + +## Function `mint_internal` + + + +
fun mint_internal(creator: &signer, collection: string::String, description: string::String, name: string::String, uri: string::String, property_keys: vector<string::String>, property_types: vector<string::String>, property_values: vector<vector<u8>>): object::ConstructorRef
+
+ + + +
+Implementation + + +
fun mint_internal(
+    creator: &signer,
+    collection: String,
+    description: String,
+    name: String,
+    uri: String,
+    property_keys: vector<String>,
+    property_types: vector<String>,
+    property_values: vector<vector<u8>>,
+): ConstructorRef acquires AptosCollection {
+    let constructor_ref = token::create(creator, collection, description, name, option::none(), uri);
+
+    let object_signer = object::generate_signer(&constructor_ref);
+
+    let collection_obj = collection_object(creator, &collection);
+    let collection = borrow_collection(&collection_obj);
+
+    let mutator_ref = if (
+        collection.mutable_token_description
+            || collection.mutable_token_name
+            || collection.mutable_token_uri
+    ) {
+        option::some(token::generate_mutator_ref(&constructor_ref))
+    } else {
+        option::none()
+    };
+
+    let burn_ref = if (collection.tokens_burnable_by_creator) {
+        option::some(token::generate_burn_ref(&constructor_ref))
+    } else {
+        option::none()
+    };
+
+    let aptos_token = AptosToken {
+        burn_ref,
+        transfer_ref: option::none(),
+        mutator_ref,
+        property_mutator_ref: property_map::generate_mutator_ref(&constructor_ref),
+    };
+    move_to(&object_signer, aptos_token);
+
+    let properties = property_map::prepare_input(property_keys, property_types, property_values);
+    property_map::init(&constructor_ref, properties);
+
+    constructor_ref
+}
+
+ + + +
+ + + +## Function `borrow` + + + +
fun borrow<T: key>(token: &object::Object<T>): &aptos_token::AptosToken
+
+ + + +
+Implementation + + +
inline fun borrow<T: key>(token: &Object<T>): &AptosToken {
+    let token_address = object::object_address(token);
+    assert!(
+        exists<AptosToken>(token_address),
+        error::not_found(ETOKEN_DOES_NOT_EXIST),
+    );
+    borrow_global<AptosToken>(token_address)
+}
+
+ + + +
+ + + +## Function `are_properties_mutable` + + + +
#[view]
+public fun are_properties_mutable<T: key>(token: object::Object<T>): bool
+
+ + + +
+Implementation + + +
public fun are_properties_mutable<T: key>(token: Object<T>): bool acquires AptosCollection {
+    let collection = token::collection_object(token);
+    borrow_collection(&collection).mutable_token_properties
+}
+
+ + + +
+ + + +## Function `is_burnable` + + + +
#[view]
+public fun is_burnable<T: key>(token: object::Object<T>): bool
+
+ + + +
+Implementation + + +
public fun is_burnable<T: key>(token: Object<T>): bool acquires AptosToken {
+    option::is_some(&borrow(&token).burn_ref)
+}
+
+ + + +
+ + + +## Function `is_freezable_by_creator` + + + +
#[view]
+public fun is_freezable_by_creator<T: key>(token: object::Object<T>): bool
+
+ + + +
+Implementation + + +
public fun is_freezable_by_creator<T: key>(token: Object<T>): bool acquires AptosCollection {
+    are_collection_tokens_freezable(token::collection_object(token))
+}
+
+ + + +
+ + + +## Function `is_mutable_description` + + + +
#[view]
+public fun is_mutable_description<T: key>(token: object::Object<T>): bool
+
+ + + +
+Implementation + + +
public fun is_mutable_description<T: key>(token: Object<T>): bool acquires AptosCollection {
+    is_mutable_collection_token_description(token::collection_object(token))
+}
+
+ + + +
+ + + +## Function `is_mutable_name` + + + +
#[view]
+public fun is_mutable_name<T: key>(token: object::Object<T>): bool
+
+ + + +
+Implementation + + +
public fun is_mutable_name<T: key>(token: Object<T>): bool acquires AptosCollection {
+    is_mutable_collection_token_name(token::collection_object(token))
+}
+
+ + + +
+ + + +## Function `is_mutable_uri` + + + +
#[view]
+public fun is_mutable_uri<T: key>(token: object::Object<T>): bool
+
+ + + +
+Implementation + + +
public fun is_mutable_uri<T: key>(token: Object<T>): bool acquires AptosCollection {
+    is_mutable_collection_token_uri(token::collection_object(token))
+}
+
+ + + +
+ + + +## Function `authorized_borrow` + + + +
fun authorized_borrow<T: key>(token: &object::Object<T>, creator: &signer): &aptos_token::AptosToken
+
+ + + +
+Implementation + + +
inline fun authorized_borrow<T: key>(token: &Object<T>, creator: &signer): &AptosToken {
+    let token_address = object::object_address(token);
+    assert!(
+        exists<AptosToken>(token_address),
+        error::not_found(ETOKEN_DOES_NOT_EXIST),
+    );
+
+    assert!(
+        token::creator(*token) == signer::address_of(creator),
+        error::permission_denied(ENOT_CREATOR),
+    );
+    borrow_global<AptosToken>(token_address)
+}
+
+ + + +
+ + + +## Function `burn` + + + +
public entry fun burn<T: key>(creator: &signer, token: object::Object<T>)
+
+ + + +
+Implementation + + +
public entry fun burn<T: key>(creator: &signer, token: Object<T>) acquires AptosToken {
+    let aptos_token = authorized_borrow(&token, creator);
+    assert!(
+        option::is_some(&aptos_token.burn_ref),
+        error::permission_denied(ETOKEN_NOT_BURNABLE),
+    );
+    move aptos_token;
+    let aptos_token = move_from<AptosToken>(object::object_address(&token));
+    let AptosToken {
+        burn_ref,
+        transfer_ref: _,
+        mutator_ref: _,
+        property_mutator_ref,
+    } = aptos_token;
+    property_map::burn(property_mutator_ref);
+    token::burn(option::extract(&mut burn_ref));
+}
+
+ + + +
+ + + +## Function `freeze_transfer` + + + +
public entry fun freeze_transfer<T: key>(creator: &signer, token: object::Object<T>)
+
+ + + +
+Implementation + + +
public entry fun freeze_transfer<T: key>(creator: &signer, token: Object<T>) acquires AptosCollection, AptosToken {
+    let aptos_token = authorized_borrow(&token, creator);
+    assert!(
+        are_collection_tokens_freezable(token::collection_object(token))
+            && option::is_some(&aptos_token.transfer_ref),
+        error::permission_denied(EFIELD_NOT_MUTABLE),
+    );
+    object::disable_ungated_transfer(option::borrow(&aptos_token.transfer_ref));
+}
+
+ + + +
+ + + +## Function `unfreeze_transfer` + + + +
public entry fun unfreeze_transfer<T: key>(creator: &signer, token: object::Object<T>)
+
+ + + +
+Implementation + + +
public entry fun unfreeze_transfer<T: key>(
+    creator: &signer,
+    token: Object<T>
+) acquires AptosCollection, AptosToken {
+    let aptos_token = authorized_borrow(&token, creator);
+    assert!(
+        are_collection_tokens_freezable(token::collection_object(token))
+            && option::is_some(&aptos_token.transfer_ref),
+        error::permission_denied(EFIELD_NOT_MUTABLE),
+    );
+    object::enable_ungated_transfer(option::borrow(&aptos_token.transfer_ref));
+}
+
+ + + +
+ + + +## Function `set_description` + + + +
public entry fun set_description<T: key>(creator: &signer, token: object::Object<T>, description: string::String)
+
+ + + +
+Implementation + + +
public entry fun set_description<T: key>(
+    creator: &signer,
+    token: Object<T>,
+    description: String,
+) acquires AptosCollection, AptosToken {
+    assert!(
+        is_mutable_description(token),
+        error::permission_denied(EFIELD_NOT_MUTABLE),
+    );
+    let aptos_token = authorized_borrow(&token, creator);
+    token::set_description(option::borrow(&aptos_token.mutator_ref), description);
+}
+
+ + + +
+ + + +## Function `set_name` + + + +
public entry fun set_name<T: key>(creator: &signer, token: object::Object<T>, name: string::String)
+
+ + + +
+Implementation + + +
public entry fun set_name<T: key>(
+    creator: &signer,
+    token: Object<T>,
+    name: String,
+) acquires AptosCollection, AptosToken {
+    assert!(
+        is_mutable_name(token),
+        error::permission_denied(EFIELD_NOT_MUTABLE),
+    );
+    let aptos_token = authorized_borrow(&token, creator);
+    token::set_name(option::borrow(&aptos_token.mutator_ref), name);
+}
+
+ + + +
+ + + +## Function `set_uri` + + + +
public entry fun set_uri<T: key>(creator: &signer, token: object::Object<T>, uri: string::String)
+
+ + + +
+Implementation + + +
public entry fun set_uri<T: key>(
+    creator: &signer,
+    token: Object<T>,
+    uri: String,
+) acquires AptosCollection, AptosToken {
+    assert!(
+        is_mutable_uri(token),
+        error::permission_denied(EFIELD_NOT_MUTABLE),
+    );
+    let aptos_token = authorized_borrow(&token, creator);
+    token::set_uri(option::borrow(&aptos_token.mutator_ref), uri);
+}
+
+ + + +
+ + + +## Function `add_property` + + + +
public entry fun add_property<T: key>(creator: &signer, token: object::Object<T>, key: string::String, type: string::String, value: vector<u8>)
+
+ + + +
+Implementation + + +
public entry fun add_property<T: key>(
+    creator: &signer,
+    token: Object<T>,
+    key: String,
+    type: String,
+    value: vector<u8>,
+) acquires AptosCollection, AptosToken {
+    let aptos_token = authorized_borrow(&token, creator);
+    assert!(
+        are_properties_mutable(token),
+        error::permission_denied(EPROPERTIES_NOT_MUTABLE),
+    );
+
+    property_map::add(&aptos_token.property_mutator_ref, key, type, value);
+}
+
+ + + +
+ + + +## Function `add_typed_property` + + + +
public entry fun add_typed_property<T: key, V: drop>(creator: &signer, token: object::Object<T>, key: string::String, value: V)
+
+ + + +
+Implementation + + +
public entry fun add_typed_property<T: key, V: drop>(
+    creator: &signer,
+    token: Object<T>,
+    key: String,
+    value: V,
+) acquires AptosCollection, AptosToken {
+    let aptos_token = authorized_borrow(&token, creator);
+    assert!(
+        are_properties_mutable(token),
+        error::permission_denied(EPROPERTIES_NOT_MUTABLE),
+    );
+
+    property_map::add_typed(&aptos_token.property_mutator_ref, key, value);
+}
+
+ + + +
+ + + +## Function `remove_property` + + + +
public entry fun remove_property<T: key>(creator: &signer, token: object::Object<T>, key: string::String)
+
+ + + +
+Implementation + + +
public entry fun remove_property<T: key>(
+    creator: &signer,
+    token: Object<T>,
+    key: String,
+) acquires AptosCollection, AptosToken {
+    let aptos_token = authorized_borrow(&token, creator);
+    assert!(
+        are_properties_mutable(token),
+        error::permission_denied(EPROPERTIES_NOT_MUTABLE),
+    );
+
+    property_map::remove(&aptos_token.property_mutator_ref, &key);
+}
+
+ + + +
+ + + +## Function `update_property` + + + +
public entry fun update_property<T: key>(creator: &signer, token: object::Object<T>, key: string::String, type: string::String, value: vector<u8>)
+
+ + + +
+Implementation + + +
public entry fun update_property<T: key>(
+    creator: &signer,
+    token: Object<T>,
+    key: String,
+    type: String,
+    value: vector<u8>,
+) acquires AptosCollection, AptosToken {
+    let aptos_token = authorized_borrow(&token, creator);
+    assert!(
+        are_properties_mutable(token),
+        error::permission_denied(EPROPERTIES_NOT_MUTABLE),
+    );
+
+    property_map::update(&aptos_token.property_mutator_ref, &key, type, value);
+}
+
+ + + +
+ + + +## Function `update_typed_property` + + + +
public entry fun update_typed_property<T: key, V: drop>(creator: &signer, token: object::Object<T>, key: string::String, value: V)
+
+ + + +
+Implementation + + +
public entry fun update_typed_property<T: key, V: drop>(
+    creator: &signer,
+    token: Object<T>,
+    key: String,
+    value: V,
+) acquires AptosCollection, AptosToken {
+    let aptos_token = authorized_borrow(&token, creator);
+    assert!(
+        are_properties_mutable(token),
+        error::permission_denied(EPROPERTIES_NOT_MUTABLE),
+    );
+
+    property_map::update_typed(&aptos_token.property_mutator_ref, &key, value);
+}
+
+ + + +
+ + + +## Function `collection_object` + + + +
fun collection_object(creator: &signer, name: &string::String): object::Object<aptos_token::AptosCollection>
+
+ + + +
+Implementation + + +
inline fun collection_object(creator: &signer, name: &String): Object<AptosCollection> {
+    let collection_addr = collection::create_collection_address(&signer::address_of(creator), name);
+    object::address_to_object<AptosCollection>(collection_addr)
+}
+
+ + + +
+ + + +## Function `borrow_collection` + + + +
fun borrow_collection<T: key>(token: &object::Object<T>): &aptos_token::AptosCollection
+
+ + + +
+Implementation + + +
inline fun borrow_collection<T: key>(token: &Object<T>): &AptosCollection {
+    let collection_address = object::object_address(token);
+    assert!(
+        exists<AptosCollection>(collection_address),
+        error::not_found(ECOLLECTION_DOES_NOT_EXIST),
+    );
+    borrow_global<AptosCollection>(collection_address)
+}
+
+ + + +
+ + + +## Function `is_mutable_collection_description` + + + +
public fun is_mutable_collection_description<T: key>(collection: object::Object<T>): bool
+
+ + + +
+Implementation + + +
public fun is_mutable_collection_description<T: key>(
+    collection: Object<T>,
+): bool acquires AptosCollection {
+    borrow_collection(&collection).mutable_description
+}
+
+ + + +
+ + + +## Function `is_mutable_collection_royalty` + + + +
public fun is_mutable_collection_royalty<T: key>(collection: object::Object<T>): bool
+
+ + + +
+Implementation + + +
public fun is_mutable_collection_royalty<T: key>(
+    collection: Object<T>,
+): bool acquires AptosCollection {
+    option::is_some(&borrow_collection(&collection).royalty_mutator_ref)
+}
+
+ + + +
+ + + +## Function `is_mutable_collection_uri` + + + +
public fun is_mutable_collection_uri<T: key>(collection: object::Object<T>): bool
+
+ + + +
+Implementation + + +
public fun is_mutable_collection_uri<T: key>(
+    collection: Object<T>,
+): bool acquires AptosCollection {
+    borrow_collection(&collection).mutable_uri
+}
+
+ + + +
+ + + +## Function `is_mutable_collection_token_description` + + + +
public fun is_mutable_collection_token_description<T: key>(collection: object::Object<T>): bool
+
+ + + +
+Implementation + + +
public fun is_mutable_collection_token_description<T: key>(
+    collection: Object<T>,
+): bool acquires AptosCollection {
+    borrow_collection(&collection).mutable_token_description
+}
+
+ + + +
+ + + +## Function `is_mutable_collection_token_name` + + + +
public fun is_mutable_collection_token_name<T: key>(collection: object::Object<T>): bool
+
+ + + +
+Implementation + + +
public fun is_mutable_collection_token_name<T: key>(
+    collection: Object<T>,
+): bool acquires AptosCollection {
+    borrow_collection(&collection).mutable_token_name
+}
+
+ + + +
+ + + +## Function `is_mutable_collection_token_uri` + + + +
public fun is_mutable_collection_token_uri<T: key>(collection: object::Object<T>): bool
+
+ + + +
+Implementation + + +
public fun is_mutable_collection_token_uri<T: key>(
+    collection: Object<T>,
+): bool acquires AptosCollection {
+    borrow_collection(&collection).mutable_token_uri
+}
+
+ + + +
+ + + +## Function `is_mutable_collection_token_properties` + + + +
public fun is_mutable_collection_token_properties<T: key>(collection: object::Object<T>): bool
+
+ + + +
+Implementation + + +
public fun is_mutable_collection_token_properties<T: key>(
+    collection: Object<T>,
+): bool acquires AptosCollection {
+    borrow_collection(&collection).mutable_token_properties
+}
+
+ + + +
+ + + +## Function `are_collection_tokens_burnable` + + + +
public fun are_collection_tokens_burnable<T: key>(collection: object::Object<T>): bool
+
+ + + +
+Implementation + + +
public fun are_collection_tokens_burnable<T: key>(
+    collection: Object<T>,
+): bool acquires AptosCollection {
+    borrow_collection(&collection).tokens_burnable_by_creator
+}
+
+ + + +
+ + + +## Function `are_collection_tokens_freezable` + + + +
public fun are_collection_tokens_freezable<T: key>(collection: object::Object<T>): bool
+
+ + + +
+Implementation + + +
public fun are_collection_tokens_freezable<T: key>(
+    collection: Object<T>,
+): bool acquires AptosCollection {
+    borrow_collection(&collection).tokens_freezable_by_creator
+}
+
+ + + +
+ + + +## Function `authorized_borrow_collection` + + + +
fun authorized_borrow_collection<T: key>(collection: &object::Object<T>, creator: &signer): &aptos_token::AptosCollection
+
+ + + +
+Implementation + + +
inline fun authorized_borrow_collection<T: key>(collection: &Object<T>, creator: &signer): &AptosCollection {
+    let collection_address = object::object_address(collection);
+    assert!(
+        exists<AptosCollection>(collection_address),
+        error::not_found(ECOLLECTION_DOES_NOT_EXIST),
+    );
+    assert!(
+        collection::creator(*collection) == signer::address_of(creator),
+        error::permission_denied(ENOT_CREATOR),
+    );
+    borrow_global<AptosCollection>(collection_address)
+}
+
+ + + +
+ + + +## Function `set_collection_description` + + + +
public entry fun set_collection_description<T: key>(creator: &signer, collection: object::Object<T>, description: string::String)
+
+ + + +
+Implementation + + +
public entry fun set_collection_description<T: key>(
+    creator: &signer,
+    collection: Object<T>,
+    description: String,
+) acquires AptosCollection {
+    let aptos_collection = authorized_borrow_collection(&collection, creator);
+    assert!(
+        aptos_collection.mutable_description,
+        error::permission_denied(EFIELD_NOT_MUTABLE),
+    );
+    collection::set_description(option::borrow(&aptos_collection.mutator_ref), description);
+}
+
+ + + +
+ + + +## Function `set_collection_royalties` + + + +
public fun set_collection_royalties<T: key>(creator: &signer, collection: object::Object<T>, royalty: royalty::Royalty)
+
+ + + +
+Implementation + + +
public fun set_collection_royalties<T: key>(
+    creator: &signer,
+    collection: Object<T>,
+    royalty: royalty::Royalty,
+) acquires AptosCollection {
+    let aptos_collection = authorized_borrow_collection(&collection, creator);
+    assert!(
+        option::is_some(&aptos_collection.royalty_mutator_ref),
+        error::permission_denied(EFIELD_NOT_MUTABLE),
+    );
+    royalty::update(option::borrow(&aptos_collection.royalty_mutator_ref), royalty);
+}
+
+ + + +
+ + + +## Function `set_collection_royalties_call` + + + +
entry fun set_collection_royalties_call<T: key>(creator: &signer, collection: object::Object<T>, royalty_numerator: u64, royalty_denominator: u64, payee_address: address)
+
+ + + +
+Implementation + + +
entry fun set_collection_royalties_call<T: key>(
+    creator: &signer,
+    collection: Object<T>,
+    royalty_numerator: u64,
+    royalty_denominator: u64,
+    payee_address: address,
+) acquires AptosCollection {
+    let royalty = royalty::create(royalty_numerator, royalty_denominator, payee_address);
+    set_collection_royalties(creator, collection, royalty);
+}
+
+ + + +
+ + + +## Function `set_collection_uri` + + + +
public entry fun set_collection_uri<T: key>(creator: &signer, collection: object::Object<T>, uri: string::String)
+
+ + + +
+Implementation + + +
public entry fun set_collection_uri<T: key>(
+    creator: &signer,
+    collection: Object<T>,
+    uri: String,
+) acquires AptosCollection {
+    let aptos_collection = authorized_borrow_collection(&collection, creator);
+    assert!(
+        aptos_collection.mutable_uri,
+        error::permission_denied(EFIELD_NOT_MUTABLE),
+    );
+    collection::set_uri(option::borrow(&aptos_collection.mutator_ref), uri);
+}
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-token-objects/tests/compiler-v2-doc/collection.md b/aptos-move/framework/aptos-token-objects/tests/compiler-v2-doc/collection.md new file mode 100644 index 0000000000000..5139b860a3f50 --- /dev/null +++ b/aptos-move/framework/aptos-token-objects/tests/compiler-v2-doc/collection.md @@ -0,0 +1,1828 @@ + + + +# Module `0x4::collection` + +This defines an object-based Collection. A collection acts as a set organizer for a group of +tokens. This includes aspects such as a general description, project URI, name, and may contain +other useful generalizations across this set of tokens. + +Being built upon objects enables collections to be relatively flexible. As core primitives it +supports: +* Common fields: name, uri, description, creator +* MutatorRef leaving mutability configuration to a higher level component +* Addressed by a global identifier of creator's address and collection name, thus collections +cannot be deleted as a restriction of the object model. +* Optional support for collection-wide royalties +* Optional support for tracking of supply with events on mint or burn + +TODO: +* Consider supporting changing the name of the collection with the MutatorRef. This would +require adding the field original_name. +* Consider supporting changing the aspects of supply with the MutatorRef. 
+* Add aggregator support when added to framework + + +- [Resource `Collection`](#0x4_collection_Collection) +- [Struct `MutatorRef`](#0x4_collection_MutatorRef) +- [Struct `MutationEvent`](#0x4_collection_MutationEvent) +- [Struct `Mutation`](#0x4_collection_Mutation) +- [Resource `FixedSupply`](#0x4_collection_FixedSupply) +- [Resource `UnlimitedSupply`](#0x4_collection_UnlimitedSupply) +- [Resource `ConcurrentSupply`](#0x4_collection_ConcurrentSupply) +- [Struct `BurnEvent`](#0x4_collection_BurnEvent) +- [Struct `MintEvent`](#0x4_collection_MintEvent) +- [Struct `Burn`](#0x4_collection_Burn) +- [Struct `Mint`](#0x4_collection_Mint) +- [Struct `ConcurrentBurnEvent`](#0x4_collection_ConcurrentBurnEvent) +- [Struct `ConcurrentMintEvent`](#0x4_collection_ConcurrentMintEvent) +- [Struct `SetMaxSupply`](#0x4_collection_SetMaxSupply) +- [Constants](#@Constants_0) +- [Function `create_fixed_collection`](#0x4_collection_create_fixed_collection) +- [Function `create_fixed_collection_as_owner`](#0x4_collection_create_fixed_collection_as_owner) +- [Function `create_unlimited_collection`](#0x4_collection_create_unlimited_collection) +- [Function `create_unlimited_collection_as_owner`](#0x4_collection_create_unlimited_collection_as_owner) +- [Function `create_untracked_collection`](#0x4_collection_create_untracked_collection) +- [Function `create_collection_internal`](#0x4_collection_create_collection_internal) +- [Function `enable_ungated_transfer`](#0x4_collection_enable_ungated_transfer) +- [Function `create_collection_address`](#0x4_collection_create_collection_address) +- [Function `create_collection_seed`](#0x4_collection_create_collection_seed) +- [Function `increment_supply`](#0x4_collection_increment_supply) +- [Function `decrement_supply`](#0x4_collection_decrement_supply) +- [Function `generate_mutator_ref`](#0x4_collection_generate_mutator_ref) +- [Function `upgrade_to_concurrent`](#0x4_collection_upgrade_to_concurrent) +- [Function 
`check_collection_exists`](#0x4_collection_check_collection_exists) +- [Function `borrow`](#0x4_collection_borrow) +- [Function `count`](#0x4_collection_count) +- [Function `creator`](#0x4_collection_creator) +- [Function `description`](#0x4_collection_description) +- [Function `name`](#0x4_collection_name) +- [Function `uri`](#0x4_collection_uri) +- [Function `borrow_mut`](#0x4_collection_borrow_mut) +- [Function `set_name`](#0x4_collection_set_name) +- [Function `set_description`](#0x4_collection_set_description) +- [Function `set_uri`](#0x4_collection_set_uri) +- [Function `set_max_supply`](#0x4_collection_set_max_supply) + + +
use 0x1::aggregator_v2;
+use 0x1::error;
+use 0x1::event;
+use 0x1::features;
+use 0x1::object;
+use 0x1::option;
+use 0x1::signer;
+use 0x1::string;
+use 0x4::royalty;
+
+ + + + + +## Resource `Collection` + +Represents the common fields for a collection. + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct Collection has key
+
+ + + +
+Fields + + +
+
+creator: address +
+
+ The creator of this collection. +
+
+description: string::String +
+
+ A brief description of the collection. +
+
+name: string::String +
+
+ An optional categorization of similar tokens. +
+
+uri: string::String +
+
+ The Uniform Resource Identifier (uri) pointing to the JSON file stored in off-chain + storage; the URL length will likely need a maximum — any suggestions? +
+
+mutation_events: event::EventHandle<collection::MutationEvent> +
+
+ Emitted upon any mutation of the collection. +
+
+ + +
+ + + +## Struct `MutatorRef` + +This enables mutating description and URI by higher level services. + + +
struct MutatorRef has drop, store
+
+ + + +
+Fields + + +
+
+self: address +
+
+ +
+
+ + +
+ + + +## Struct `MutationEvent` + +Contains the mutated fields name. This makes the life of indexers easier, so that they can +directly understand the behavior in a writeset. + + +
struct MutationEvent has drop, store
+
+ + + +
+Fields + + +
+
+mutated_field_name: string::String +
+
+ +
+
+ + +
+ + + +## Struct `Mutation` + +Contains the mutated fields name. This makes the life of indexers easier, so that they can +directly understand the behavior in a writeset. + + +
#[event]
+struct Mutation has drop, store
+
+ + + +
+Fields + + +
+
+mutated_field_name: string::String +
+
+ +
+
+collection: object::Object<collection::Collection> +
+
+ +
+
+old_value: string::String +
+
+ +
+
+new_value: string::String +
+
+ +
+
+ + +
+ + + +## Resource `FixedSupply` + +Fixed supply tracker; this is useful for ensuring that a limited number of tokens are minted, +and adding events and supply tracking to a collection. + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct FixedSupply has key
+
+ + + +
+Fields + + +
+
+current_supply: u64 +
+
+ Total minted - total burned +
+
+max_supply: u64 +
+
+ +
+
+total_minted: u64 +
+
+ +
+
+burn_events: event::EventHandle<collection::BurnEvent> +
+
+ Emitted upon burning a Token. +
+
+mint_events: event::EventHandle<collection::MintEvent> +
+
+ Emitted upon minting a Token. +
+
+ + +
+ + + +## Resource `UnlimitedSupply` + +Unlimited supply tracker, this is useful for adding events and supply tracking to a collection. + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct UnlimitedSupply has key
+
+ + + +
+Fields + + +
+
+current_supply: u64 +
+
+ +
+
+total_minted: u64 +
+
+ +
+
+burn_events: event::EventHandle<collection::BurnEvent> +
+
+ Emitted upon burning a Token. +
+
+mint_events: event::EventHandle<collection::MintEvent> +
+
+ Emitted upon minting a Token. +
+
+ + +
+ + + +## Resource `ConcurrentSupply` + +Supply tracker, useful for tracking amount of issued tokens. +If max_value is not set to U64_MAX, this ensures that a limited number of tokens are minted. + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct ConcurrentSupply has key
+
+ + + +
+Fields + + +
+
+current_supply: aggregator_v2::Aggregator<u64> +
+
+ Total minted - total burned +
+
+total_minted: aggregator_v2::Aggregator<u64> +
+
+ +
+
+ + +
+ + + +## Struct `BurnEvent` + + + +
struct BurnEvent has drop, store
+
+ + + +
+Fields + + +
+
+index: u64 +
+
+ +
+
+token: address +
+
+ +
+
+ + +
+ + + +## Struct `MintEvent` + + + +
struct MintEvent has drop, store
+
+ + + +
+Fields + + +
+
+index: u64 +
+
+ +
+
+token: address +
+
+ +
+
+ + +
+ + + +## Struct `Burn` + + + +
#[event]
+struct Burn has drop, store
+
+ + + +
+Fields + + +
+
+collection: address +
+
+ +
+
+index: u64 +
+
+ +
+
+token: address +
+
+ +
+
+previous_owner: address +
+
+ +
+
+ + +
+ + + +## Struct `Mint` + + + +
#[event]
+struct Mint has drop, store
+
+ + + +
+Fields + + +
+
+collection: address +
+
+ +
+
+index: aggregator_v2::AggregatorSnapshot<u64> +
+
+ +
+
+token: address +
+
+ +
+
+ + +
+ + + +## Struct `ConcurrentBurnEvent` + + + +
#[event]
+#[deprecated]
+struct ConcurrentBurnEvent has drop, store
+
+ + + +
+Fields + + +
+
+collection_addr: address +
+
+ +
+
+index: u64 +
+
+ +
+
+token: address +
+
+ +
+
+ + +
+ + + +## Struct `ConcurrentMintEvent` + + + +
#[event]
+#[deprecated]
+struct ConcurrentMintEvent has drop, store
+
+ + + +
+Fields + + +
+
+collection_addr: address +
+
+ +
+
+index: aggregator_v2::AggregatorSnapshot<u64> +
+
+ +
+
+token: address +
+
+ +
+
+ + +
+ + + +## Struct `SetMaxSupply` + + + +
#[event]
+struct SetMaxSupply has drop, store
+
+ + + +
+Fields + + +
+
+collection: object::Object<collection::Collection> +
+
+ +
+
+old_max_supply: u64 +
+
+ +
+
+new_max_supply: u64 +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const MAX_U64: u64 = 18446744073709551615;
+
+ + + + + +The URI is over the maximum length + + +
const EURI_TOO_LONG: u64 = 4;
+
+ + + + + + + +
const MAX_URI_LENGTH: u64 = 512;
+
+ + + + + +Tried upgrading collection to concurrent, but collection is already concurrent + + +
const EALREADY_CONCURRENT: u64 = 8;
+
+ + + + + +The collection does not exist + + +
const ECOLLECTION_DOES_NOT_EXIST: u64 = 1;
+
+ + + + + +The collection name is over the maximum length + + +
const ECOLLECTION_NAME_TOO_LONG: u64 = 3;
+
+ + + + + +The collection owner feature is not supported + + +
const ECOLLECTION_OWNER_NOT_SUPPORTED: u64 = 11;
+
+ + + + + +The collection has reached its supply and no more tokens can be minted, unless some are burned + + +
const ECOLLECTION_SUPPLY_EXCEEDED: u64 = 2;
+
+ + + + + +Concurrent feature flag is not yet enabled, so the function cannot be performed + + +
const ECONCURRENT_NOT_ENABLED: u64 = 7;
+
+ + + + + +The description is over the maximum length + + +
const EDESCRIPTION_TOO_LONG: u64 = 5;
+
+ + + + + +The new max supply cannot be less than the current supply + + +
const EINVALID_MAX_SUPPLY: u64 = 9;
+
+ + + + + +The max supply must be positive + + +
const EMAX_SUPPLY_CANNOT_BE_ZERO: u64 = 6;
+
+ + + + + +The collection does not have a max supply + + +
const ENO_MAX_SUPPLY_IN_COLLECTION: u64 = 10;
+
+ + + + + + + +
const MAX_COLLECTION_NAME_LENGTH: u64 = 128;
+
+ + + + + + + +
const MAX_DESCRIPTION_LENGTH: u64 = 2048;
+
+ + + + + +## Function `create_fixed_collection` + +Creates a fixed-sized collection, or a collection that supports a fixed amount of tokens. +This is useful to create a guaranteed, limited supply on-chain digital asset. For example, +a collection of 1111 vicious vipers. Note, creating restrictions such as upward limits results +in data structures that prevent Aptos from parallelizing mints of this collection type. +Beyond that, it adds supply tracking with events. + + +
public fun create_fixed_collection(creator: &signer, description: string::String, max_supply: u64, name: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_fixed_collection(
+    creator: &signer,
+    description: String,
+    max_supply: u64,
+    name: String,
+    royalty: Option<Royalty>,
+    uri: String,
+): ConstructorRef {
+    assert!(max_supply != 0, error::invalid_argument(EMAX_SUPPLY_CANNOT_BE_ZERO));
+    let collection_seed = create_collection_seed(&name);
+    let constructor_ref = object::create_named_object(creator, collection_seed);
+
+    let supply = ConcurrentSupply {
+        current_supply: aggregator_v2::create_aggregator(max_supply),
+        total_minted: aggregator_v2::create_unbounded_aggregator(),
+    };
+
+    create_collection_internal(
+        creator,
+        constructor_ref,
+        description,
+        name,
+        royalty,
+        uri,
+        option::some(supply),
+    )
+}
+
+ + + +
+ + + +## Function `create_fixed_collection_as_owner` + +Same functionality as create_fixed_collection, but the caller is the owner of the collection. +This means that the caller can transfer the collection to another address. +This transfers ownership and minting permissions to the new address. + + +
public fun create_fixed_collection_as_owner(creator: &signer, description: string::String, max_supply: u64, name: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_fixed_collection_as_owner(
+    creator: &signer,
+    description: String,
+    max_supply: u64,
+    name: String,
+    royalty: Option<Royalty>,
+    uri: String,
+): ConstructorRef {
+    assert!(features::is_collection_owner_enabled(), error::unavailable(ECOLLECTION_OWNER_NOT_SUPPORTED));
+
+    let constructor_ref = create_fixed_collection(
+        creator,
+        description,
+        max_supply,
+        name,
+        royalty,
+        uri,
+    );
+    enable_ungated_transfer(&constructor_ref);
+    constructor_ref
+}
+
+ + + +
+ + + +## Function `create_unlimited_collection` + +Creates an unlimited collection. This has support for supply tracking but does not limit +the supply of tokens. + + +
public fun create_unlimited_collection(creator: &signer, description: string::String, name: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_unlimited_collection(
+    creator: &signer,
+    description: String,
+    name: String,
+    royalty: Option<Royalty>,
+    uri: String,
+): ConstructorRef {
+    let collection_seed = create_collection_seed(&name);
+    let constructor_ref = object::create_named_object(creator, collection_seed);
+
+    let supply = ConcurrentSupply {
+        current_supply: aggregator_v2::create_unbounded_aggregator(),
+        total_minted: aggregator_v2::create_unbounded_aggregator(),
+    };
+
+    create_collection_internal(
+        creator,
+        constructor_ref,
+        description,
+        name,
+        royalty,
+        uri,
+        option::some(supply),
+    )
+}
+
+ + + +
+ + + +## Function `create_unlimited_collection_as_owner` + +Same functionality as create_unlimited_collection, but the caller is the owner of the collection. +This means that the caller can transfer the collection to another address. +This transfers ownership and minting permissions to the new address. + + +
public fun create_unlimited_collection_as_owner(creator: &signer, description: string::String, name: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_unlimited_collection_as_owner(
+    creator: &signer,
+    description: String,
+    name: String,
+    royalty: Option<Royalty>,
+    uri: String,
+): ConstructorRef {
+    assert!(features::is_collection_owner_enabled(), error::unavailable(ECOLLECTION_OWNER_NOT_SUPPORTED));
+
+    let constructor_ref = create_unlimited_collection(
+        creator,
+        description,
+        name,
+        royalty,
+        uri,
+    );
+    enable_ungated_transfer(&constructor_ref);
+    constructor_ref
+}
+
+ + + +
+ + + +## Function `create_untracked_collection` + +Creates an untracked collection, or a collection that supports an arbitrary amount of +tokens. This is useful for mass airdrops that fully leverage Aptos parallelization. +TODO: Hide this until we bring back a meaningful way to enforce burns + + +
fun create_untracked_collection(creator: &signer, description: string::String, name: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String): object::ConstructorRef
+
+ + + +
+Implementation + + +
fun create_untracked_collection(
+    creator: &signer,
+    description: String,
+    name: String,
+    royalty: Option<Royalty>,
+    uri: String,
+): ConstructorRef {
+    let collection_seed = create_collection_seed(&name);
+    let constructor_ref = object::create_named_object(creator, collection_seed);
+
+    create_collection_internal<FixedSupply>(
+        creator,
+        constructor_ref,
+        description,
+        name,
+        royalty,
+        uri,
+        option::none(),
+    )
+}
+
+ + + +
+ + + +## Function `create_collection_internal` + + + +
fun create_collection_internal<Supply: key>(creator: &signer, constructor_ref: object::ConstructorRef, description: string::String, name: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String, supply: option::Option<Supply>): object::ConstructorRef
+
+ + + +
+Implementation + + +
inline fun create_collection_internal<Supply: key>(
+    creator: &signer,
+    constructor_ref: ConstructorRef,
+    description: String,
+    name: String,
+    royalty: Option<Royalty>,
+    uri: String,
+    supply: Option<Supply>,
+): ConstructorRef {
+    assert!(string::length(&name) <= MAX_COLLECTION_NAME_LENGTH, error::out_of_range(ECOLLECTION_NAME_TOO_LONG));
+    assert!(string::length(&uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG));
+    assert!(string::length(&description) <= MAX_DESCRIPTION_LENGTH, error::out_of_range(EDESCRIPTION_TOO_LONG));
+
+    let object_signer = object::generate_signer(&constructor_ref);
+
+    let collection = Collection {
+        creator: signer::address_of(creator),
+        description,
+        name,
+        uri,
+        mutation_events: object::new_event_handle(&object_signer),
+    };
+    move_to(&object_signer, collection);
+
+    if (option::is_some(&supply)) {
+        move_to(&object_signer, option::destroy_some(supply))
+    } else {
+        option::destroy_none(supply)
+    };
+
+    if (option::is_some(&royalty)) {
+        royalty::init(&constructor_ref, option::extract(&mut royalty))
+    };
+
+    let transfer_ref = object::generate_transfer_ref(&constructor_ref);
+    object::disable_ungated_transfer(&transfer_ref);
+
+    constructor_ref
+}
+
+ + + +
+ + + +## Function `enable_ungated_transfer` + + + +
fun enable_ungated_transfer(constructor_ref: &object::ConstructorRef)
+
+ + + +
+Implementation + + +
inline fun enable_ungated_transfer(constructor_ref: &ConstructorRef) {
+    let transfer_ref = object::generate_transfer_ref(constructor_ref);
+    object::enable_ungated_transfer(&transfer_ref);
+}
+
+ + + +
+ + + +## Function `create_collection_address` + +Generates the collections address based upon the creators address and the collection's name + + +
public fun create_collection_address(creator: &address, name: &string::String): address
+
+ + + +
+Implementation + + +
public fun create_collection_address(creator: &address, name: &String): address {
+    object::create_object_address(creator, create_collection_seed(name))
+}
+
+ + + +
+ + + +## Function `create_collection_seed` + +Named objects are derived from a seed, the collection's seed is its name. + + +
public fun create_collection_seed(name: &string::String): vector<u8>
+
+ + + +
+Implementation + + +
public fun create_collection_seed(name: &String): vector<u8> {
+    assert!(string::length(name) <= MAX_COLLECTION_NAME_LENGTH, error::out_of_range(ECOLLECTION_NAME_TOO_LONG));
+    *string::bytes(name)
+}
+
+ + + +
+ + + +## Function `increment_supply` + +Called by token on mint to increment supply if there's an appropriate Supply struct. + + +
public(friend) fun increment_supply(collection: &object::Object<collection::Collection>, token: address): option::Option<aggregator_v2::AggregatorSnapshot<u64>>
+
+ + + +
+Implementation + + +
public(friend) fun increment_supply(
+    collection: &Object<Collection>,
+    token: address,
+): Option<AggregatorSnapshot<u64>> acquires FixedSupply, UnlimitedSupply, ConcurrentSupply {
+    let collection_addr = object::object_address(collection);
+    if (exists<ConcurrentSupply>(collection_addr)) {
+        let supply = borrow_global_mut<ConcurrentSupply>(collection_addr);
+        assert!(
+            aggregator_v2::try_add(&mut supply.current_supply, 1),
+            error::out_of_range(ECOLLECTION_SUPPLY_EXCEEDED),
+        );
+        aggregator_v2::add(&mut supply.total_minted, 1);
+        event::emit(
+            Mint {
+                collection: collection_addr,
+                index: aggregator_v2::snapshot(&supply.total_minted),
+                token,
+            },
+        );
+        option::some(aggregator_v2::snapshot(&supply.total_minted))
+    } else if (exists<FixedSupply>(collection_addr)) {
+        let supply = borrow_global_mut<FixedSupply>(collection_addr);
+        supply.current_supply = supply.current_supply + 1;
+        supply.total_minted = supply.total_minted + 1;
+        assert!(
+            supply.current_supply <= supply.max_supply,
+            error::out_of_range(ECOLLECTION_SUPPLY_EXCEEDED),
+        );
+        if (std::features::module_event_migration_enabled()) {
+            event::emit(
+                Mint {
+                    collection: collection_addr,
+                    index: aggregator_v2::create_snapshot(supply.total_minted),
+                    token,
+                },
+            );
+        };
+        event::emit_event(&mut supply.mint_events,
+            MintEvent {
+                index: supply.total_minted,
+                token,
+            },
+        );
+        option::some(aggregator_v2::create_snapshot<u64>(supply.total_minted))
+    } else if (exists<UnlimitedSupply>(collection_addr)) {
+        let supply = borrow_global_mut<UnlimitedSupply>(collection_addr);
+        supply.current_supply = supply.current_supply + 1;
+        supply.total_minted = supply.total_minted + 1;
+        if (std::features::module_event_migration_enabled()) {
+            event::emit(
+                Mint {
+                    collection: collection_addr,
+                    index: aggregator_v2::create_snapshot(supply.total_minted),
+                    token,
+                },
+            );
+        };
+        event::emit_event(
+            &mut supply.mint_events,
+            MintEvent {
+                index: supply.total_minted,
+                token,
+            },
+        );
+        option::some(aggregator_v2::create_snapshot<u64>(supply.total_minted))
+    } else {
+        option::none()
+    }
+}
+
+ + + +
+ + + +## Function `decrement_supply` + +Called by token on burn to decrement supply if there's an appropriate Supply struct. + + +
public(friend) fun decrement_supply(collection: &object::Object<collection::Collection>, token: address, index: option::Option<u64>, previous_owner: address)
+
+ + + +
+Implementation + + +
public(friend) fun decrement_supply(
+    collection: &Object<Collection>,
+    token: address,
+    index: Option<u64>,
+    previous_owner: address,
+) acquires FixedSupply, UnlimitedSupply, ConcurrentSupply {
+    let collection_addr = object::object_address(collection);
+    if (exists<ConcurrentSupply>(collection_addr)) {
+        let supply = borrow_global_mut<ConcurrentSupply>(collection_addr);
+        aggregator_v2::sub(&mut supply.current_supply, 1);
+
+        event::emit(
+            Burn {
+                collection: collection_addr,
+                index: *option::borrow(&index),
+                token,
+                previous_owner,
+            },
+        );
+    } else if (exists<FixedSupply>(collection_addr)) {
+        let supply = borrow_global_mut<FixedSupply>(collection_addr);
+        supply.current_supply = supply.current_supply - 1;
+        if (std::features::module_event_migration_enabled()) {
+            event::emit(
+                Burn {
+                    collection: collection_addr,
+                    index: *option::borrow(&index),
+                    token,
+                    previous_owner,
+                },
+            );
+        };
+        event::emit_event(
+            &mut supply.burn_events,
+            BurnEvent {
+                index: *option::borrow(&index),
+                token,
+            },
+        );
+    } else if (exists<UnlimitedSupply>(collection_addr)) {
+        let supply = borrow_global_mut<UnlimitedSupply>(collection_addr);
+        supply.current_supply = supply.current_supply - 1;
+        if (std::features::module_event_migration_enabled()) {
+            event::emit(
+                Burn {
+                    collection: collection_addr,
+                    index: *option::borrow(&index),
+                    token,
+                    previous_owner,
+                },
+            );
+        };
+        event::emit_event(
+            &mut supply.burn_events,
+            BurnEvent {
+                index: *option::borrow(&index),
+                token,
+            },
+        );
+    }
+}
+
+ + + +
+ + + +## Function `generate_mutator_ref` + +Creates a MutatorRef, which gates the ability to mutate any fields that support mutation. + + +
public fun generate_mutator_ref(ref: &object::ConstructorRef): collection::MutatorRef
+
+ + + +
+Implementation + + +
public fun generate_mutator_ref(ref: &ConstructorRef): MutatorRef {
+    let object = object::object_from_constructor_ref<Collection>(ref);
+    MutatorRef { self: object::object_address(&object) }
+}
+
+ + + +
+ + + +## Function `upgrade_to_concurrent` + + + +
public fun upgrade_to_concurrent(ref: &object::ExtendRef)
+
+ + + +
+Implementation + + +
public fun upgrade_to_concurrent(
+    ref: &ExtendRef,
+) acquires FixedSupply, UnlimitedSupply {
+    let metadata_object_address = object::address_from_extend_ref(ref);
+    let metadata_object_signer = object::generate_signer_for_extending(ref);
+
+    let (supply, current_supply, total_minted, burn_events, mint_events) = if (exists<FixedSupply>(
+        metadata_object_address
+    )) {
+        let FixedSupply {
+            current_supply,
+            max_supply,
+            total_minted,
+            burn_events,
+            mint_events,
+        } = move_from<FixedSupply>(metadata_object_address);
+
+        let supply = ConcurrentSupply {
+            current_supply: aggregator_v2::create_aggregator(max_supply),
+            total_minted: aggregator_v2::create_unbounded_aggregator(),
+        };
+        (supply, current_supply, total_minted, burn_events, mint_events)
+    } else if (exists<UnlimitedSupply>(metadata_object_address)) {
+        let UnlimitedSupply {
+            current_supply,
+            total_minted,
+            burn_events,
+            mint_events,
+        } = move_from<UnlimitedSupply>(metadata_object_address);
+
+        let supply = ConcurrentSupply {
+            current_supply: aggregator_v2::create_unbounded_aggregator(),
+            total_minted: aggregator_v2::create_unbounded_aggregator(),
+        };
+        (supply, current_supply, total_minted, burn_events, mint_events)
+    } else {
+        // untracked collection is already concurrent, and other variants too.
+        abort error::invalid_argument(EALREADY_CONCURRENT)
+    };
+
+    // update current state:
+    aggregator_v2::add(&mut supply.current_supply, current_supply);
+    aggregator_v2::add(&mut supply.total_minted, total_minted);
+    move_to(&metadata_object_signer, supply);
+
+    event::destroy_handle(burn_events);
+    event::destroy_handle(mint_events);
+}
+
+ + + +
+ + + +## Function `check_collection_exists` + + + +
fun check_collection_exists(addr: address)
+
+ + + +
+Implementation + + +
inline fun check_collection_exists(addr: address) {
+    assert!(
+        exists<Collection>(addr),
+        error::not_found(ECOLLECTION_DOES_NOT_EXIST),
+    );
+}
+
+ + + +
+ + + +## Function `borrow` + + + +
fun borrow<T: key>(collection: &object::Object<T>): &collection::Collection
+
+ + + +
+Implementation + + +
inline fun borrow<T: key>(collection: &Object<T>): &Collection {
+    let collection_address = object::object_address(collection);
+    check_collection_exists(collection_address);
+    borrow_global<Collection>(collection_address)
+}
+
+ + + +
+ + + +## Function `count` + +Provides the count of the current collection if supply tracking is used + +Note: Calling this method from a transaction that also mints/burns prevents +it from being parallelized. + + +
#[view]
+public fun count<T: key>(collection: object::Object<T>): option::Option<u64>
+
+ + + +
+Implementation + + +
public fun count<T: key>(
+    collection: Object<T>
+): Option<u64> acquires FixedSupply, UnlimitedSupply, ConcurrentSupply {
+    let collection_address = object::object_address(&collection);
+    check_collection_exists(collection_address);
+
+    if (exists<ConcurrentSupply>(collection_address)) {
+        let supply = borrow_global_mut<ConcurrentSupply>(collection_address);
+        option::some(aggregator_v2::read(&supply.current_supply))
+    } else if (exists<FixedSupply>(collection_address)) {
+        let supply = borrow_global_mut<FixedSupply>(collection_address);
+        option::some(supply.current_supply)
+    } else if (exists<UnlimitedSupply>(collection_address)) {
+        let supply = borrow_global_mut<UnlimitedSupply>(collection_address);
+        option::some(supply.current_supply)
+    } else {
+        option::none()
+    }
+}
+
+ + + +
+ + + +## Function `creator` + + + +
#[view]
+public fun creator<T: key>(collection: object::Object<T>): address
+
+ + + +
+Implementation + + +
public fun creator<T: key>(collection: Object<T>): address acquires Collection {
+    borrow(&collection).creator
+}
+
+ + + +
+ + + +## Function `description` + + + +
#[view]
+public fun description<T: key>(collection: object::Object<T>): string::String
+
+ + + +
+Implementation + + +
public fun description<T: key>(collection: Object<T>): String acquires Collection {
+    borrow(&collection).description
+}
+
+ + + +
+ + + +## Function `name` + + + +
#[view]
+public fun name<T: key>(collection: object::Object<T>): string::String
+
+ + + +
+Implementation + + +
public fun name<T: key>(collection: Object<T>): String acquires Collection {
+    borrow(&collection).name
+}
+
+ + + +
+ + + +## Function `uri` + + + +
#[view]
+public fun uri<T: key>(collection: object::Object<T>): string::String
+
+ + + +
+Implementation + + +
public fun uri<T: key>(collection: Object<T>): String acquires Collection {
+    borrow(&collection).uri
+}
+
+ + + +
+ + + +## Function `borrow_mut` + + + +
fun borrow_mut(mutator_ref: &collection::MutatorRef): &mut collection::Collection
+
+ + + +
+Implementation + + +
inline fun borrow_mut(mutator_ref: &MutatorRef): &mut Collection {
+    check_collection_exists(mutator_ref.self);
+    borrow_global_mut<Collection>(mutator_ref.self)
+}
+
+ + + +
+ + + +## Function `set_name` + +Callers of this function must be aware that changing the name will change the calculated +collection's address when calling create_collection_address. +Once the collection has been created, the collection address should be saved for reference and +create_collection_address should not be used to derive the collection's address. + +After changing the collection's name, to create tokens - only call functions that accept the collection object as an argument. + + +
public fun set_name(mutator_ref: &collection::MutatorRef, name: string::String)
+
+ + + +
+Implementation + + +
public fun set_name(mutator_ref: &MutatorRef, name: String) acquires Collection {
+    assert!(string::length(&name) <= MAX_COLLECTION_NAME_LENGTH, error::out_of_range(ECOLLECTION_NAME_TOO_LONG));
+    let collection = borrow_mut(mutator_ref);
+    event::emit(Mutation {
+        mutated_field_name: string::utf8(b"name") ,
+        collection: object::address_to_object(mutator_ref.self),
+        old_value: collection.name,
+        new_value: name,
+    });
+    collection.name = name;
+}
+
+ + + +
+ + + +## Function `set_description` + + + +
public fun set_description(mutator_ref: &collection::MutatorRef, description: string::String)
+
+ + + +
+Implementation + + +
public fun set_description(mutator_ref: &MutatorRef, description: String) acquires Collection {
+    assert!(string::length(&description) <= MAX_DESCRIPTION_LENGTH, error::out_of_range(EDESCRIPTION_TOO_LONG));
+    let collection = borrow_mut(mutator_ref);
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(Mutation {
+            mutated_field_name: string::utf8(b"description"),
+            collection: object::address_to_object(mutator_ref.self),
+            old_value: collection.description,
+            new_value: description,
+        });
+    };
+    collection.description = description;
+    event::emit_event(
+        &mut collection.mutation_events,
+        MutationEvent { mutated_field_name: string::utf8(b"description") },
+    );
+}
+
+ + + +
+ + + +## Function `set_uri` + + + +
public fun set_uri(mutator_ref: &collection::MutatorRef, uri: string::String)
+
+ + + +
+Implementation + + +
public fun set_uri(mutator_ref: &MutatorRef, uri: String) acquires Collection {
+    assert!(string::length(&uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG));
+    let collection = borrow_mut(mutator_ref);
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(Mutation {
+            mutated_field_name: string::utf8(b"uri"),
+            collection: object::address_to_object(mutator_ref.self),
+            old_value: collection.uri,
+            new_value: uri,
+        });
+    };
+    collection.uri = uri;
+    event::emit_event(
+        &mut collection.mutation_events,
+        MutationEvent { mutated_field_name: string::utf8(b"uri") },
+    );
+}
+
+ + + +
+ + + +## Function `set_max_supply` + + + +
public fun set_max_supply(mutator_ref: &collection::MutatorRef, max_supply: u64)
+
+ + + +
+Implementation + + +
public fun set_max_supply(mutator_ref: &MutatorRef, max_supply: u64) acquires ConcurrentSupply, FixedSupply {
+    let collection = object::address_to_object<Collection>(mutator_ref.self);
+    let collection_address = object::object_address(&collection);
+    let old_max_supply;
+
+    if (exists<ConcurrentSupply>(collection_address)) {
+        let supply = borrow_global_mut<ConcurrentSupply>(collection_address);
+        let current_supply = aggregator_v2::read(&supply.current_supply);
+        assert!(
+            max_supply >= current_supply,
+            error::out_of_range(EINVALID_MAX_SUPPLY),
+        );
+        old_max_supply = aggregator_v2::max_value(&supply.current_supply);
+        supply.current_supply = aggregator_v2::create_aggregator(max_supply);
+        aggregator_v2::add(&mut supply.current_supply, current_supply);
+    } else if (exists<FixedSupply>(collection_address)) {
+        let supply = borrow_global_mut<FixedSupply>(collection_address);
+        assert!(
+            max_supply >= supply.current_supply,
+            error::out_of_range(EINVALID_MAX_SUPPLY),
+        );
+        old_max_supply = supply.max_supply;
+        supply.max_supply = max_supply;
+    } else {
+        abort error::invalid_argument(ENO_MAX_SUPPLY_IN_COLLECTION)
+    };
+
+    event::emit(SetMaxSupply { collection, old_max_supply, new_max_supply: max_supply });
+}
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-token-objects/tests/compiler-v2-doc/overview.md b/aptos-move/framework/aptos-token-objects/tests/compiler-v2-doc/overview.md new file mode 100644 index 0000000000000..eda0865b6e708 --- /dev/null +++ b/aptos-move/framework/aptos-token-objects/tests/compiler-v2-doc/overview.md @@ -0,0 +1,22 @@ + + + +# Aptos Token Framework + + +This is the reference documentation of the Aptos Token Objects framework. + + + + +## Index + + +- [`0x4::aptos_token`](aptos_token.md#0x4_aptos_token) +- [`0x4::collection`](collection.md#0x4_collection) +- [`0x4::property_map`](property_map.md#0x4_property_map) +- [`0x4::royalty`](royalty.md#0x4_royalty) +- [`0x4::token`](token.md#0x4_token) + + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-token-objects/tests/compiler-v2-doc/property_map.md b/aptos-move/framework/aptos-token-objects/tests/compiler-v2-doc/property_map.md new file mode 100644 index 0000000000000..d6e55d10f293a --- /dev/null +++ b/aptos-move/framework/aptos-token-objects/tests/compiler-v2-doc/property_map.md @@ -0,0 +1,1282 @@ + + + +# Module `0x4::property_map` + +PropertyMap provides generic metadata support for AptosToken. It is a specialization of +SimpleMap that enforces strict typing with minimal storage use by using constant u64 to +represent types and storing values in bcs format. 
+ + +- [Resource `PropertyMap`](#0x4_property_map_PropertyMap) +- [Struct `PropertyValue`](#0x4_property_map_PropertyValue) +- [Struct `MutatorRef`](#0x4_property_map_MutatorRef) +- [Constants](#@Constants_0) +- [Function `init`](#0x4_property_map_init) +- [Function `extend`](#0x4_property_map_extend) +- [Function `burn`](#0x4_property_map_burn) +- [Function `prepare_input`](#0x4_property_map_prepare_input) +- [Function `to_external_type`](#0x4_property_map_to_external_type) +- [Function `to_internal_type`](#0x4_property_map_to_internal_type) +- [Function `type_info_to_internal_type`](#0x4_property_map_type_info_to_internal_type) +- [Function `validate_type`](#0x4_property_map_validate_type) +- [Function `generate_mutator_ref`](#0x4_property_map_generate_mutator_ref) +- [Function `contains_key`](#0x4_property_map_contains_key) +- [Function `length`](#0x4_property_map_length) +- [Function `read`](#0x4_property_map_read) +- [Function `assert_exists`](#0x4_property_map_assert_exists) +- [Function `read_typed`](#0x4_property_map_read_typed) +- [Function `read_bool`](#0x4_property_map_read_bool) +- [Function `read_u8`](#0x4_property_map_read_u8) +- [Function `read_u16`](#0x4_property_map_read_u16) +- [Function `read_u32`](#0x4_property_map_read_u32) +- [Function `read_u64`](#0x4_property_map_read_u64) +- [Function `read_u128`](#0x4_property_map_read_u128) +- [Function `read_u256`](#0x4_property_map_read_u256) +- [Function `read_address`](#0x4_property_map_read_address) +- [Function `read_bytes`](#0x4_property_map_read_bytes) +- [Function `read_string`](#0x4_property_map_read_string) +- [Function `add`](#0x4_property_map_add) +- [Function `add_typed`](#0x4_property_map_add_typed) +- [Function `add_internal`](#0x4_property_map_add_internal) +- [Function `update`](#0x4_property_map_update) +- [Function `update_typed`](#0x4_property_map_update_typed) +- [Function `update_internal`](#0x4_property_map_update_internal) +- [Function `remove`](#0x4_property_map_remove) +- 
[Function `assert_end_to_end_input`](#0x4_property_map_assert_end_to_end_input) + + +
use 0x1::bcs;
+use 0x1::error;
+use 0x1::from_bcs;
+use 0x1::object;
+use 0x1::simple_map;
+use 0x1::string;
+use 0x1::type_info;
+use 0x1::vector;
+
+ + + + + +## Resource `PropertyMap` + +A Map for typed key to value mapping, the contract using it +should keep track of what keys are what types, and parse them accordingly. + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct PropertyMap has drop, key
+
+ + + +
+Fields + + +
+
+inner: simple_map::SimpleMap<string::String, property_map::PropertyValue> +
+
+ +
+
+ + +
+ + + +## Struct `PropertyValue` + +A typed value for the PropertyMap to ensure that typing is always consistent + + +
struct PropertyValue has drop, store
+
+ + + +
+Fields + + +
+
+type: u8 +
+
+ +
+
+value: vector<u8> +
+
+ +
+
+ + +
+ + + +## Struct `MutatorRef` + +A mutator ref that allows for mutation of the property map + + +
struct MutatorRef has drop, store
+
+ + + +
+Fields + + +
+
+self: address +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Property value does not match expected type + + +
const ETYPE_MISMATCH: u64 = 6;
+
+ + + + + + + +
const ADDRESS: u8 = 7;
+
+ + + + + + + +
const BOOL: u8 = 0;
+
+ + + + + + + +
const BYTE_VECTOR: u8 = 8;
+
+ + + + + +The property key already exists + + +
const EKEY_ALREADY_EXISTS_IN_PROPERTY_MAP: u64 = 2;
+
+ + + + + +Property key and type counts do not match + + +
const EKEY_TYPE_COUNT_MISMATCH: u64 = 5;
+
+ + + + + +Property key and value counts do not match + + +
const EKEY_VALUE_COUNT_MISMATCH: u64 = 4;
+
+ + + + + +The property map does not exist + + +
const EPROPERTY_MAP_DOES_NOT_EXIST: u64 = 1;
+
+ + + + + +The key of the property is too long + + +
const EPROPERTY_MAP_KEY_TOO_LONG: u64 = 8;
+
+ + + + + +The number of properties exceeds the maximum + + +
const ETOO_MANY_PROPERTIES: u64 = 3;
+
+ + + + + +Invalid value type specified + + +
const ETYPE_INVALID: u64 = 7;
+
+ + + + + +Maximum number of items in a PropertyMap + + +
const MAX_PROPERTY_MAP_SIZE: u64 = 1000;
+
+ + + + + +Maximum number of characters in a property name + + +
const MAX_PROPERTY_NAME_LENGTH: u64 = 128;
+
+ + + + + + + +
const STRING: u8 = 9;
+
+ + + + + + + +
const U128: u8 = 5;
+
+ + + + + + + +
const U16: u8 = 2;
+
+ + + + + + + +
const U256: u8 = 6;
+
+ + + + + + + +
const U32: u8 = 3;
+
+ + + + + + + +
const U64: u8 = 4;
+
+ + + + + + + +
const U8: u8 = 1;
+
+ + + + + +## Function `init` + + + +
public fun init(ref: &object::ConstructorRef, container: property_map::PropertyMap)
+
+ + + +
+Implementation + + +
public fun init(ref: &ConstructorRef, container: PropertyMap) {
+    let signer = object::generate_signer(ref);
+    move_to(&signer, container);
+}
+
+ + + +
+ + + +## Function `extend` + + + +
public fun extend(ref: &object::ExtendRef, container: property_map::PropertyMap)
+
+ + + +
+Implementation + + +
public fun extend(ref: &ExtendRef, container: PropertyMap) {
+    let signer = object::generate_signer_for_extending(ref);
+    move_to(&signer, container);
+}
+
+ + + +
+ + + +## Function `burn` + +Burns the entire property map + + +
public fun burn(ref: property_map::MutatorRef)
+
+ + + +
+Implementation + + +
public fun burn(ref: MutatorRef) acquires PropertyMap {
+    move_from<PropertyMap>(ref.self);
+}
+
+ + + +
+ + + +## Function `prepare_input` + +Helper for external entry functions to produce a valid container for property values. + + +
public fun prepare_input(keys: vector<string::String>, types: vector<string::String>, values: vector<vector<u8>>): property_map::PropertyMap
+
+ + + +
+Implementation + + +
public fun prepare_input(
+    keys: vector<String>,
+    types: vector<String>,
+    values: vector<vector<u8>>,
+): PropertyMap {
+    let length = vector::length(&keys);
+    assert!(length <= MAX_PROPERTY_MAP_SIZE, error::invalid_argument(ETOO_MANY_PROPERTIES));
+    assert!(length == vector::length(&values), error::invalid_argument(EKEY_VALUE_COUNT_MISMATCH));
+    assert!(length == vector::length(&types), error::invalid_argument(EKEY_TYPE_COUNT_MISMATCH));
+
+    let container = simple_map::create<String, PropertyValue>();
+    while (!vector::is_empty(&keys)) {
+        let key = vector::pop_back(&mut keys);
+        assert!(
+            string::length(&key) <= MAX_PROPERTY_NAME_LENGTH,
+            error::invalid_argument(EPROPERTY_MAP_KEY_TOO_LONG),
+        );
+
+        let value = vector::pop_back(&mut values);
+        let type = vector::pop_back(&mut types);
+
+        let new_type = to_internal_type(type);
+        validate_type(new_type, value);
+
+        simple_map::add(&mut container, key, PropertyValue { value, type: new_type });
+    };
+
+    PropertyMap { inner: container }
+}
+
+ + + +
+ + + +## Function `to_external_type` + +Maps String representation of types from their u8 representation + + +
fun to_external_type(type: u8): string::String
+
+ + + +
+Implementation + + +
inline fun to_external_type(type: u8): String {
+    if (type == BOOL) {
+        string::utf8(b"bool")
+    } else if (type == U8) {
+        string::utf8(b"u8")
+    } else if (type == U16) {
+        string::utf8(b"u16")
+    } else if (type == U32) {
+        string::utf8(b"u32")
+    } else if (type == U64) {
+        string::utf8(b"u64")
+    } else if (type == U128) {
+        string::utf8(b"u128")
+    } else if (type == U256) {
+        string::utf8(b"u256")
+    } else if (type == ADDRESS) {
+        string::utf8(b"address")
+    } else if (type == BYTE_VECTOR) {
+        string::utf8(b"vector<u8>")
+    } else if (type == STRING) {
+        string::utf8(b"0x1::string::String")
+    } else {
+        abort (error::invalid_argument(ETYPE_INVALID))
+    }
+}
+
+ + + +
+ + + +## Function `to_internal_type` + +Maps the String representation of types to u8 + + +
fun to_internal_type(type: string::String): u8
+
+ + + +
+Implementation + + +
inline fun to_internal_type(type: String): u8 {
+    if (type == string::utf8(b"bool")) {
+        BOOL
+    } else if (type == string::utf8(b"u8")) {
+        U8
+    } else if (type == string::utf8(b"u16")) {
+        U16
+    } else if (type == string::utf8(b"u32")) {
+        U32
+    } else if (type == string::utf8(b"u64")) {
+        U64
+    } else if (type == string::utf8(b"u128")) {
+        U128
+    } else if (type == string::utf8(b"u256")) {
+        U256
+    } else if (type == string::utf8(b"address")) {
+        ADDRESS
+    } else if (type == string::utf8(b"vector<u8>")) {
+        BYTE_VECTOR
+    } else if (type == string::utf8(b"0x1::string::String")) {
+        STRING
+    } else {
+        abort (error::invalid_argument(ETYPE_INVALID))
+    }
+}
+
+ + + +
+ + + +## Function `type_info_to_internal_type` + +Maps Move type to u8 representation + + +
fun type_info_to_internal_type<T>(): u8
+
+ + + +
+Implementation + + +
inline fun type_info_to_internal_type<T>(): u8 {
+    let type = type_info::type_name<T>();
+    to_internal_type(type)
+}
+
+ + + +
+ + + +## Function `validate_type` + +Validates property value type against its expected type + + +
fun validate_type(type: u8, value: vector<u8>)
+
+ + + +
+Implementation + + +
inline fun validate_type(type: u8, value: vector<u8>) {
+    if (type == BOOL) {
+        from_bcs::to_bool(value);
+    } else if (type == U8) {
+        from_bcs::to_u8(value);
+    } else if (type == U16) {
+        from_bcs::to_u16(value);
+    } else if (type == U32) {
+        from_bcs::to_u32(value);
+    } else if (type == U64) {
+        from_bcs::to_u64(value);
+    } else if (type == U128) {
+        from_bcs::to_u128(value);
+    } else if (type == U256) {
+        from_bcs::to_u256(value);
+    } else if (type == ADDRESS) {
+        from_bcs::to_address(value);
+    } else if (type == BYTE_VECTOR) {
+        // nothing to validate...
+    } else if (type == STRING) {
+        from_bcs::to_string(value);
+    } else {
+        abort (error::invalid_argument(ETYPE_MISMATCH))
+    };
+}
+
+ + + +
+ + + +## Function `generate_mutator_ref` + + + +
public fun generate_mutator_ref(ref: &object::ConstructorRef): property_map::MutatorRef
+
+ + + +
+Implementation + + +
public fun generate_mutator_ref(ref: &ConstructorRef): MutatorRef {
+    MutatorRef { self: object::address_from_constructor_ref(ref) }
+}
+
+ + + +
+ + + +## Function `contains_key` + + + +
public fun contains_key<T: key>(object: &object::Object<T>, key: &string::String): bool
+
+ + + +
+Implementation + + +
public fun contains_key<T: key>(object: &Object<T>, key: &String): bool acquires PropertyMap {
+    assert_exists(object::object_address(object));
+    let property_map = borrow_global<PropertyMap>(object::object_address(object));
+    simple_map::contains_key(&property_map.inner, key)
+}
+
+ + + +
+ + + +## Function `length` + + + +
public fun length<T: key>(object: &object::Object<T>): u64
+
+ + + +
+Implementation + + +
public fun length<T: key>(object: &Object<T>): u64 acquires PropertyMap {
+    assert_exists(object::object_address(object));
+    let property_map = borrow_global<PropertyMap>(object::object_address(object));
+    simple_map::length(&property_map.inner)
+}
+
+ + + +
+ + + +## Function `read` + +Read the property and get its external type in its bcs encoded format + +The preferred method is to use read_<type> where the type is already known. + + +
public fun read<T: key>(object: &object::Object<T>, key: &string::String): (string::String, vector<u8>)
+
+ + + +
+Implementation + + +
public fun read<T: key>(object: &Object<T>, key: &String): (String, vector<u8>) acquires PropertyMap {
+    assert_exists(object::object_address(object));
+    let property_map = borrow_global<PropertyMap>(object::object_address(object));
+    let property_value = simple_map::borrow(&property_map.inner, key);
+    let new_type = to_external_type(property_value.type);
+    (new_type, property_value.value)
+}
+
+ + + +
+ + + +## Function `assert_exists` + + + +
fun assert_exists(object: address)
+
+ + + +
+Implementation + + +
inline fun assert_exists(object: address) {
+    assert!(
+        exists<PropertyMap>(object),
+        error::not_found(EPROPERTY_MAP_DOES_NOT_EXIST),
+    );
+}
+
+ + + +
+ + + +## Function `read_typed` + +Read a type and verify that the type is correct + + +
fun read_typed<T: key, V>(object: &object::Object<T>, key: &string::String): vector<u8>
+
+ + + +
+Implementation + + +
inline fun read_typed<T: key, V>(object: &Object<T>, key: &String): vector<u8> acquires PropertyMap {
+    let (type, value) = read(object, key);
+    assert!(
+        type == type_info::type_name<V>(),
+        error::invalid_argument(ETYPE_MISMATCH),
+    );
+    value
+}
+
+ + + +
+ + + +## Function `read_bool` + + + +
public fun read_bool<T: key>(object: &object::Object<T>, key: &string::String): bool
+
+ + + +
+Implementation + + +
public fun read_bool<T: key>(object: &Object<T>, key: &String): bool acquires PropertyMap {
+    let value = read_typed<T, bool>(object, key);
+    from_bcs::to_bool(value)
+}
+
+ + + +
+ + + +## Function `read_u8` + + + +
public fun read_u8<T: key>(object: &object::Object<T>, key: &string::String): u8
+
+ + + +
+Implementation + + +
public fun read_u8<T: key>(object: &Object<T>, key: &String): u8 acquires PropertyMap {
+    let value = read_typed<T, u8>(object, key);
+    from_bcs::to_u8(value)
+}
+
+ + + +
+ + + +## Function `read_u16` + + + +
public fun read_u16<T: key>(object: &object::Object<T>, key: &string::String): u16
+
+ + + +
+Implementation + + +
public fun read_u16<T: key>(object: &Object<T>, key: &String): u16 acquires PropertyMap {
+    let value = read_typed<T, u16>(object, key);
+    from_bcs::to_u16(value)
+}
+
+ + + +
+ + + +## Function `read_u32` + + + +
public fun read_u32<T: key>(object: &object::Object<T>, key: &string::String): u32
+
+ + + +
+Implementation + + +
public fun read_u32<T: key>(object: &Object<T>, key: &String): u32 acquires PropertyMap {
+    let value = read_typed<T, u32>(object, key);
+    from_bcs::to_u32(value)
+}
+
+ + + +
+ + + +## Function `read_u64` + + + +
public fun read_u64<T: key>(object: &object::Object<T>, key: &string::String): u64
+
+ + + +
+Implementation + + +
public fun read_u64<T: key>(object: &Object<T>, key: &String): u64 acquires PropertyMap {
+    let value = read_typed<T, u64>(object, key);
+    from_bcs::to_u64(value)
+}
+
+ + + +
+ + + +## Function `read_u128` + + + +
public fun read_u128<T: key>(object: &object::Object<T>, key: &string::String): u128
+
+ + + +
+Implementation + + +
public fun read_u128<T: key>(object: &Object<T>, key: &String): u128 acquires PropertyMap {
+    let value = read_typed<T, u128>(object, key);
+    from_bcs::to_u128(value)
+}
+
+ + + +
+ + + +## Function `read_u256` + + + +
public fun read_u256<T: key>(object: &object::Object<T>, key: &string::String): u256
+
+ + + +
+Implementation + + +
public fun read_u256<T: key>(object: &Object<T>, key: &String): u256 acquires PropertyMap {
+    let value = read_typed<T, u256>(object, key);
+    from_bcs::to_u256(value)
+}
+
+ + + +
+ + + +## Function `read_address` + + + +
public fun read_address<T: key>(object: &object::Object<T>, key: &string::String): address
+
+ + + +
+Implementation + + +
public fun read_address<T: key>(object: &Object<T>, key: &String): address acquires PropertyMap {
+    let value = read_typed<T, address>(object, key);
+    from_bcs::to_address(value)
+}
+
+ + + +
+ + + +## Function `read_bytes` + + + +
public fun read_bytes<T: key>(object: &object::Object<T>, key: &string::String): vector<u8>
+
+ + + +
+Implementation + + +
public fun read_bytes<T: key>(object: &Object<T>, key: &String): vector<u8> acquires PropertyMap {
+    let value = read_typed<T, vector<u8>>(object, key);
+    from_bcs::to_bytes(value)
+}
+
+ + + +
+ + + +## Function `read_string` + + + +
public fun read_string<T: key>(object: &object::Object<T>, key: &string::String): string::String
+
+ + + +
+Implementation + + +
public fun read_string<T: key>(object: &Object<T>, key: &String): String acquires PropertyMap {
+    let value = read_typed<T, String>(object, key);
+    from_bcs::to_string(value)
+}
+
+ + + +
+ + + +## Function `add` + +Add a property, already bcs encoded as a vector<u8> + + +
public fun add(ref: &property_map::MutatorRef, key: string::String, type: string::String, value: vector<u8>)
+
+ + + +
+Implementation + + +
public fun add(ref: &MutatorRef, key: String, type: String, value: vector<u8>) acquires PropertyMap {
+    let new_type = to_internal_type(type);
+    validate_type(new_type, value);
+    add_internal(ref, key, new_type, value);
+}
+
+ + + +
+ + + +## Function `add_typed` + +Add a property that isn't already encoded as a vector<u8> + + +
public fun add_typed<T: drop>(ref: &property_map::MutatorRef, key: string::String, value: T)
+
+ + + +
+Implementation + + +
public fun add_typed<T: drop>(ref: &MutatorRef, key: String, value: T) acquires PropertyMap {
+    let type = type_info_to_internal_type<T>();
+    add_internal(ref, key, type, bcs::to_bytes(&value));
+}
+
+ + + +
+ + + +## Function `add_internal` + + + +
fun add_internal(ref: &property_map::MutatorRef, key: string::String, type: u8, value: vector<u8>)
+
+ + + +
+Implementation + + +
inline fun add_internal(ref: &MutatorRef, key: String, type: u8, value: vector<u8>) acquires PropertyMap {
+    assert_exists(ref.self);
+    let property_map = borrow_global_mut<PropertyMap>(ref.self);
+    simple_map::add(&mut property_map.inner, key, PropertyValue { type, value });
+}
+
+ + + +
+ + + +## Function `update` + +Updates a property in place that is already bcs encoded + + +
public fun update(ref: &property_map::MutatorRef, key: &string::String, type: string::String, value: vector<u8>)
+
+ + + +
+Implementation + + +
public fun update(ref: &MutatorRef, key: &String, type: String, value: vector<u8>) acquires PropertyMap {
+    let new_type = to_internal_type(type);
+    validate_type(new_type, value);
+    update_internal(ref, key, new_type, value);
+}
+
+ + + +
+ + + +## Function `update_typed` + +Updates a property in place that is not already bcs encoded + + +
public fun update_typed<T: drop>(ref: &property_map::MutatorRef, key: &string::String, value: T)
+
+ + + +
+Implementation + + +
public fun update_typed<T: drop>(ref: &MutatorRef, key: &String, value: T) acquires PropertyMap {
+    let type = type_info_to_internal_type<T>();
+    update_internal(ref, key, type, bcs::to_bytes(&value));
+}
+
+ + + +
+ + + +## Function `update_internal` + + + +
fun update_internal(ref: &property_map::MutatorRef, key: &string::String, type: u8, value: vector<u8>)
+
+ + + +
+Implementation + + +
inline fun update_internal(ref: &MutatorRef, key: &String, type: u8, value: vector<u8>) acquires PropertyMap {
+    assert_exists(ref.self);
+    let property_map = borrow_global_mut<PropertyMap>(ref.self);
+    let old_value = simple_map::borrow_mut(&mut property_map.inner, key);
+    *old_value = PropertyValue { type, value };
+}
+
+ + + +
+ + + +## Function `remove` + +Removes a property from the map, ensuring that it does in fact exist + + +
public fun remove(ref: &property_map::MutatorRef, key: &string::String)
+
+ + + +
+Implementation + + +
public fun remove(ref: &MutatorRef, key: &String) acquires PropertyMap {
+    assert_exists(ref.self);
+    let property_map = borrow_global_mut<PropertyMap>(ref.self);
+    simple_map::remove(&mut property_map.inner, key);
+}
+
+ + + +
+ + + +## Function `assert_end_to_end_input` + + + +
fun assert_end_to_end_input(object: object::Object<object::ObjectCore>)
+
+ + + +
+Implementation + + +
fun assert_end_to_end_input(object: Object<ObjectCore>) acquires PropertyMap {
+    assert!(read_bool(&object, &string::utf8(b"bool")), 0);
+    assert!(read_u8(&object, &string::utf8(b"u8")) == 0x12, 1);
+    assert!(read_u16(&object, &string::utf8(b"u16")) == 0x1234, 2);
+    assert!(read_u32(&object, &string::utf8(b"u32")) == 0x12345678, 3);
+    assert!(read_u64(&object, &string::utf8(b"u64")) == 0x1234567812345678, 4);
+    assert!(read_u128(&object, &string::utf8(b"u128")) == 0x12345678123456781234567812345678, 5);
+    assert!(
+        read_u256(
+            &object,
+            &string::utf8(b"u256")
+        ) == 0x1234567812345678123456781234567812345678123456781234567812345678,
+        6
+    );
+    assert!(read_bytes(&object, &string::utf8(b"vector<u8>")) == vector[0x01], 7);
+    assert!(read_string(&object, &string::utf8(b"0x1::string::String")) == string::utf8(b"a"), 8);
+
+    assert!(length(&object) == 9, 9);
+}
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-token-objects/tests/compiler-v2-doc/royalty.md b/aptos-move/framework/aptos-token-objects/tests/compiler-v2-doc/royalty.md new file mode 100644 index 0000000000000..491e716e7b89e --- /dev/null +++ b/aptos-move/framework/aptos-token-objects/tests/compiler-v2-doc/royalty.md @@ -0,0 +1,401 @@ + + + +# Module `0x4::royalty` + +This defines an object-based Royalty. The royalty can be applied to either a collection or a +token. Applications should read the royalty from the token, as it will read the appropriate +royalty. + + +- [Resource `Royalty`](#0x4_royalty_Royalty) +- [Struct `MutatorRef`](#0x4_royalty_MutatorRef) +- [Constants](#@Constants_0) +- [Function `init`](#0x4_royalty_init) +- [Function `update`](#0x4_royalty_update) +- [Function `create`](#0x4_royalty_create) +- [Function `generate_mutator_ref`](#0x4_royalty_generate_mutator_ref) +- [Function `exists_at`](#0x4_royalty_exists_at) +- [Function `delete`](#0x4_royalty_delete) +- [Function `get`](#0x4_royalty_get) +- [Function `denominator`](#0x4_royalty_denominator) +- [Function `numerator`](#0x4_royalty_numerator) +- [Function `payee_address`](#0x4_royalty_payee_address) + + +
use 0x1::error;
+use 0x1::object;
+use 0x1::option;
+
+ + + + + +## Resource `Royalty` + +The royalty of a token within this collection + +Royalties are optional for a collection. Royalty percentage is calculated +by (numerator / denominator) * 100% + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct Royalty has copy, drop, key
+
+ + + +
+Fields + + +
+
+numerator: u64 +
+
+ +
+
+denominator: u64 +
+
+ +
+
+payee_address: address +
+
+ The recipient of royalty payments. See the shared_account for how to handle multiple + creators. +
+
+ + +
+ + + +## Struct `MutatorRef` + +This enables creating or overwriting a MutatorRef. + + +
struct MutatorRef has drop, store
+
+ + + +
+Fields + + +
+
+inner: object::ExtendRef +
+
+ +
+
+ + +
+ + + +## Constants + + + + +The royalty denominator cannot be 0 + + +
const EROYALTY_DENOMINATOR_IS_ZERO: u64 = 3;
+
+ + + + + +Royalty does not exist + + +
const EROYALTY_DOES_NOT_EXIST: u64 = 1;
+
+ + + + + +The royalty cannot be greater than 100% + + +
const EROYALTY_EXCEEDS_MAXIMUM: u64 = 2;
+
+ + + + + +## Function `init` + +Add a royalty, given a ConstructorRef. + + +
public fun init(ref: &object::ConstructorRef, royalty: royalty::Royalty)
+
+ + + +
+Implementation + + +
public fun init(ref: &ConstructorRef, royalty: Royalty) {
+    let signer = object::generate_signer(ref);
+    move_to(&signer, royalty);
+}
+
+ + + +
+ + + +## Function `update` + +Set the royalty if it does not exist, replace it otherwise. + + +
public fun update(mutator_ref: &royalty::MutatorRef, royalty: royalty::Royalty)
+
+ + + +
+Implementation + + +
public fun update(mutator_ref: &MutatorRef, royalty: Royalty) acquires Royalty {
+    let addr = object::address_from_extend_ref(&mutator_ref.inner);
+    if (exists<Royalty>(addr)) {
+        move_from<Royalty>(addr);
+    };
+
+    let signer = object::generate_signer_for_extending(&mutator_ref.inner);
+    move_to(&signer, royalty);
+}
+
+ + + +
+ + + +## Function `create` + +Creates a new royalty, verifying that it is a valid percentage + + +
public fun create(numerator: u64, denominator: u64, payee_address: address): royalty::Royalty
+
+ + + +
+Implementation + + +
public fun create(numerator: u64, denominator: u64, payee_address: address): Royalty {
+    assert!(denominator != 0, error::out_of_range(EROYALTY_DENOMINATOR_IS_ZERO));
+    assert!(numerator <= denominator, error::out_of_range(EROYALTY_EXCEEDS_MAXIMUM));
+
+    Royalty { numerator, denominator, payee_address }
+}
+
+ + + +
+ + + +## Function `generate_mutator_ref` + + + +
public fun generate_mutator_ref(ref: object::ExtendRef): royalty::MutatorRef
+
+ + + +
+Implementation + + +
public fun generate_mutator_ref(ref: ExtendRef): MutatorRef {
+    MutatorRef { inner: ref }
+}
+
+ + + +
+ + + +## Function `exists_at` + + + +
public fun exists_at(addr: address): bool
+
+ + + +
+Implementation + + +
public fun exists_at(addr: address): bool {
+    exists<Royalty>(addr)
+}
+
+ + + +
+ + + +## Function `delete` + + + +
public(friend) fun delete(addr: address)
+
+ + + +
+Implementation + + +
public(friend) fun delete(addr: address) acquires Royalty {
+    assert!(exists<Royalty>(addr), error::not_found(EROYALTY_DOES_NOT_EXIST));
+    move_from<Royalty>(addr);
+}
+
+ + + +
+ + + +## Function `get` + + + +
public fun get<T: key>(maybe_royalty: object::Object<T>): option::Option<royalty::Royalty>
+
+ + + +
+Implementation + + +
public fun get<T: key>(maybe_royalty: Object<T>): Option<Royalty> acquires Royalty {
+    let obj_addr = object::object_address(&maybe_royalty);
+    if (exists<Royalty>(obj_addr)) {
+        option::some(*borrow_global<Royalty>(obj_addr))
+    } else {
+        option::none()
+    }
+}
+
+ + + +
+ + + +## Function `denominator` + + + +
public fun denominator(royalty: &royalty::Royalty): u64
+
+ + + +
+Implementation + + +
public fun denominator(royalty: &Royalty): u64 {
+    royalty.denominator
+}
+
+ + + +
+ + + +## Function `numerator` + + + +
public fun numerator(royalty: &royalty::Royalty): u64
+
+ + + +
+Implementation + + +
public fun numerator(royalty: &Royalty): u64 {
+    royalty.numerator
+}
+
+ + + +
+ + + +## Function `payee_address` + + + +
public fun payee_address(royalty: &royalty::Royalty): address
+
+ + + +
+Implementation + + +
public fun payee_address(royalty: &Royalty): address {
+    royalty.payee_address
+}
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-token-objects/tests/compiler-v2-doc/token.md b/aptos-move/framework/aptos-token-objects/tests/compiler-v2-doc/token.md new file mode 100644 index 0000000000000..637afe80f44ba --- /dev/null +++ b/aptos-move/framework/aptos-token-objects/tests/compiler-v2-doc/token.md @@ -0,0 +1,1941 @@ + + + +# Module `0x4::token` + +This defines an object-based Token. The key differentiating features from the Aptos standard +token are: +* Decoupled token ownership from token data. +* Explicit data model for token metadata via adjacent resources +* Extensible framework for tokens + + +- [Resource `Token`](#0x4_token_Token) +- [Resource `TokenIdentifiers`](#0x4_token_TokenIdentifiers) +- [Resource `ConcurrentTokenIdentifiers`](#0x4_token_ConcurrentTokenIdentifiers) +- [Struct `BurnRef`](#0x4_token_BurnRef) +- [Struct `MutatorRef`](#0x4_token_MutatorRef) +- [Struct `MutationEvent`](#0x4_token_MutationEvent) +- [Struct `Mutation`](#0x4_token_Mutation) +- [Constants](#@Constants_0) +- [Function `create_common`](#0x4_token_create_common) +- [Function `create_common_with_collection`](#0x4_token_create_common_with_collection) +- [Function `create_common_with_collection_as_owner`](#0x4_token_create_common_with_collection_as_owner) +- [Function `create_common_with_collection_internal`](#0x4_token_create_common_with_collection_internal) +- [Function `create_token`](#0x4_token_create_token) +- [Function `create`](#0x4_token_create) +- [Function `create_token_as_collection_owner`](#0x4_token_create_token_as_collection_owner) +- [Function `create_numbered_token_object`](#0x4_token_create_numbered_token_object) +- [Function `create_numbered_token`](#0x4_token_create_numbered_token) +- [Function `create_numbered_token_as_collection_owner`](#0x4_token_create_numbered_token_as_collection_owner) +- [Function `create_named_token_object`](#0x4_token_create_named_token_object) +- [Function 
`create_named_token`](#0x4_token_create_named_token) +- [Function `create_named_token_as_collection_owner`](#0x4_token_create_named_token_as_collection_owner) +- [Function `create_named_token_from_seed`](#0x4_token_create_named_token_from_seed) +- [Function `create_named_token_from_seed_as_collection_owner`](#0x4_token_create_named_token_from_seed_as_collection_owner) +- [Function `create_from_account`](#0x4_token_create_from_account) +- [Function `create_token_address`](#0x4_token_create_token_address) +- [Function `create_token_address_with_seed`](#0x4_token_create_token_address_with_seed) +- [Function `create_token_seed`](#0x4_token_create_token_seed) +- [Function `create_token_name_with_seed`](#0x4_token_create_token_name_with_seed) +- [Function `generate_mutator_ref`](#0x4_token_generate_mutator_ref) +- [Function `generate_burn_ref`](#0x4_token_generate_burn_ref) +- [Function `address_from_burn_ref`](#0x4_token_address_from_burn_ref) +- [Function `borrow`](#0x4_token_borrow) +- [Function `creator`](#0x4_token_creator) +- [Function `collection_name`](#0x4_token_collection_name) +- [Function `collection_object`](#0x4_token_collection_object) +- [Function `description`](#0x4_token_description) +- [Function `name`](#0x4_token_name) +- [Function `uri`](#0x4_token_uri) +- [Function `royalty`](#0x4_token_royalty) +- [Function `index`](#0x4_token_index) +- [Function `borrow_mut`](#0x4_token_borrow_mut) +- [Function `burn`](#0x4_token_burn) +- [Function `set_description`](#0x4_token_set_description) +- [Function `set_name`](#0x4_token_set_name) +- [Function `set_uri`](#0x4_token_set_uri) + + +
use 0x1::aggregator_v2;
+use 0x1::error;
+use 0x1::event;
+use 0x1::features;
+use 0x1::object;
+use 0x1::option;
+use 0x1::signer;
+use 0x1::string;
+use 0x1::vector;
+use 0x4::collection;
+use 0x4::royalty;
+
+ + + + + +## Resource `Token` + +Represents the common fields to all tokens. + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct Token has key
+
+ + + +
+Fields + + +
+
+collection: object::Object<collection::Collection> +
+
+ The collection from which this token resides. +
+
+index: u64 +
+
+ Deprecated in favor of index inside TokenIdentifiers. + Was populated until concurrent_token_v2_enabled feature flag was enabled. + + Unique identifier within the collection, optional, 0 means unassigned +
+
+description: string::String +
+
+ A brief description of the token. +
+
+name: string::String +
+
+ + Deprecated in favor of name inside TokenIdentifiers. + Was populated until concurrent_token_v2_enabled feature flag was enabled. + + The name of the token, which should be unique within the collection; the length of name + should be smaller than 128 characters, e.g., "Aptos Animal #1234" +
+
+uri: string::String +
+
+ The Uniform Resource Identifier (uri) pointing to the JSON file stored in off-chain + storage; the URL length will likely need a maximum; any suggestions? +
+
+mutation_events: event::EventHandle<token::MutationEvent> +
+
+ Emitted upon any mutation of the token. +
+
+ + +
+ + + +## Resource `TokenIdentifiers` + +Represents first addition to the common fields for all tokens +Started being populated once aggregator_v2_api_enabled was enabled. + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct TokenIdentifiers has key
+
+ + + +
+Fields + + +
+
+index: aggregator_v2::AggregatorSnapshot<u64> +
+
+ Unique identifier within the collection, optional, 0 means unassigned +
+
+name: aggregator_v2::DerivedStringSnapshot +
+
+ The name of the token, which should be unique within the collection; the length of name + should be smaller than 128 characters, e.g., "Aptos Animal #1234" +
+
+ + +
+ + + +## Resource `ConcurrentTokenIdentifiers` + + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+#[deprecated]
+struct ConcurrentTokenIdentifiers has key
+
+ + + +
+Fields + + +
+
+index: aggregator_v2::AggregatorSnapshot<u64> +
+
+ +
+
+name: aggregator_v2::AggregatorSnapshot<string::String> +
+
+ +
+
+ + +
+ + + +## Struct `BurnRef` + +This enables burning an NFT; if possible, it will also delete the object. Note, the data +in inner and self occupies 32 bytes each; rather than having both, this data structure makes +a small optimization to support either and take a fixed amount of 34 bytes. + + +
struct BurnRef has drop, store
+
+ + + +
+Fields + + +
+
+inner: option::Option<object::DeleteRef> +
+
+ +
+
+self: option::Option<address> +
+
+ +
+
+ + +
+ + + +## Struct `MutatorRef` + +This enables mutating description and URI by higher level services. + + +
struct MutatorRef has drop, store
+
+ + + +
+Fields + + +
+
+self: address +
+
+ +
+
+ + +
+ + + +## Struct `MutationEvent` + +Contains the mutated fields name. This makes the life of indexers easier, so that they can +directly understand the behavior in a writeset. + + +
struct MutationEvent has drop, store
+
+ + + +
+Fields + + +
+
+mutated_field_name: string::String +
+
+ +
+
+old_value: string::String +
+
+ +
+
+new_value: string::String +
+
+ +
+
+ + +
+ + + +## Struct `Mutation` + + + +
#[event]
+struct Mutation has drop, store
+
+ + + +
+Fields + + +
+
+token_address: address +
+
+ +
+
+mutated_field_name: string::String +
+
+ +
+
+old_value: string::String +
+
+ +
+
+new_value: string::String +
+
+ +
+
+ + +
+ + + +## Constants + + + + +The URI is over the maximum length + + +
const EURI_TOO_LONG: u64 = 5;
+
+ + + + + + + +
const MAX_URI_LENGTH: u64 = 512;
+
+ + + + + +The calling signer is not the owner + + +
const ENOT_OWNER: u64 = 8;
+
+ + + + + +The collection owner feature is not supported + + +
const ECOLLECTION_OWNER_NOT_SUPPORTED: u64 = 9;
+
+ + + + + +The description is over the maximum length + + +
const EDESCRIPTION_TOO_LONG: u64 = 6;
+
+ + + + + + + +
const MAX_DESCRIPTION_LENGTH: u64 = 2048;
+
+ + + + + +The field being changed is not mutable + + +
const EFIELD_NOT_MUTABLE: u64 = 3;
+
+ + + + + +The provided signer is not the creator + + +
const ENOT_CREATOR: u64 = 2;
+
+ + + + + +The seed is over the maximum length + + +
const ESEED_TOO_LONG: u64 = 7;
+
+ + + + + +The token does not exist + + +
const ETOKEN_DOES_NOT_EXIST: u64 = 1;
+
+ + + + + +The token name is over the maximum length + + +
const ETOKEN_NAME_TOO_LONG: u64 = 4;
+
+ + + + + + + +
const MAX_TOKEN_NAME_LENGTH: u64 = 128;
+
+ + + + + + + +
const MAX_TOKEN_SEED_LENGTH: u64 = 128;
+
+ + + + + +## Function `create_common` + + + +
fun create_common(creator: &signer, constructor_ref: &object::ConstructorRef, collection_name: string::String, description: string::String, name_prefix: string::String, name_with_index_suffix: option::Option<string::String>, royalty: option::Option<royalty::Royalty>, uri: string::String)
+
+ + + +
+Implementation + + +
inline fun create_common(
+    creator: &signer,
+    constructor_ref: &ConstructorRef,
+    collection_name: String,
+    description: String,
+    name_prefix: String,
+    // If option::some, numbered token is created - i.e. index is appended to the name.
+    // If option::none, name_prefix is the full name of the token.
+    name_with_index_suffix: Option<String>,
+    royalty: Option<Royalty>,
+    uri: String,
+) {
+    let creator_address = signer::address_of(creator);
+    let collection_addr = collection::create_collection_address(&creator_address, &collection_name);
+    let collection = object::address_to_object<Collection>(collection_addr);
+
+    create_common_with_collection(
+        creator,
+        constructor_ref,
+        collection,
+        description,
+        name_prefix,
+        name_with_index_suffix,
+        royalty,
+        uri
+    )
+}
+
+ + + +
+ + + +## Function `create_common_with_collection` + + + +
fun create_common_with_collection(creator: &signer, constructor_ref: &object::ConstructorRef, collection: object::Object<collection::Collection>, description: string::String, name_prefix: string::String, name_with_index_suffix: option::Option<string::String>, royalty: option::Option<royalty::Royalty>, uri: string::String)
+
+ + + +
+Implementation + + +
inline fun create_common_with_collection(
+    creator: &signer,
+    constructor_ref: &ConstructorRef,
+    collection: Object<Collection>,
+    description: String,
+    name_prefix: String,
+    // If option::some, numbered token is created - i.e. index is appended to the name.
+    // If option::none, name_prefix is the full name of the token.
+    name_with_index_suffix: Option<String>,
+    royalty: Option<Royalty>,
+    uri: String,
+) {
+    assert!(collection::creator(collection) == signer::address_of(creator), error::unauthenticated(ENOT_CREATOR));
+
+    create_common_with_collection_internal(
+        constructor_ref,
+        collection,
+        description,
+        name_prefix,
+        name_with_index_suffix,
+        royalty,
+        uri
+    );
+}
+
+ + + +
+ + + +## Function `create_common_with_collection_as_owner` + + + +
fun create_common_with_collection_as_owner(owner: &signer, constructor_ref: &object::ConstructorRef, collection: object::Object<collection::Collection>, description: string::String, name_prefix: string::String, name_with_index_suffix: option::Option<string::String>, royalty: option::Option<royalty::Royalty>, uri: string::String)
+
+ + + +
+Implementation + + +
inline fun create_common_with_collection_as_owner(
+    owner: &signer,
+    constructor_ref: &ConstructorRef,
+    collection: Object<Collection>,
+    description: String,
+    name_prefix: String,
+    // If option::some, numbered token is created - i.e. index is appended to the name.
+    // If option::none, name_prefix is the full name of the token.
+    name_with_index_suffix: Option<String>,
+    royalty: Option<Royalty>,
+    uri: String,
+) {
+    assert!(features::is_collection_owner_enabled(), error::unavailable(ECOLLECTION_OWNER_NOT_SUPPORTED));
+    assert!(object::owner(collection) == signer::address_of(owner), error::unauthenticated(ENOT_OWNER));
+
+    create_common_with_collection_internal(
+        constructor_ref,
+        collection,
+        description,
+        name_prefix,
+        name_with_index_suffix,
+        royalty,
+        uri
+    );
+}
+
+ + + +
+ + + +## Function `create_common_with_collection_internal` + + + +
fun create_common_with_collection_internal(constructor_ref: &object::ConstructorRef, collection: object::Object<collection::Collection>, description: string::String, name_prefix: string::String, name_with_index_suffix: option::Option<string::String>, royalty: option::Option<royalty::Royalty>, uri: string::String)
+
+ + + +
+Implementation + + +
inline fun create_common_with_collection_internal(
+    constructor_ref: &ConstructorRef,
+    collection: Object<Collection>,
+    description: String,
+    name_prefix: String,
+    // If option::some, numbered token is created - i.e. index is appended to the name.
+    // If option::none, name_prefix is the full name of the token.
+    name_with_index_suffix: Option<String>,
+    royalty: Option<Royalty>,
+    uri: String,
+) {
+    if (option::is_some(&name_with_index_suffix)) {
+        // Be conservative, as we don't know what length the index will be, and assume worst case (20 chars in MAX_U64)
+        assert!(
+            string::length(&name_prefix) + 20 + string::length(
+                option::borrow(&name_with_index_suffix)
+            ) <= MAX_TOKEN_NAME_LENGTH,
+            error::out_of_range(ETOKEN_NAME_TOO_LONG)
+        );
+    } else {
+        assert!(string::length(&name_prefix) <= MAX_TOKEN_NAME_LENGTH, error::out_of_range(ETOKEN_NAME_TOO_LONG));
+    };
+    assert!(string::length(&description) <= MAX_DESCRIPTION_LENGTH, error::out_of_range(EDESCRIPTION_TOO_LONG));
+    assert!(string::length(&uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG));
+
+    let object_signer = object::generate_signer(constructor_ref);
+
+    let index = option::destroy_with_default(
+        collection::increment_supply(&collection, signer::address_of(&object_signer)),
+        aggregator_v2::create_snapshot<u64>(0)
+    );
+
+    // If create_numbered_token called us, add index to the name.
+    let name = if (option::is_some(&name_with_index_suffix)) {
+        aggregator_v2::derive_string_concat(name_prefix, &index, option::extract(&mut name_with_index_suffix))
+    } else {
+        aggregator_v2::create_derived_string(name_prefix)
+    };
+
+    let deprecated_index = 0;
+    let deprecated_name = string::utf8(b"");
+
+    let token_concurrent = TokenIdentifiers {
+        index,
+        name,
+    };
+    move_to(&object_signer, token_concurrent);
+
+    let token = Token {
+        collection,
+        index: deprecated_index,
+        description,
+        name: deprecated_name,
+        uri,
+        mutation_events: object::new_event_handle(&object_signer),
+    };
+    move_to(&object_signer, token);
+
+    if (option::is_some(&royalty)) {
+        royalty::init(constructor_ref, option::extract(&mut royalty))
+    };
+}
+
+ + + +
+ + + +## Function `create_token` + +Creates a new token object with a unique address and returns the ConstructorRef +for additional specialization. +This takes in the collection object instead of the collection name. +This function must be called if the collection name has been previously changed. + + +
public fun create_token(creator: &signer, collection: object::Object<collection::Collection>, description: string::String, name: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_token(
+    creator: &signer,
+    collection: Object<Collection>,
+    description: String,
+    name: String,
+    royalty: Option<Royalty>,
+    uri: String,
+): ConstructorRef {
+    let creator_address = signer::address_of(creator);
+    let constructor_ref = object::create_object(creator_address);
+    create_common_with_collection(
+        creator,
+        &constructor_ref,
+        collection,
+        description,
+        name,
+        option::none(),
+        royalty,
+        uri
+    );
+    constructor_ref
+}
+
+ + + +
+ + + +## Function `create` + +Creates a new token object with a unique address and returns the ConstructorRef +for additional specialization. + + +
public fun create(creator: &signer, collection_name: string::String, description: string::String, name: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create(
+    creator: &signer,
+    collection_name: String,
+    description: String,
+    name: String,
+    royalty: Option<Royalty>,
+    uri: String,
+): ConstructorRef {
+    let creator_address = signer::address_of(creator);
+    let constructor_ref = object::create_object(creator_address);
+    create_common(
+        creator,
+        &constructor_ref,
+        collection_name,
+        description,
+        name,
+        option::none(),
+        royalty,
+        uri
+    );
+    constructor_ref
+}
+
+ + + +
+ + + +## Function `create_token_as_collection_owner` + +Same functionality as create_token, but the token can only be created by the collection owner. + + +
public fun create_token_as_collection_owner(creator: &signer, collection: object::Object<collection::Collection>, description: string::String, name: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_token_as_collection_owner(
+    creator: &signer,
+    collection: Object<Collection>,
+    description: String,
+    name: String,
+    royalty: Option<Royalty>,
+    uri: String,
+): ConstructorRef {
+    let creator_address = signer::address_of(creator);
+    let constructor_ref = object::create_object(creator_address);
+    create_common_with_collection_as_owner(
+        creator,
+        &constructor_ref,
+        collection,
+        description,
+        name,
+        option::none(),
+        royalty,
+        uri
+    );
+    constructor_ref
+}
+
+ + + +
+ + + +## Function `create_numbered_token_object` + +Creates a new token object with a unique address and returns the ConstructorRef +for additional specialization. +The name is created by concatenating the (name_prefix, index, name_suffix). +This function allows creating tokens in parallel, from the same collection, +while providing sequential names. + +This takes in the collection object instead of the collection name. +This function must be called if the collection name has been previously changed. + + +
public fun create_numbered_token_object(creator: &signer, collection: object::Object<collection::Collection>, description: string::String, name_with_index_prefix: string::String, name_with_index_suffix: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_numbered_token_object(
+    creator: &signer,
+    collection: Object<Collection>,
+    description: String,
+    name_with_index_prefix: String,
+    name_with_index_suffix: String,
+    royalty: Option<Royalty>,
+    uri: String,
+): ConstructorRef {
+    let creator_address = signer::address_of(creator);
+    let constructor_ref = object::create_object(creator_address);
+    create_common_with_collection(
+        creator,
+        &constructor_ref,
+        collection,
+        description,
+        name_with_index_prefix,
+        option::some(name_with_index_suffix),
+        royalty,
+        uri
+    );
+    constructor_ref
+}
+
+ + + +
+ + + +## Function `create_numbered_token` + +Creates a new token object with a unique address and returns the ConstructorRef +for additional specialization. +The name is created by concatenating the (name_prefix, index, name_suffix). +This function will allow creating tokens in parallel, from the same collection, +while providing sequential names. + + +
public fun create_numbered_token(creator: &signer, collection_name: string::String, description: string::String, name_with_index_prefix: string::String, name_with_index_suffix: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_numbered_token(
+    creator: &signer,
+    collection_name: String,
+    description: String,
+    name_with_index_prefix: String,
+    name_with_index_suffix: String,
+    royalty: Option<Royalty>,
+    uri: String,
+): ConstructorRef {
+    let creator_address = signer::address_of(creator);
+    let constructor_ref = object::create_object(creator_address);
+    create_common(
+        creator,
+        &constructor_ref,
+        collection_name,
+        description,
+        name_with_index_prefix,
+        option::some(name_with_index_suffix),
+        royalty,
+        uri
+    );
+    constructor_ref
+}
+
+ + + +
+ + + +## Function `create_numbered_token_as_collection_owner` + +Same functionality as create_numbered_token_object, but the token can only be created by the collection owner. + + +
public fun create_numbered_token_as_collection_owner(creator: &signer, collection: object::Object<collection::Collection>, description: string::String, name_with_index_prefix: string::String, name_with_index_suffix: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_numbered_token_as_collection_owner(
+    creator: &signer,
+    collection: Object<Collection>,
+    description: String,
+    name_with_index_prefix: String,
+    name_with_index_suffix: String,
+    royalty: Option<Royalty>,
+    uri: String,
+): ConstructorRef {
+    let creator_address = signer::address_of(creator);
+    let constructor_ref = object::create_object(creator_address);
+    create_common_with_collection_as_owner(
+        creator,
+        &constructor_ref,
+        collection,
+        description,
+        name_with_index_prefix,
+        option::some(name_with_index_suffix),
+        royalty,
+        uri
+    );
+    constructor_ref
+}
+
+ + + +
+ + + +## Function `create_named_token_object` + +Creates a new token object from a token name and returns the ConstructorRef for +additional specialization. +This function must be called if the collection name has been previously changed. + + +
public fun create_named_token_object(creator: &signer, collection: object::Object<collection::Collection>, description: string::String, name: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_named_token_object(
+    creator: &signer,
+    collection: Object<Collection>,
+    description: String,
+    name: String,
+    royalty: Option<Royalty>,
+    uri: String,
+): ConstructorRef {
+    let seed = create_token_seed(&collection::name(collection), &name);
+    let constructor_ref = object::create_named_object(creator, seed);
+    create_common_with_collection(
+        creator,
+        &constructor_ref,
+        collection,
+        description,
+        name,
+        option::none(),
+        royalty,
+        uri
+    );
+    constructor_ref
+}
+
+ + + +
+ + + +## Function `create_named_token` + +Creates a new token object from a token name and returns the ConstructorRef for +additional specialization. + + +
public fun create_named_token(creator: &signer, collection_name: string::String, description: string::String, name: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_named_token(
+    creator: &signer,
+    collection_name: String,
+    description: String,
+    name: String,
+    royalty: Option<Royalty>,
+    uri: String,
+): ConstructorRef {
+    let seed = create_token_seed(&collection_name, &name);
+
+    let constructor_ref = object::create_named_object(creator, seed);
+    create_common(
+        creator,
+        &constructor_ref,
+        collection_name,
+        description,
+        name,
+        option::none(),
+        royalty,
+        uri
+    );
+    constructor_ref
+}
+
+ + + +
+ + + +## Function `create_named_token_as_collection_owner` + +Same functionality as create_named_token_object, but the token can only be created by the collection owner. + + +
public fun create_named_token_as_collection_owner(creator: &signer, collection: object::Object<collection::Collection>, description: string::String, name: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_named_token_as_collection_owner(
+    creator: &signer,
+    collection: Object<Collection>,
+    description: String,
+    name: String,
+    royalty: Option<Royalty>,
+    uri: String,
+): ConstructorRef {
+    let seed = create_token_seed(&collection::name(collection), &name);
+    let constructor_ref = object::create_named_object(creator, seed);
+    create_common_with_collection_as_owner(
+        creator,
+        &constructor_ref,
+        collection,
+        description,
+        name,
+        option::none(),
+        royalty,
+        uri
+    );
+    constructor_ref
+}
+
+ + + +
+ + + +## Function `create_named_token_from_seed` + +Creates a new token object from a token name and seed. +Returns the ConstructorRef for additional specialization. +This function must be called if the collection name has been previously changed. + + +
public fun create_named_token_from_seed(creator: &signer, collection: object::Object<collection::Collection>, description: string::String, name: string::String, seed: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_named_token_from_seed(
+    creator: &signer,
+    collection: Object<Collection>,
+    description: String,
+    name: String,
+    seed: String,
+    royalty: Option<Royalty>,
+    uri: String,
+): ConstructorRef {
+    let seed = create_token_name_with_seed(&collection::name(collection), &name, &seed);
+    let constructor_ref = object::create_named_object(creator, seed);
+    create_common_with_collection(creator, &constructor_ref, collection, description, name, option::none(), royalty, uri);
+    constructor_ref
+}
+
+ + + +
+ + + +## Function `create_named_token_from_seed_as_collection_owner` + +Same functionality as create_named_token_from_seed, but the token can only be created by the collection owner. + + +
public fun create_named_token_from_seed_as_collection_owner(creator: &signer, collection: object::Object<collection::Collection>, description: string::String, name: string::String, seed: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_named_token_from_seed_as_collection_owner(
+    creator: &signer,
+    collection: Object<Collection>,
+    description: String,
+    name: String,
+    seed: String,
+    royalty: Option<Royalty>,
+    uri: String,
+): ConstructorRef {
+    let seed = create_token_name_with_seed(&collection::name(collection), &name, &seed);
+    let constructor_ref = object::create_named_object(creator, seed);
+    create_common_with_collection_as_owner(
+        creator,
+        &constructor_ref,
+        collection,
+        description,
+        name,
+        option::none(),
+        royalty,
+        uri
+    );
+    constructor_ref
+}
+
+ + + +
+ + + +## Function `create_from_account` + +DEPRECATED: Use create instead for identical behavior. + +Creates a new token object from an account GUID and returns the ConstructorRef for +additional specialization. + + +
#[deprecated]
+public fun create_from_account(creator: &signer, collection_name: string::String, description: string::String, name: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_from_account(
+    creator: &signer,
+    collection_name: String,
+    description: String,
+    name: String,
+    royalty: Option<Royalty>,
+    uri: String,
+): ConstructorRef {
+    let constructor_ref = object::create_object_from_account(creator);
+    create_common(
+        creator,
+        &constructor_ref,
+        collection_name,
+        description,
+        name,
+        option::none(),
+        royalty,
+        uri
+    );
+    constructor_ref
+}
+
+ + + +
+ + + +## Function `create_token_address` + +Generates the token's address based upon the creator's address, the collection's name and the token's name. + + +
public fun create_token_address(creator: &address, collection: &string::String, name: &string::String): address
+
+ + + +
+Implementation + + +
public fun create_token_address(creator: &address, collection: &String, name: &String): address {
+    object::create_object_address(creator, create_token_seed(collection, name))
+}
+
+ + + +
+ + + +## Function `create_token_address_with_seed` + +Generates the token's address based upon the creator's address, the collection object and the token's name and seed. + + +
#[view]
+public fun create_token_address_with_seed(creator: address, collection: string::String, name: string::String, seed: string::String): address
+
+ + + +
+Implementation + + +
public fun create_token_address_with_seed(creator: address, collection: String, name: String, seed: String): address {
+    let seed = create_token_name_with_seed(&collection, &name, &seed);
+    object::create_object_address(&creator, seed)
+}
+
+ + + +
+ + + +## Function `create_token_seed` + +Named objects are derived from a seed, the token's seed is its name appended to the collection's name. + + +
public fun create_token_seed(collection: &string::String, name: &string::String): vector<u8>
+
+ + + +
+Implementation + + +
public fun create_token_seed(collection: &String, name: &String): vector<u8> {
+    assert!(string::length(name) <= MAX_TOKEN_NAME_LENGTH, error::out_of_range(ETOKEN_NAME_TOO_LONG));
+    let seed = *string::bytes(collection);
+    vector::append(&mut seed, b"::");
+    vector::append(&mut seed, *string::bytes(name));
+    seed
+}
+
+ + + +
+ + + +## Function `create_token_name_with_seed` + + + +
public fun create_token_name_with_seed(collection: &string::String, name: &string::String, seed: &string::String): vector<u8>
+
+ + + +
+Implementation + + +
public fun create_token_name_with_seed(collection: &String, name: &String, seed: &String): vector<u8> {
+    assert!(string::length(seed) <= MAX_TOKEN_SEED_LENGTH, error::out_of_range(ESEED_TOO_LONG));
+    let seeds = create_token_seed(collection, name);
+    vector::append(&mut seeds, *string::bytes(seed));
+    seeds
+}
+
+ + + +
+ + + +## Function `generate_mutator_ref` + +Creates a MutatorRef, which gates the ability to mutate any fields that support mutation. + + +
public fun generate_mutator_ref(ref: &object::ConstructorRef): token::MutatorRef
+
+ + + +
+Implementation + + +
public fun generate_mutator_ref(ref: &ConstructorRef): MutatorRef {
+    let object = object::object_from_constructor_ref<Token>(ref);
+    MutatorRef { self: object::object_address(&object) }
+}
+
+ + + +
+ + + +## Function `generate_burn_ref` + +Creates a BurnRef, which gates the ability to burn the given token. + + +
public fun generate_burn_ref(ref: &object::ConstructorRef): token::BurnRef
+
+ + + +
+Implementation + + +
public fun generate_burn_ref(ref: &ConstructorRef): BurnRef {
+    let (inner, self) = if (object::can_generate_delete_ref(ref)) {
+        let delete_ref = object::generate_delete_ref(ref);
+        (option::some(delete_ref), option::none())
+    } else {
+        let addr = object::address_from_constructor_ref(ref);
+        (option::none(), option::some(addr))
+    };
+    BurnRef { self, inner }
+}
+
+ + + +
+ + + +## Function `address_from_burn_ref` + +Extracts the token's address from a BurnRef. + + +
public fun address_from_burn_ref(ref: &token::BurnRef): address
+
+ + + +
+Implementation + + +
public fun address_from_burn_ref(ref: &BurnRef): address {
+    if (option::is_some(&ref.inner)) {
+        object::address_from_delete_ref(option::borrow(&ref.inner))
+    } else {
+        *option::borrow(&ref.self)
+    }
+}
+
+ + + +
+ + + +## Function `borrow` + + + +
fun borrow<T: key>(token: &object::Object<T>): &token::Token
+
+ + + +
+Implementation + + +
inline fun borrow<T: key>(token: &Object<T>): &Token acquires Token {
+    let token_address = object::object_address(token);
+    assert!(
+        exists<Token>(token_address),
+        error::not_found(ETOKEN_DOES_NOT_EXIST),
+    );
+    borrow_global<Token>(token_address)
+}
+
+ + + +
+ + + +## Function `creator` + + + +
#[view]
+public fun creator<T: key>(token: object::Object<T>): address
+
+ + + +
+Implementation + + +
public fun creator<T: key>(token: Object<T>): address acquires Token {
+    collection::creator(borrow(&token).collection)
+}
+
+ + + +
+ + + +## Function `collection_name` + + + +
#[view]
+public fun collection_name<T: key>(token: object::Object<T>): string::String
+
+ + + +
+Implementation + + +
public fun collection_name<T: key>(token: Object<T>): String acquires Token {
+    collection::name(borrow(&token).collection)
+}
+
+ + + +
+ + + +## Function `collection_object` + + + +
#[view]
+public fun collection_object<T: key>(token: object::Object<T>): object::Object<collection::Collection>
+
+ + + +
+Implementation + + +
public fun collection_object<T: key>(token: Object<T>): Object<Collection> acquires Token {
+    borrow(&token).collection
+}
+
+ + + +
+ + + +## Function `description` + + + +
#[view]
+public fun description<T: key>(token: object::Object<T>): string::String
+
+ + + +
+Implementation + + +
public fun description<T: key>(token: Object<T>): String acquires Token {
+    borrow(&token).description
+}
+
+ + + +
+ + + +## Function `name` + +Avoid this method in the same transaction as the token is minted +as that would prohibit transactions from being executed in parallel. + + +
#[view]
+public fun name<T: key>(token: object::Object<T>): string::String
+
+ + + +
+Implementation + + +
public fun name<T: key>(token: Object<T>): String acquires Token, TokenIdentifiers {
+    let token_address = object::object_address(&token);
+    if (exists<TokenIdentifiers>(token_address)) {
+        aggregator_v2::read_derived_string(&borrow_global<TokenIdentifiers>(token_address).name)
+    } else {
+        borrow(&token).name
+    }
+}
+
+ + + +
+ + + +## Function `uri` + + + +
#[view]
+public fun uri<T: key>(token: object::Object<T>): string::String
+
+ + + +
+Implementation + + +
public fun uri<T: key>(token: Object<T>): String acquires Token {
+    borrow(&token).uri
+}
+
+ + + +
+ + + +## Function `royalty` + + + +
#[view]
+public fun royalty<T: key>(token: object::Object<T>): option::Option<royalty::Royalty>
+
+ + + +
+Implementation + + +
public fun royalty<T: key>(token: Object<T>): Option<Royalty> acquires Token {
+    borrow(&token);
+    let royalty = royalty::get(token);
+    if (option::is_some(&royalty)) {
+        royalty
+    } else {
+        let creator = creator(token);
+        let collection_name = collection_name(token);
+        let collection_address = collection::create_collection_address(&creator, &collection_name);
+        let collection = object::address_to_object<collection::Collection>(collection_address);
+        royalty::get(collection)
+    }
+}
+
+ + + +
+ + + +## Function `index` + +Avoid this method in the same transaction as the token is minted +as that would prohibit transactions from being executed in parallel. + + +
#[view]
+public fun index<T: key>(token: object::Object<T>): u64
+
+ + + +
+Implementation + + +
public fun index<T: key>(token: Object<T>): u64 acquires Token, TokenIdentifiers {
+    let token_address = object::object_address(&token);
+    if (exists<TokenIdentifiers>(token_address)) {
+        aggregator_v2::read_snapshot(&borrow_global<TokenIdentifiers>(token_address).index)
+    } else {
+        borrow(&token).index
+    }
+}
+
+ + + +
+ + + +## Function `borrow_mut` + + + +
fun borrow_mut(mutator_ref: &token::MutatorRef): &mut token::Token
+
+ + + +
+Implementation + + +
inline fun borrow_mut(mutator_ref: &MutatorRef): &mut Token acquires Token {
+    assert!(
+        exists<Token>(mutator_ref.self),
+        error::not_found(ETOKEN_DOES_NOT_EXIST),
+    );
+    borrow_global_mut<Token>(mutator_ref.self)
+}
+
+ + + +
+ + + +## Function `burn` + + + +
public fun burn(burn_ref: token::BurnRef)
+
+ + + +
+Implementation + + +
public fun burn(burn_ref: BurnRef) acquires Token, TokenIdentifiers {
+    let (addr, previous_owner) = if (option::is_some(&burn_ref.inner)) {
+        let delete_ref = option::extract(&mut burn_ref.inner);
+        let addr = object::address_from_delete_ref(&delete_ref);
+        let previous_owner = object::owner(object::address_to_object<Token>(addr));
+        object::delete(delete_ref);
+        (addr, previous_owner)
+    } else {
+        let addr = option::extract(&mut burn_ref.self);
+        let previous_owner = object::owner(object::address_to_object<Token>(addr));
+        (addr, previous_owner)
+    };
+
+    if (royalty::exists_at(addr)) {
+        royalty::delete(addr)
+    };
+
+    let Token {
+        collection,
+        index: deprecated_index,
+        description: _,
+        name: _,
+        uri: _,
+        mutation_events,
+    } = move_from<Token>(addr);
+
+    let index = if (exists<TokenIdentifiers>(addr)) {
+        let TokenIdentifiers {
+            index,
+            name: _,
+        } = move_from<TokenIdentifiers>(addr);
+        aggregator_v2::read_snapshot(&index)
+    } else {
+        deprecated_index
+    };
+
+    event::destroy_handle(mutation_events);
+    collection::decrement_supply(&collection, addr, option::some(index), previous_owner);
+}
+
+ + + +
+ + + +## Function `set_description` + + + +
public fun set_description(mutator_ref: &token::MutatorRef, description: string::String)
+
+ + + +
+Implementation + + +
public fun set_description(mutator_ref: &MutatorRef, description: String) acquires Token {
+    assert!(string::length(&description) <= MAX_DESCRIPTION_LENGTH, error::out_of_range(EDESCRIPTION_TOO_LONG));
+    let token = borrow_mut(mutator_ref);
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(Mutation {
+            token_address: mutator_ref.self,
+            mutated_field_name: string::utf8(b"description"),
+            old_value: token.description,
+            new_value: description
+        })
+    };
+    event::emit_event(
+        &mut token.mutation_events,
+        MutationEvent {
+            mutated_field_name: string::utf8(b"description"),
+            old_value: token.description,
+            new_value: description
+        },
+    );
+    token.description = description;
+}
+
+ + + +
+ + + +## Function `set_name` + + + +
public fun set_name(mutator_ref: &token::MutatorRef, name: string::String)
+
+ + + +
+Implementation + + +
public fun set_name(mutator_ref: &MutatorRef, name: String) acquires Token, TokenIdentifiers {
+    assert!(string::length(&name) <= MAX_TOKEN_NAME_LENGTH, error::out_of_range(ETOKEN_NAME_TOO_LONG));
+
+    let token = borrow_mut(mutator_ref);
+
+    let old_name = if (exists<TokenIdentifiers>(mutator_ref.self)) {
+        let token_concurrent = borrow_global_mut<TokenIdentifiers>(mutator_ref.self);
+        let old_name = aggregator_v2::read_derived_string(&token_concurrent.name);
+        token_concurrent.name = aggregator_v2::create_derived_string(name);
+        old_name
+    } else {
+        let old_name = token.name;
+        token.name = name;
+        old_name
+    };
+
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(Mutation {
+            token_address: mutator_ref.self,
+            mutated_field_name: string::utf8(b"name"),
+            old_value: old_name,
+            new_value: name
+        })
+    };
+    event::emit_event(
+        &mut token.mutation_events,
+        MutationEvent {
+            mutated_field_name: string::utf8(b"name"),
+            old_value: old_name,
+            new_value: name
+        },
+    );
+}
+
+ + + +
+ + + +## Function `set_uri` + + + +
public fun set_uri(mutator_ref: &token::MutatorRef, uri: string::String)
+
+ + + +
+Implementation + + +
public fun set_uri(mutator_ref: &MutatorRef, uri: String) acquires Token {
+    assert!(string::length(&uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG));
+    let token = borrow_mut(mutator_ref);
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(Mutation {
+            token_address: mutator_ref.self,
+            mutated_field_name: string::utf8(b"uri"),
+            old_value: token.uri,
+            new_value: uri,
+        })
+    };
+    event::emit_event(
+        &mut token.mutation_events,
+        MutationEvent {
+            mutated_field_name: string::utf8(b"uri"),
+            old_value: token.uri,
+            new_value: uri,
+        },
+    );
+    token.uri = uri;
+}
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-token/doc/token.md b/aptos-move/framework/aptos-token/doc/token.md index 1737c8c7df73b..c7632c39c131e 100644 --- a/aptos-move/framework/aptos-token/doc/token.md +++ b/aptos-move/framework/aptos-token/doc/token.md @@ -299,7 +299,7 @@ globally unique identifier of tokendata collection: string::String
- The name of collection; this is unique under the same account, eg: "Supra Animal Collection" + The name of collection; this is unique under the same account, eg: "Aptos Animal Collection"
name: string::String @@ -363,7 +363,7 @@ The shared TokenData by tokens with different property_version name: string::String
- The name of the token, which should be unique within the collection; the length of name should be smaller than 128, characters, eg: "Supra Animal #1234" + The name of the token, which should be unique within the collection; the length of name should be smaller than 128, characters, eg: "Aptos Animal #1234"
description: string::String @@ -652,7 +652,7 @@ Represent the collection metadata description: string::String
- A description for the token collection Eg: "Supra Toad Overload" + A description for the token collection Eg: "Aptos Toad Overload"
name: string::String @@ -677,7 +677,7 @@ Represent the collection metadata
If maximal is a non-zero value, the number of created TokenData entries should be smaller or equal to this maximum - If maximal is 0, Supra doesn't track the supply of this collection, and there is no limit + If maximal is 0, Aptos doesn't track the supply of this collection, and there is no limit
mutability_config: token::CollectionMutabilityConfig diff --git a/aptos-move/framework/aptos-token/tests/compiler-v2-doc/overview.md b/aptos-move/framework/aptos-token/tests/compiler-v2-doc/overview.md new file mode 100644 index 0000000000000..08aa8f39100f3 --- /dev/null +++ b/aptos-move/framework/aptos-token/tests/compiler-v2-doc/overview.md @@ -0,0 +1,22 @@ + + + +# Aptos Token Framework + + +This is the reference documentation of the Aptos Token framework. + + + + +## Index + + +- [`0x3::property_map`](property_map.md#0x3_property_map) +- [`0x3::token`](token.md#0x3_token) +- [`0x3::token_coin_swap`](token_coin_swap.md#0x3_token_coin_swap) +- [`0x3::token_event_store`](token_event_store.md#0x3_token_event_store) +- [`0x3::token_transfers`](token_transfers.md#0x3_token_transfers) + + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-token/tests/compiler-v2-doc/property_map.md b/aptos-move/framework/aptos-token/tests/compiler-v2-doc/property_map.md new file mode 100644 index 0000000000000..7769ccfc8c65c --- /dev/null +++ b/aptos-move/framework/aptos-token/tests/compiler-v2-doc/property_map.md @@ -0,0 +1,1369 @@ + + + +# Module `0x3::property_map` + +PropertyMap is a specialization of SimpleMap for Tokens. +It maps a String key to a PropertyValue that consists of type (string) and value (vector) +It provides basic on-chain serialization of primitive and string to property value with type information +It also supports deserializing property value to its original type. 
+ + +- [Struct `PropertyMap`](#0x3_property_map_PropertyMap) +- [Struct `PropertyValue`](#0x3_property_map_PropertyValue) +- [Constants](#@Constants_0) +- [Function `new`](#0x3_property_map_new) +- [Function `new_with_key_and_property_value`](#0x3_property_map_new_with_key_and_property_value) +- [Function `empty`](#0x3_property_map_empty) +- [Function `contains_key`](#0x3_property_map_contains_key) +- [Function `add`](#0x3_property_map_add) +- [Function `length`](#0x3_property_map_length) +- [Function `borrow`](#0x3_property_map_borrow) +- [Function `keys`](#0x3_property_map_keys) +- [Function `types`](#0x3_property_map_types) +- [Function `values`](#0x3_property_map_values) +- [Function `read_string`](#0x3_property_map_read_string) +- [Function `read_u8`](#0x3_property_map_read_u8) +- [Function `read_u64`](#0x3_property_map_read_u64) +- [Function `read_address`](#0x3_property_map_read_address) +- [Function `read_u128`](#0x3_property_map_read_u128) +- [Function `read_bool`](#0x3_property_map_read_bool) +- [Function `borrow_value`](#0x3_property_map_borrow_value) +- [Function `borrow_type`](#0x3_property_map_borrow_type) +- [Function `remove`](#0x3_property_map_remove) +- [Function `update_property_map`](#0x3_property_map_update_property_map) +- [Function `update_property_value`](#0x3_property_map_update_property_value) +- [Function `create_property_value_raw`](#0x3_property_map_create_property_value_raw) +- [Function `create_property_value`](#0x3_property_map_create_property_value) +- [Specification](#@Specification_1) + - [Function `new`](#@Specification_1_new) + - [Function `new_with_key_and_property_value`](#@Specification_1_new_with_key_and_property_value) + - [Function `empty`](#@Specification_1_empty) + - [Function `contains_key`](#@Specification_1_contains_key) + - [Function `add`](#@Specification_1_add) + - [Function `length`](#@Specification_1_length) + - [Function `borrow`](#@Specification_1_borrow) + - [Function `keys`](#@Specification_1_keys) + - 
[Function `types`](#@Specification_1_types) + - [Function `values`](#@Specification_1_values) + - [Function `read_string`](#@Specification_1_read_string) + - [Function `read_u8`](#@Specification_1_read_u8) + - [Function `read_u64`](#@Specification_1_read_u64) + - [Function `read_address`](#@Specification_1_read_address) + - [Function `read_u128`](#@Specification_1_read_u128) + - [Function `read_bool`](#@Specification_1_read_bool) + - [Function `borrow_value`](#@Specification_1_borrow_value) + - [Function `borrow_type`](#@Specification_1_borrow_type) + - [Function `remove`](#@Specification_1_remove) + - [Function `update_property_map`](#@Specification_1_update_property_map) + - [Function `update_property_value`](#@Specification_1_update_property_value) + - [Function `create_property_value_raw`](#@Specification_1_create_property_value_raw) + - [Function `create_property_value`](#@Specification_1_create_property_value) + + +
use 0x1::bcs;
+use 0x1::error;
+use 0x1::from_bcs;
+use 0x1::simple_map;
+use 0x1::string;
+use 0x1::type_info;
+
+ + + + + +## Struct `PropertyMap` + + + +
struct PropertyMap has copy, drop, store
+
+ + + +
+Fields + + +
+
+map: simple_map::SimpleMap<string::String, property_map::PropertyValue> +
+
+ +
+
+ + +
+ + + +## Struct `PropertyValue` + + + +
struct PropertyValue has copy, drop, store
+
+ + + +
+Fields + + +
+
+value: vector<u8> +
+
+ +
+
+type: string::String +
+
+ +
+
+ + +
+ + + +## Constants + + + + +The property key already exists + + +
const EKEY_AREADY_EXIST_IN_PROPERTY_MAP: u64 = 1;
+
+ + + + + +Property key and type count don't match + + +
const EKEY_COUNT_NOT_MATCH_TYPE_COUNT: u64 = 5;
+
+ + + + + +Property key and value count don't match + + +
const EKEY_COUNT_NOT_MATCH_VALUE_COUNT: u64 = 4;
+
+ + + + + +The name (key) of the property is too long + + +
const EPROPERTY_MAP_NAME_TOO_LONG: u64 = 7;
+
+ + + + + +The property doesn't exist + + +
const EPROPERTY_NOT_EXIST: u64 = 3;
+
+ + + + + +The number of properties exceeds the limit + + +
const EPROPERTY_NUMBER_EXCEED_LIMIT: u64 = 2;
+
+ + + + + +Property type doesn't match + + +
const ETYPE_NOT_MATCH: u64 = 6;
+
+ + + + + +The maximal number of properties that can be stored in a property map + + +
const MAX_PROPERTY_MAP_SIZE: u64 = 1000;
+
+ + + + + + + +
const MAX_PROPERTY_NAME_LENGTH: u64 = 128;
+
+ + + + + +## Function `new` + + + +
public fun new(keys: vector<string::String>, values: vector<vector<u8>>, types: vector<string::String>): property_map::PropertyMap
+
+ + + +
+Implementation + + +
public fun new(
+    keys: vector<String>,
+    values: vector<vector<u8>>,
+    types: vector<String>
+): PropertyMap {
+    let length = vector::length(&keys);
+    assert!(length <= MAX_PROPERTY_MAP_SIZE, error::invalid_argument(EPROPERTY_NUMBER_EXCEED_LIMIT));
+    assert!(length == vector::length(&values), error::invalid_argument(EKEY_COUNT_NOT_MATCH_VALUE_COUNT));
+    assert!(length == vector::length(&types), error::invalid_argument(EKEY_COUNT_NOT_MATCH_TYPE_COUNT));
+
+    let properties = empty();
+
+    let i = 0;
+    while (i < length) {
+        let key = *vector::borrow(&keys, i);
+        assert!(string::length(&key) <= MAX_PROPERTY_NAME_LENGTH, error::invalid_argument(EPROPERTY_MAP_NAME_TOO_LONG));
+        simple_map::add(
+            &mut properties.map,
+            key,
+            PropertyValue { value: *vector::borrow(&values, i), type: *vector::borrow(&types, i) }
+        );
+        i = i + 1;
+    };
+    properties
+}
+
+ + + +
+ + + +## Function `new_with_key_and_property_value` + +Create property map directly from key and property value + + +
public fun new_with_key_and_property_value(keys: vector<string::String>, values: vector<property_map::PropertyValue>): property_map::PropertyMap
+
+ + + +
+Implementation + + +
public fun new_with_key_and_property_value(
+    keys: vector<String>,
+    values: vector<PropertyValue>
+): PropertyMap {
+    let length = vector::length(&keys);
+    assert!(length <= MAX_PROPERTY_MAP_SIZE, error::invalid_argument(EPROPERTY_NUMBER_EXCEED_LIMIT));
+    assert!(length == vector::length(&values), error::invalid_argument(EKEY_COUNT_NOT_MATCH_VALUE_COUNT));
+
+    let properties = empty();
+
+    let i = 0;
+    while (i < length) {
+        let key = *vector::borrow(&keys, i);
+        let val = *vector::borrow(&values, i);
+        assert!(string::length(&key) <= MAX_PROPERTY_NAME_LENGTH, error::invalid_argument(EPROPERTY_MAP_NAME_TOO_LONG));
+        add(&mut properties, key, val);
+        i = i + 1;
+    };
+    properties
+}
+
+ + + +
+ + + +## Function `empty` + + + +
public fun empty(): property_map::PropertyMap
+
+ + + +
+Implementation + + +
public fun empty(): PropertyMap {
+    PropertyMap {
+        map: simple_map::create<String, PropertyValue>(),
+    }
+}
+
+ + + +
+ + + +## Function `contains_key` + + + +
public fun contains_key(map: &property_map::PropertyMap, key: &string::String): bool
+
+ + + +
+Implementation + + +
public fun contains_key(map: &PropertyMap, key: &String): bool {
+    simple_map::contains_key(&map.map, key)
+}
+
+ + + +
+ + + +## Function `add` + + + +
public fun add(map: &mut property_map::PropertyMap, key: string::String, value: property_map::PropertyValue)
+
+ + + +
+Implementation + + +
public fun add(map: &mut PropertyMap, key: String, value: PropertyValue) {
+    assert!(string::length(&key) <= MAX_PROPERTY_NAME_LENGTH, error::invalid_argument(EPROPERTY_MAP_NAME_TOO_LONG));
+    assert!(simple_map::length(&map.map) < MAX_PROPERTY_MAP_SIZE, error::invalid_state(EPROPERTY_NUMBER_EXCEED_LIMIT));
+    simple_map::add(&mut map.map, key, value);
+}
+
+ + + +
+ + + +## Function `length` + + + +
public fun length(map: &property_map::PropertyMap): u64
+
+ + + +
+Implementation + + +
public fun length(map: &PropertyMap): u64 {
+    simple_map::length(&map.map)
+}
+
+ + + +
+ + + +## Function `borrow` + + + +
public fun borrow(map: &property_map::PropertyMap, key: &string::String): &property_map::PropertyValue
+
+ + + +
+Implementation + + +
public fun borrow(map: &PropertyMap, key: &String): &PropertyValue {
+    let found = contains_key(map, key);
+    assert!(found, EPROPERTY_NOT_EXIST);
+    simple_map::borrow(&map.map, key)
+}
+
+ + + +
+ + + +## Function `keys` + +Return all the keys in the property map in the order they are added. + + +
public fun keys(map: &property_map::PropertyMap): vector<string::String>
+
+ + + +
+Implementation + + +
public fun keys(map: &PropertyMap): vector<String> {
+    simple_map::keys(&map.map)
+}
+
+ + + +
+ + + +## Function `types` + +Return the types of all properties in the property map in the order they are added. + + +
public fun types(map: &property_map::PropertyMap): vector<string::String>
+
+ + + +
+Implementation + + +
public fun types(map: &PropertyMap): vector<String> {
+    vector::map_ref(&simple_map::values(&map.map), |v| {
+        let v: &PropertyValue = v;
+        v.type
+    })
+}
+
+ + + +
+ + + +## Function `values` + +Return the values of all properties in the property map in the order they are added. + + +
public fun values(map: &property_map::PropertyMap): vector<vector<u8>>
+
+ + + +
+Implementation + + +
public fun values(map: &PropertyMap): vector<vector<u8>> {
+    vector::map_ref(&simple_map::values(&map.map), |v| {
+        let v: &PropertyValue = v;
+        v.value
+    })
+}
+
+ + + +
+ + + +## Function `read_string` + + + +
public fun read_string(map: &property_map::PropertyMap, key: &string::String): string::String
+
+ + + +
+Implementation + + +
public fun read_string(map: &PropertyMap, key: &String): String {
+    let prop = borrow(map, key);
+    assert!(prop.type == string::utf8(b"0x1::string::String"), error::invalid_state(ETYPE_NOT_MATCH));
+    from_bcs::to_string(prop.value)
+}
+
+ + + +
+ + + +## Function `read_u8` + + + +
public fun read_u8(map: &property_map::PropertyMap, key: &string::String): u8
+
+ + + +
+Implementation + + +
public fun read_u8(map: &PropertyMap, key: &String): u8 {
+    let prop = borrow(map, key);
+    assert!(prop.type == string::utf8(b"u8"), error::invalid_state(ETYPE_NOT_MATCH));
+    from_bcs::to_u8(prop.value)
+}
+
+ + + +
+ + + +## Function `read_u64` + + + +
public fun read_u64(map: &property_map::PropertyMap, key: &string::String): u64
+
+ + + +
+Implementation + + +
public fun read_u64(map: &PropertyMap, key: &String): u64 {
+    let prop = borrow(map, key);
+    assert!(prop.type == string::utf8(b"u64"), error::invalid_state(ETYPE_NOT_MATCH));
+    from_bcs::to_u64(prop.value)
+}
+
+ + + +
+ + + +## Function `read_address` + + + +
public fun read_address(map: &property_map::PropertyMap, key: &string::String): address
+
+ + + +
+Implementation + + +
public fun read_address(map: &PropertyMap, key: &String): address {
+    let prop = borrow(map, key);
+    assert!(prop.type == string::utf8(b"address"), error::invalid_state(ETYPE_NOT_MATCH));
+    from_bcs::to_address(prop.value)
+}
+
+ + + +
+ + + +## Function `read_u128` + + + +
public fun read_u128(map: &property_map::PropertyMap, key: &string::String): u128
+
+ + + +
+Implementation + + +
public fun read_u128(map: &PropertyMap, key: &String): u128 {
+    let prop = borrow(map, key);
+    assert!(prop.type == string::utf8(b"u128"), error::invalid_state(ETYPE_NOT_MATCH));
+    from_bcs::to_u128(prop.value)
+}
+
+ + + +
+ + + +## Function `read_bool` + + + +
public fun read_bool(map: &property_map::PropertyMap, key: &string::String): bool
+
+ + + +
+Implementation + + +
public fun read_bool(map: &PropertyMap, key: &String): bool {
+    let prop = borrow(map, key);
+    assert!(prop.type == string::utf8(b"bool"), error::invalid_state(ETYPE_NOT_MATCH));
+    from_bcs::to_bool(prop.value)
+}
+
+ + + +
+ + + +## Function `borrow_value` + + + +
public fun borrow_value(property: &property_map::PropertyValue): vector<u8>
+
+ + + +
+Implementation + + +
public fun borrow_value(property: &PropertyValue): vector<u8> {
+    property.value
+}
+
+ + + +
+ + + +## Function `borrow_type` + + + +
public fun borrow_type(property: &property_map::PropertyValue): string::String
+
+ + + +
+Implementation + + +
public fun borrow_type(property: &PropertyValue): String {
+    property.type
+}
+
+ + + +
+ + + +## Function `remove` + + + +
public fun remove(map: &mut property_map::PropertyMap, key: &string::String): (string::String, property_map::PropertyValue)
+
+ + + +
+Implementation + + +
public fun remove(
+    map: &mut PropertyMap,
+    key: &String
+): (String, PropertyValue) {
+    let found = contains_key(map, key);
+    assert!(found, error::not_found(EPROPERTY_NOT_EXIST));
+    simple_map::remove(&mut map.map, key)
+}
+
+ + + +
+ + + +## Function `update_property_map` + +Update the property in the existing property map +Allow updating existing keys' value and add new key-value pairs + + +
public fun update_property_map(map: &mut property_map::PropertyMap, keys: vector<string::String>, values: vector<vector<u8>>, types: vector<string::String>)
+
+ + + +
+Implementation + + +
public fun update_property_map(
+    map: &mut PropertyMap,
+    keys: vector<String>,
+    values: vector<vector<u8>>,
+    types: vector<String>,
+) {
+    let key_len = vector::length(&keys);
+    let val_len = vector::length(&values);
+    let typ_len = vector::length(&types);
+    assert!(key_len == val_len, error::invalid_state(EKEY_COUNT_NOT_MATCH_VALUE_COUNT));
+    assert!(key_len == typ_len, error::invalid_state(EKEY_COUNT_NOT_MATCH_TYPE_COUNT));
+
+    let i = 0;
+    while (i < key_len) {
+        let key = vector::borrow(&keys, i);
+        let prop_val = PropertyValue {
+            value: *vector::borrow(&values, i),
+            type: *vector::borrow(&types, i),
+        };
+        if (contains_key(map, key)) {
+            update_property_value(map, key, prop_val);
+        } else {
+            add(map, *key, prop_val);
+        };
+        i = i + 1;
+    }
+}
+
+ + + +
+ + + +## Function `update_property_value` + + + +
public fun update_property_value(map: &mut property_map::PropertyMap, key: &string::String, value: property_map::PropertyValue)
+
+ + + +
+Implementation + + +
public fun update_property_value(
+    map: &mut PropertyMap,
+    key: &String,
+    value: PropertyValue
+) {
+    let property_val = simple_map::borrow_mut(&mut map.map, key);
+    *property_val = value;
+}
+
+ + + +
+ + + +## Function `create_property_value_raw` + + + +
public fun create_property_value_raw(value: vector<u8>, type: string::String): property_map::PropertyValue
+
+ + + +
+Implementation + + +
public fun create_property_value_raw(
+    value: vector<u8>,
+    type: String
+): PropertyValue {
+    PropertyValue {
+        value,
+        type,
+    }
+}
+
+ + + +
+ + + +## Function `create_property_value` + +create a property value from generic type data + + +
public fun create_property_value<T: copy>(data: &T): property_map::PropertyValue
+
+ + + +
+Implementation + + +
public fun create_property_value<T: copy>(data: &T): PropertyValue {
+    let name = type_name<T>();
+    if (
+        name == string::utf8(b"bool") ||
+            name == string::utf8(b"u8") ||
+            name == string::utf8(b"u64") ||
+            name == string::utf8(b"u128") ||
+            name == string::utf8(b"address") ||
+            name == string::utf8(b"0x1::string::String")
+    ) {
+        create_property_value_raw(bcs::to_bytes<T>(data), name)
+    } else {
+        create_property_value_raw(bcs::to_bytes<T>(data), string::utf8(b"vector<u8>"))
+    }
+}
+
+ + + +
+ + + +## Specification + + + +
pragma verify = true;
+pragma aborts_if_is_strict;
+let MAX_PROPERTY_MAP_SIZE = 1000;
+let MAX_PROPERTY_NAME_LENGTH  = 128;
+
+ + + + + +### Function `new` + + +
public fun new(keys: vector<string::String>, values: vector<vector<u8>>, types: vector<string::String>): property_map::PropertyMap
+
+ + + + +
pragma aborts_if_is_partial;
+let length = len(keys);
+aborts_if !(length <= MAX_PROPERTY_MAP_SIZE);
+aborts_if !(length == vector::length(values));
+aborts_if !(length == vector::length(types));
+
+ + + + + +### Function `new_with_key_and_property_value` + + +
public fun new_with_key_and_property_value(keys: vector<string::String>, values: vector<property_map::PropertyValue>): property_map::PropertyMap
+
+ + + + +
pragma aborts_if_is_partial;
+let length = vector::length(keys);
+aborts_if !(length <= MAX_PROPERTY_MAP_SIZE);
+aborts_if !(length == len(values));
+
+ + + + + +### Function `empty` + + +
public fun empty(): property_map::PropertyMap
+
+ + + + +
aborts_if false;
+
+ + + + + +### Function `contains_key` + + +
public fun contains_key(map: &property_map::PropertyMap, key: &string::String): bool
+
+ + + + +
aborts_if false;
+
+ + + + + +### Function `add` + + +
public fun add(map: &mut property_map::PropertyMap, key: string::String, value: property_map::PropertyValue)
+
+ + + + +
aborts_if !(string::length(key) <= MAX_PROPERTY_NAME_LENGTH);
+aborts_if !(!simple_map::spec_contains_key(map.map, key));
+aborts_if !(simple_map::spec_len(map.map) < MAX_PROPERTY_MAP_SIZE);
+
+ + + + + +### Function `length` + + +
public fun length(map: &property_map::PropertyMap): u64
+
+ + + + +
aborts_if false;
+
+ + + + + +### Function `borrow` + + +
public fun borrow(map: &property_map::PropertyMap, key: &string::String): &property_map::PropertyValue
+
+ + + + +
aborts_if !simple_map::spec_contains_key(map.map, key);
+
+ + + + + +### Function `keys` + + +
public fun keys(map: &property_map::PropertyMap): vector<string::String>
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `types` + + +
public fun types(map: &property_map::PropertyMap): vector<string::String>
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `values` + + +
public fun values(map: &property_map::PropertyMap): vector<vector<u8>>
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `read_string` + + +
public fun read_string(map: &property_map::PropertyMap, key: &string::String): string::String
+
+ + +Check utf8 for correctness and whether equal +to prop.type + + +
pragma aborts_if_is_partial;
+aborts_if !simple_map::spec_contains_key(map.map, key);
+aborts_if !string::spec_internal_check_utf8(b"0x1::string::String");
+let prop = simple_map::spec_get(map.map, key);
+aborts_if prop.type != spec_utf8(b"0x1::string::String");
+aborts_if !aptos_std::from_bcs::deserializable<String>(prop.value);
+
+ + + + + + + +
fun spec_utf8(bytes: vector<u8>): String {
+   String{bytes}
+}
+
+ + + + + +### Function `read_u8` + + +
public fun read_u8(map: &property_map::PropertyMap, key: &string::String): u8
+
+ + + + +
let str = b"u8";
+aborts_if !simple_map::spec_contains_key(map.map, key);
+aborts_if !string::spec_internal_check_utf8(str);
+let prop = simple_map::spec_get(map.map, key);
+aborts_if prop.type != spec_utf8(str);
+aborts_if !aptos_std::from_bcs::deserializable<u8>(prop.value);
+
+ + + + + +### Function `read_u64` + + +
public fun read_u64(map: &property_map::PropertyMap, key: &string::String): u64
+
+ + + + +
let str = b"u64";
+aborts_if !simple_map::spec_contains_key(map.map, key);
+aborts_if !string::spec_internal_check_utf8(str);
+let prop = simple_map::spec_get(map.map, key);
+aborts_if prop.type != spec_utf8(str);
+aborts_if !aptos_std::from_bcs::deserializable<u64>(prop.value);
+
+ + + + + +### Function `read_address` + + +
public fun read_address(map: &property_map::PropertyMap, key: &string::String): address
+
+ + + + +
let str = b"address";
+aborts_if !simple_map::spec_contains_key(map.map, key);
+aborts_if !string::spec_internal_check_utf8(str);
+let prop = simple_map::spec_get(map.map, key);
+aborts_if prop.type != spec_utf8(str);
+aborts_if !aptos_std::from_bcs::deserializable<address>(prop.value);
+
+ + + + + +### Function `read_u128` + + +
public fun read_u128(map: &property_map::PropertyMap, key: &string::String): u128
+
+ + + + +
let str = b"u128";
+aborts_if !simple_map::spec_contains_key(map.map, key);
+aborts_if !string::spec_internal_check_utf8(str);
+let prop = simple_map::spec_get(map.map, key);
+aborts_if prop.type != spec_utf8(str);
+aborts_if !aptos_std::from_bcs::deserializable<u128>(prop.value);
+
+ + + + + +### Function `read_bool` + + +
public fun read_bool(map: &property_map::PropertyMap, key: &string::String): bool
+
+ + + + +
let str = b"bool";
+aborts_if !simple_map::spec_contains_key(map.map, key);
+aborts_if !string::spec_internal_check_utf8(str);
+let prop = simple_map::spec_get(map.map, key);
+aborts_if prop.type != spec_utf8(str);
+aborts_if !aptos_std::from_bcs::deserializable<bool>(prop.value);
+
+ + + + + +### Function `borrow_value` + + +
public fun borrow_value(property: &property_map::PropertyValue): vector<u8>
+
+ + + + +
aborts_if false;
+
+ + + + + +### Function `borrow_type` + + +
public fun borrow_type(property: &property_map::PropertyValue): string::String
+
+ + + + +
aborts_if false;
+
+ + + + + +### Function `remove` + + +
public fun remove(map: &mut property_map::PropertyMap, key: &string::String): (string::String, property_map::PropertyValue)
+
+ + + + +
aborts_if !simple_map::spec_contains_key(map.map, key);
+
+ + + + + +### Function `update_property_map` + + +
public fun update_property_map(map: &mut property_map::PropertyMap, keys: vector<string::String>, values: vector<vector<u8>>, types: vector<string::String>)
+
+ + + + +
pragma aborts_if_is_partial;
+let key_len = len(keys);
+let val_len = len(values);
+let typ_len = len(types);
+aborts_if !(key_len == val_len);
+aborts_if !(key_len == typ_len);
+
+ + + + + +### Function `update_property_value` + + +
public fun update_property_value(map: &mut property_map::PropertyMap, key: &string::String, value: property_map::PropertyValue)
+
+ + + + +
aborts_if !simple_map::spec_contains_key(map.map, key);
+
+ + + + + +### Function `create_property_value_raw` + + +
public fun create_property_value_raw(value: vector<u8>, type: string::String): property_map::PropertyValue
+
+ + + + +
aborts_if false;
+
+ + + + + +### Function `create_property_value` + + +
public fun create_property_value<T: copy>(data: &T): property_map::PropertyValue
+
+ + +Abort according to the code + + +
let name = type_name<T>();
+aborts_if !string::spec_internal_check_utf8(b"bool");
+aborts_if name != spec_utf8(b"bool") &&
+    !string::spec_internal_check_utf8(b"u8");
+aborts_if name != spec_utf8(b"bool") &&
+    name != spec_utf8(b"u8") &&
+    !string::spec_internal_check_utf8(b"u64");
+aborts_if name != spec_utf8(b"bool") &&
+    name != spec_utf8(b"u8") &&
+    name != spec_utf8(b"u64") &&
+    !string::spec_internal_check_utf8(b"u128");
+aborts_if name != spec_utf8(b"bool") &&
+    name != spec_utf8(b"u8") &&
+    name != spec_utf8(b"u64") &&
+    name != spec_utf8(b"u128") &&
+    !string::spec_internal_check_utf8(b"address");
+aborts_if name != spec_utf8(b"bool") &&
+    name != spec_utf8(b"u8") &&
+    name != spec_utf8(b"u64") &&
+    name != spec_utf8(b"u128") &&
+    name != spec_utf8(b"address") &&
+    !string::spec_internal_check_utf8(b"0x1::string::String");
+aborts_if name != spec_utf8(b"bool") &&
+    name != spec_utf8(b"u8") &&
+    name != spec_utf8(b"u64") &&
+    name != spec_utf8(b"u128") &&
+    name != spec_utf8(b"address") &&
+    name != spec_utf8(b"0x1::string::String") &&
+    !string::spec_internal_check_utf8(b"vector<u8>");
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-token/tests/compiler-v2-doc/token.md b/aptos-move/framework/aptos-token/tests/compiler-v2-doc/token.md new file mode 100644 index 0000000000000..818be7527c2f8 --- /dev/null +++ b/aptos-move/framework/aptos-token/tests/compiler-v2-doc/token.md @@ -0,0 +1,6594 @@ + + + +# Module `0x3::token` + +This module provides the foundation for Tokens. +Checkout our developer doc on our token standard https://aptos.dev/standards + + +- [Struct `Token`](#0x3_token_Token) +- [Struct `TokenId`](#0x3_token_TokenId) +- [Struct `TokenDataId`](#0x3_token_TokenDataId) +- [Struct `TokenData`](#0x3_token_TokenData) +- [Struct `Royalty`](#0x3_token_Royalty) +- [Struct `TokenMutabilityConfig`](#0x3_token_TokenMutabilityConfig) +- [Resource `TokenStore`](#0x3_token_TokenStore) +- [Struct `CollectionMutabilityConfig`](#0x3_token_CollectionMutabilityConfig) +- [Resource `Collections`](#0x3_token_Collections) +- [Struct `CollectionData`](#0x3_token_CollectionData) +- [Struct `WithdrawCapability`](#0x3_token_WithdrawCapability) +- [Struct `DepositEvent`](#0x3_token_DepositEvent) +- [Struct `Deposit`](#0x3_token_Deposit) +- [Struct `WithdrawEvent`](#0x3_token_WithdrawEvent) +- [Struct `Withdraw`](#0x3_token_Withdraw) +- [Struct `CreateTokenDataEvent`](#0x3_token_CreateTokenDataEvent) +- [Struct `CreateTokenData`](#0x3_token_CreateTokenData) +- [Struct `MintTokenEvent`](#0x3_token_MintTokenEvent) +- [Struct `MintToken`](#0x3_token_MintToken) +- [Struct `BurnTokenEvent`](#0x3_token_BurnTokenEvent) +- [Struct `BurnToken`](#0x3_token_BurnToken) +- [Struct `MutateTokenPropertyMapEvent`](#0x3_token_MutateTokenPropertyMapEvent) +- [Struct `MutateTokenPropertyMap`](#0x3_token_MutateTokenPropertyMap) +- [Struct `CreateCollectionEvent`](#0x3_token_CreateCollectionEvent) +- [Struct `CreateCollection`](#0x3_token_CreateCollection) +- [Constants](#@Constants_0) +- [Function 
`create_collection_script`](#0x3_token_create_collection_script) +- [Function `create_token_script`](#0x3_token_create_token_script) +- [Function `mint_script`](#0x3_token_mint_script) +- [Function `mutate_token_properties`](#0x3_token_mutate_token_properties) +- [Function `direct_transfer_script`](#0x3_token_direct_transfer_script) +- [Function `opt_in_direct_transfer`](#0x3_token_opt_in_direct_transfer) +- [Function `transfer_with_opt_in`](#0x3_token_transfer_with_opt_in) +- [Function `burn_by_creator`](#0x3_token_burn_by_creator) +- [Function `burn`](#0x3_token_burn) +- [Function `mutate_collection_description`](#0x3_token_mutate_collection_description) +- [Function `mutate_collection_uri`](#0x3_token_mutate_collection_uri) +- [Function `mutate_collection_maximum`](#0x3_token_mutate_collection_maximum) +- [Function `mutate_tokendata_maximum`](#0x3_token_mutate_tokendata_maximum) +- [Function `mutate_tokendata_uri`](#0x3_token_mutate_tokendata_uri) +- [Function `mutate_tokendata_royalty`](#0x3_token_mutate_tokendata_royalty) +- [Function `mutate_tokendata_description`](#0x3_token_mutate_tokendata_description) +- [Function `mutate_tokendata_property`](#0x3_token_mutate_tokendata_property) +- [Function `mutate_one_token`](#0x3_token_mutate_one_token) +- [Function `create_royalty`](#0x3_token_create_royalty) +- [Function `deposit_token`](#0x3_token_deposit_token) +- [Function `direct_deposit_with_opt_in`](#0x3_token_direct_deposit_with_opt_in) +- [Function `direct_transfer`](#0x3_token_direct_transfer) +- [Function `initialize_token_store`](#0x3_token_initialize_token_store) +- [Function `merge`](#0x3_token_merge) +- [Function `split`](#0x3_token_split) +- [Function `token_id`](#0x3_token_token_id) +- [Function `transfer`](#0x3_token_transfer) +- [Function `create_withdraw_capability`](#0x3_token_create_withdraw_capability) +- [Function `withdraw_with_capability`](#0x3_token_withdraw_with_capability) +- [Function 
`partial_withdraw_with_capability`](#0x3_token_partial_withdraw_with_capability) +- [Function `withdraw_token`](#0x3_token_withdraw_token) +- [Function `create_collection`](#0x3_token_create_collection) +- [Function `check_collection_exists`](#0x3_token_check_collection_exists) +- [Function `check_tokendata_exists`](#0x3_token_check_tokendata_exists) +- [Function `create_tokendata`](#0x3_token_create_tokendata) +- [Function `get_collection_supply`](#0x3_token_get_collection_supply) +- [Function `get_collection_description`](#0x3_token_get_collection_description) +- [Function `get_collection_uri`](#0x3_token_get_collection_uri) +- [Function `get_collection_maximum`](#0x3_token_get_collection_maximum) +- [Function `get_token_supply`](#0x3_token_get_token_supply) +- [Function `get_tokendata_largest_property_version`](#0x3_token_get_tokendata_largest_property_version) +- [Function `get_token_id`](#0x3_token_get_token_id) +- [Function `get_direct_transfer`](#0x3_token_get_direct_transfer) +- [Function `create_token_mutability_config`](#0x3_token_create_token_mutability_config) +- [Function `create_collection_mutability_config`](#0x3_token_create_collection_mutability_config) +- [Function `mint_token`](#0x3_token_mint_token) +- [Function `mint_token_to`](#0x3_token_mint_token_to) +- [Function `create_token_id`](#0x3_token_create_token_id) +- [Function `create_token_data_id`](#0x3_token_create_token_data_id) +- [Function `create_token_id_raw`](#0x3_token_create_token_id_raw) +- [Function `balance_of`](#0x3_token_balance_of) +- [Function `has_token_store`](#0x3_token_has_token_store) +- [Function `get_royalty`](#0x3_token_get_royalty) +- [Function `get_royalty_numerator`](#0x3_token_get_royalty_numerator) +- [Function `get_royalty_denominator`](#0x3_token_get_royalty_denominator) +- [Function `get_royalty_payee`](#0x3_token_get_royalty_payee) +- [Function `get_token_amount`](#0x3_token_get_token_amount) +- [Function `get_token_id_fields`](#0x3_token_get_token_id_fields) +- 
[Function `get_token_data_id_fields`](#0x3_token_get_token_data_id_fields) +- [Function `get_property_map`](#0x3_token_get_property_map) +- [Function `get_tokendata_maximum`](#0x3_token_get_tokendata_maximum) +- [Function `get_tokendata_uri`](#0x3_token_get_tokendata_uri) +- [Function `get_tokendata_description`](#0x3_token_get_tokendata_description) +- [Function `get_tokendata_royalty`](#0x3_token_get_tokendata_royalty) +- [Function `get_tokendata_id`](#0x3_token_get_tokendata_id) +- [Function `get_tokendata_mutability_config`](#0x3_token_get_tokendata_mutability_config) +- [Function `get_token_mutability_maximum`](#0x3_token_get_token_mutability_maximum) +- [Function `get_token_mutability_royalty`](#0x3_token_get_token_mutability_royalty) +- [Function `get_token_mutability_uri`](#0x3_token_get_token_mutability_uri) +- [Function `get_token_mutability_description`](#0x3_token_get_token_mutability_description) +- [Function `get_token_mutability_default_properties`](#0x3_token_get_token_mutability_default_properties) +- [Function `get_collection_mutability_config`](#0x3_token_get_collection_mutability_config) +- [Function `get_collection_mutability_description`](#0x3_token_get_collection_mutability_description) +- [Function `get_collection_mutability_uri`](#0x3_token_get_collection_mutability_uri) +- [Function `get_collection_mutability_maximum`](#0x3_token_get_collection_mutability_maximum) +- [Function `destroy_token_data`](#0x3_token_destroy_token_data) +- [Function `destroy_collection_data`](#0x3_token_destroy_collection_data) +- [Function `withdraw_with_event_internal`](#0x3_token_withdraw_with_event_internal) +- [Function `update_token_property_internal`](#0x3_token_update_token_property_internal) +- [Function `direct_deposit`](#0x3_token_direct_deposit) +- [Function `assert_collection_exists`](#0x3_token_assert_collection_exists) +- [Function `assert_tokendata_exists`](#0x3_token_assert_tokendata_exists) +- [Function 
`assert_non_standard_reserved_property`](#0x3_token_assert_non_standard_reserved_property) +- [Function `initialize_token_script`](#0x3_token_initialize_token_script) +- [Function `initialize_token`](#0x3_token_initialize_token) +- [Specification](#@Specification_1) + - [Function `create_collection_script`](#@Specification_1_create_collection_script) + - [Function `create_token_script`](#@Specification_1_create_token_script) + - [Function `mint_script`](#@Specification_1_mint_script) + - [Function `mutate_token_properties`](#@Specification_1_mutate_token_properties) + - [Function `direct_transfer_script`](#@Specification_1_direct_transfer_script) + - [Function `opt_in_direct_transfer`](#@Specification_1_opt_in_direct_transfer) + - [Function `transfer_with_opt_in`](#@Specification_1_transfer_with_opt_in) + - [Function `burn_by_creator`](#@Specification_1_burn_by_creator) + - [Function `burn`](#@Specification_1_burn) + - [Function `mutate_collection_description`](#@Specification_1_mutate_collection_description) + - [Function `mutate_collection_uri`](#@Specification_1_mutate_collection_uri) + - [Function `mutate_collection_maximum`](#@Specification_1_mutate_collection_maximum) + - [Function `mutate_tokendata_maximum`](#@Specification_1_mutate_tokendata_maximum) + - [Function `mutate_tokendata_uri`](#@Specification_1_mutate_tokendata_uri) + - [Function `mutate_tokendata_royalty`](#@Specification_1_mutate_tokendata_royalty) + - [Function `mutate_tokendata_description`](#@Specification_1_mutate_tokendata_description) + - [Function `mutate_tokendata_property`](#@Specification_1_mutate_tokendata_property) + - [Function `mutate_one_token`](#@Specification_1_mutate_one_token) + - [Function `create_royalty`](#@Specification_1_create_royalty) + - [Function `deposit_token`](#@Specification_1_deposit_token) + - [Function `direct_deposit_with_opt_in`](#@Specification_1_direct_deposit_with_opt_in) + - [Function `direct_transfer`](#@Specification_1_direct_transfer) + - [Function 
`initialize_token_store`](#@Specification_1_initialize_token_store) + - [Function `merge`](#@Specification_1_merge) + - [Function `split`](#@Specification_1_split) + - [Function `transfer`](#@Specification_1_transfer) + - [Function `withdraw_with_capability`](#@Specification_1_withdraw_with_capability) + - [Function `partial_withdraw_with_capability`](#@Specification_1_partial_withdraw_with_capability) + - [Function `withdraw_token`](#@Specification_1_withdraw_token) + - [Function `create_collection`](#@Specification_1_create_collection) + - [Function `check_collection_exists`](#@Specification_1_check_collection_exists) + - [Function `check_tokendata_exists`](#@Specification_1_check_tokendata_exists) + - [Function `create_tokendata`](#@Specification_1_create_tokendata) + - [Function `get_collection_supply`](#@Specification_1_get_collection_supply) + - [Function `get_collection_description`](#@Specification_1_get_collection_description) + - [Function `get_collection_uri`](#@Specification_1_get_collection_uri) + - [Function `get_collection_maximum`](#@Specification_1_get_collection_maximum) + - [Function `get_token_supply`](#@Specification_1_get_token_supply) + - [Function `get_tokendata_largest_property_version`](#@Specification_1_get_tokendata_largest_property_version) + - [Function `create_token_mutability_config`](#@Specification_1_create_token_mutability_config) + - [Function `create_collection_mutability_config`](#@Specification_1_create_collection_mutability_config) + - [Function `mint_token`](#@Specification_1_mint_token) + - [Function `mint_token_to`](#@Specification_1_mint_token_to) + - [Function `create_token_data_id`](#@Specification_1_create_token_data_id) + - [Function `create_token_id_raw`](#@Specification_1_create_token_id_raw) + - [Function `get_royalty`](#@Specification_1_get_royalty) + - [Function `get_property_map`](#@Specification_1_get_property_map) + - [Function `get_tokendata_maximum`](#@Specification_1_get_tokendata_maximum) + - [Function 
`get_tokendata_uri`](#@Specification_1_get_tokendata_uri) + - [Function `get_tokendata_description`](#@Specification_1_get_tokendata_description) + - [Function `get_tokendata_royalty`](#@Specification_1_get_tokendata_royalty) + - [Function `get_tokendata_mutability_config`](#@Specification_1_get_tokendata_mutability_config) + - [Function `get_collection_mutability_config`](#@Specification_1_get_collection_mutability_config) + - [Function `withdraw_with_event_internal`](#@Specification_1_withdraw_with_event_internal) + - [Function `update_token_property_internal`](#@Specification_1_update_token_property_internal) + - [Function `direct_deposit`](#@Specification_1_direct_deposit) + - [Function `assert_collection_exists`](#@Specification_1_assert_collection_exists) + - [Function `assert_tokendata_exists`](#@Specification_1_assert_tokendata_exists) + - [Function `assert_non_standard_reserved_property`](#@Specification_1_assert_non_standard_reserved_property) + - [Function `initialize_token_script`](#@Specification_1_initialize_token_script) + - [Function `initialize_token`](#@Specification_1_initialize_token) + + +
use 0x1::account;
+use 0x1::error;
+use 0x1::event;
+use 0x1::features;
+use 0x1::option;
+use 0x1::signer;
+use 0x1::string;
+use 0x1::table;
+use 0x1::timestamp;
+use 0x3::property_map;
+use 0x3::token_event_store;
+
+ + + + + +## Struct `Token` + + + +
struct Token has store
+
+ + + +
+Fields + + +
+
+id: token::TokenId +
+
+ +
+
+amount: u64 +
+
+ the amount of tokens. Only property_version = 0 can have a value bigger than 1. +
+
+token_properties: property_map::PropertyMap +
+
+ The properties associated with this token. + when property_version = 0, the token_properties are the same as default_properties in TokenData, we don't store it. + when the property_map mutates, a new property_version is assigned to the token. +
+
+ + +
+ + + +## Struct `TokenId` + +global unique identifier of a token + + +
struct TokenId has copy, drop, store
+
+ + + +
+Fields + + +
+
+token_data_id: token::TokenDataId +
+
+ the id to the common token data shared by token with different property_version +
+
+property_version: u64 +
+
+ The version of the property map; when a fungible token is mutated, a new property version is created and assigned to the token to make it an NFT +
+
+ + +
+ + + +## Struct `TokenDataId` + +globally unique identifier of tokendata + + +
struct TokenDataId has copy, drop, store
+
+ + + +
+Fields + + +
+
+creator: address +
+
+ The address of the creator, eg: 0xcafe +
+
+collection: string::String +
+
+ The name of collection; this is unique under the same account, eg: "Aptos Animal Collection" +
+
+name: string::String +
+
+ The name of the token; this is the same as the name field of TokenData +
+
+ + +
+ + + +## Struct `TokenData` + +The shared TokenData by tokens with different property_version + + +
struct TokenData has store
+
+ + + +
+Fields + + +
+
+maximum: u64 +
+
+ The maximal number of tokens that can be minted under this TokenData; if the maximum is 0, there is no limit +
+
+largest_property_version: u64 +
+
+ The current largest property version of all tokens with this TokenData +
+
+supply: u64 +
+
+ The number of tokens with this TokenData. Supply is only tracked for the limited token whose maximum is not 0 +
+
+uri: string::String +
+
+ The Uniform Resource Identifier (uri) pointing to the JSON file stored in off-chain storage; the URL length should be less than 512 characters, eg: https://arweave.net/Fmmn4ul-7Mv6vzm7JwE69O-I-vd6Bz2QriJO1niwCh4 +
+
+royalty: token::Royalty +
+
+ The denominator and numerator for calculating the royalty fee; it also contains payee account address for depositing the Royalty +
+
+name: string::String +
+
+ The name of the token, which should be unique within the collection; the length of name should be smaller than 128 characters, eg: "Aptos Animal #1234" +
+
+description: string::String +
+
+ Describes this Token +
+
+default_properties: property_map::PropertyMap +
+
+ The properties are stored in the TokenData that are shared by all tokens +
+
+mutability_config: token::TokenMutabilityConfig +
+
+ Control the TokenData field mutability +
+
+ + +
+ + + +## Struct `Royalty` + +The royalty of a token + + +
struct Royalty has copy, drop, store
+
+ + + +
+Fields + + +
+
+royalty_points_numerator: u64 +
+
+ +
+
+royalty_points_denominator: u64 +
+
+ +
+
+payee_address: address +
+
+ if the token is jointly owned by multiple creators, the group of creators should create a shared account. + the payee_address will be the shared account address. +
+
+ + +
+ + + +## Struct `TokenMutabilityConfig` + +This config specifies which fields in the TokenData are mutable + + +
struct TokenMutabilityConfig has copy, drop, store
+
+ + + +
+Fields + + +
+
+maximum: bool +
+
+ control if the token maximum is mutable +
+
+uri: bool +
+
+ control if the token uri is mutable +
+
+royalty: bool +
+
+ control if the token royalty is mutable +
+
+description: bool +
+
+ control if the token description is mutable +
+
+properties: bool +
+
+ control if the property map is mutable +
+
+ + +
+ + + +## Resource `TokenStore` + +Represents token resources owned by token owner + + +
struct TokenStore has key
+
+ + + +
+Fields + + +
+
+tokens: table::Table<token::TokenId, token::Token> +
+
+ the tokens owned by a token owner +
+
+direct_transfer: bool +
+
+ +
+
+deposit_events: event::EventHandle<token::DepositEvent> +
+
+ +
+
+withdraw_events: event::EventHandle<token::WithdrawEvent> +
+
+ +
+
+burn_events: event::EventHandle<token::BurnTokenEvent> +
+
+ +
+
+mutate_token_property_events: event::EventHandle<token::MutateTokenPropertyMapEvent> +
+
+ +
+
+ + +
+ + + +## Struct `CollectionMutabilityConfig` + +This config specifies which fields in the Collection are mutable + + +
struct CollectionMutabilityConfig has copy, drop, store
+
+ + + +
+Fields + + +
+
+description: bool +
+
+ control if description is mutable +
+
+uri: bool +
+
+ control if uri is mutable +
+
+maximum: bool +
+
+ control if collection maximum is mutable +
+
+ + +
+ + + +## Resource `Collections` + +Represent collection and token metadata for a creator + + +
struct Collections has key
+
+ + + +
+Fields + + +
+
+collection_data: table::Table<string::String, token::CollectionData> +
+
+ +
+
+token_data: table::Table<token::TokenDataId, token::TokenData> +
+
+ +
+
+create_collection_events: event::EventHandle<token::CreateCollectionEvent> +
+
+ +
+
+create_token_data_events: event::EventHandle<token::CreateTokenDataEvent> +
+
+ +
+
+mint_token_events: event::EventHandle<token::MintTokenEvent> +
+
+ +
+
+ + +
+ + + +## Struct `CollectionData` + +Represent the collection metadata + + +
struct CollectionData has store
+
+ + + +
+Fields + + +
+
+description: string::String +
+
+ A description for the token collection Eg: "Aptos Toad Overload" +
+
+name: string::String +
+
+ The collection name, which should be unique among all collections by the creator; the name should also be smaller than 128 characters, eg: "Animal Collection" +
+
+uri: string::String +
+
+ The URI for the collection; its length should be smaller than 512 characters +
+
+supply: u64 +
+
+ The number of different TokenData entries in this collection +
+
+maximum: u64 +
+
+ If maximum is a non-zero value, the number of created TokenData entries should be smaller or equal to this maximum + If maximum is 0, Aptos doesn't track the supply of this collection, and there is no limit +
+
+mutability_config: token::CollectionMutabilityConfig +
+
+ control which collectionData field is mutable +
+
+ + +
+ + + +## Struct `WithdrawCapability` + +capability to withdraw without signer, this struct should be non-copyable + + +
struct WithdrawCapability has drop, store
+
+ + + +
+Fields + + +
+
+token_owner: address +
+
+ +
+
+token_id: token::TokenId +
+
+ +
+
+amount: u64 +
+
+ +
+
+expiration_sec: u64 +
+
+ +
+
+ + +
+ + + +## Struct `DepositEvent` + +Set of data sent to the event stream during a receive + + +
struct DepositEvent has drop, store
+
+ + + +
+Fields + + +
+
+id: token::TokenId +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `Deposit` + +Set of data sent to the event stream during a receive + + +
#[event]
+struct Deposit has drop, store
+
+ + + +
+Fields + + +
+
+id: token::TokenId +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `WithdrawEvent` + +Set of data sent to the event stream during a withdrawal + + +
struct WithdrawEvent has drop, store
+
+ + + +
+Fields + + +
+
+id: token::TokenId +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `Withdraw` + +Set of data sent to the event stream during a withdrawal + + +
#[event]
+struct Withdraw has drop, store
+
+ + + +
+Fields + + +
+
+id: token::TokenId +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `CreateTokenDataEvent` + +token creation event id of token created + + +
struct CreateTokenDataEvent has drop, store
+
+ + + +
+Fields + + +
+
+id: token::TokenDataId +
+
+ +
+
+description: string::String +
+
+ +
+
+maximum: u64 +
+
+ +
+
+uri: string::String +
+
+ +
+
+royalty_payee_address: address +
+
+ +
+
+royalty_points_denominator: u64 +
+
+ +
+
+royalty_points_numerator: u64 +
+
+ +
+
+name: string::String +
+
+ +
+
+mutability_config: token::TokenMutabilityConfig +
+
+ +
+
+property_keys: vector<string::String> +
+
+ +
+
+property_values: vector<vector<u8>> +
+
+ +
+
+property_types: vector<string::String> +
+
+ +
+
+ + +
+ + + +## Struct `CreateTokenData` + + + +
#[event]
+struct CreateTokenData has drop, store
+
+ + + +
+Fields + + +
+
+id: token::TokenDataId +
+
+ +
+
+description: string::String +
+
+ +
+
+maximum: u64 +
+
+ +
+
+uri: string::String +
+
+ +
+
+royalty_payee_address: address +
+
+ +
+
+royalty_points_denominator: u64 +
+
+ +
+
+royalty_points_numerator: u64 +
+
+ +
+
+name: string::String +
+
+ +
+
+mutability_config: token::TokenMutabilityConfig +
+
+ +
+
+property_keys: vector<string::String> +
+
+ +
+
+property_values: vector<vector<u8>> +
+
+ +
+
+property_types: vector<string::String> +
+
+ +
+
+ + +
+ + + +## Struct `MintTokenEvent` + +mint token event. This event triggered when creator adds more supply to existing token + + +
struct MintTokenEvent has drop, store
+
+ + + +
+Fields + + +
+
+id: token::TokenDataId +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `MintToken` + + + +
#[event]
+struct MintToken has drop, store
+
+ + + +
+Fields + + +
+
+id: token::TokenDataId +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `BurnTokenEvent` + + + +
struct BurnTokenEvent has drop, store
+
+ + + +
+Fields + + +
+
+id: token::TokenId +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `BurnToken` + + + +
#[event]
+struct BurnToken has drop, store
+
+ + + +
+Fields + + +
+
+id: token::TokenId +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `MutateTokenPropertyMapEvent` + + + +
struct MutateTokenPropertyMapEvent has drop, store
+
+ + + +
+Fields + + +
+
+old_id: token::TokenId +
+
+ +
+
+new_id: token::TokenId +
+
+ +
+
+keys: vector<string::String> +
+
+ +
+
+values: vector<vector<u8>> +
+
+ +
+
+types: vector<string::String> +
+
+ +
+
+ + +
+ + + +## Struct `MutateTokenPropertyMap` + + + +
#[event]
+struct MutateTokenPropertyMap has drop, store
+
+ + + +
+Fields + + +
+
+old_id: token::TokenId +
+
+ +
+
+new_id: token::TokenId +
+
+ +
+
+keys: vector<string::String> +
+
+ +
+
+values: vector<vector<u8>> +
+
+ +
+
+types: vector<string::String> +
+
+ +
+
+ + +
+ + + +## Struct `CreateCollectionEvent` + +create collection event with creator address and collection name + + +
struct CreateCollectionEvent has drop, store
+
+ + + +
+Fields + + +
+
+creator: address +
+
+ +
+
+collection_name: string::String +
+
+ +
+
+uri: string::String +
+
+ +
+
+description: string::String +
+
+ +
+
+maximum: u64 +
+
+ +
+
+ + +
+ + + +## Struct `CreateCollection` + + + +
#[event]
+struct CreateCollection has drop, store
+
+ + + +
+Fields + + +
+
+creator: address +
+
+ +
+
+collection_name: string::String +
+
+ +
+
+uri: string::String +
+
+ +
+
+description: string::String +
+
+ +
+
+maximum: u64 +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Insufficient token balance + + +
const EINSUFFICIENT_BALANCE: u64 = 5;
+
+ + + + + +The URI is too long + + +
const EURI_TOO_LONG: u64 = 27;
+
+ + + + + + + +
const MAX_URI_LENGTH: u64 = 512;
+
+ + + + + + + +
const BURNABLE_BY_CREATOR: vector<u8> = [84, 79, 75, 69, 78, 95, 66, 85, 82, 78, 65, 66, 76, 69, 95, 66, 89, 95, 67, 82, 69, 65, 84, 79, 82];
+
+ + + + + + + +
const BURNABLE_BY_OWNER: vector<u8> = [84, 79, 75, 69, 78, 95, 66, 85, 82, 78, 65, 66, 76, 69, 95, 66, 89, 95, 79, 87, 78, 69, 82];
+
+ + + + + + + +
const COLLECTION_DESCRIPTION_MUTABLE_IND: u64 = 0;
+
+ + + + + + + +
const COLLECTION_MAX_MUTABLE_IND: u64 = 2;
+
+ + + + + + + +
const COLLECTION_URI_MUTABLE_IND: u64 = 1;
+
+ + + + + +The token has balance and cannot be initialized + + +
const EALREADY_HAS_BALANCE: u64 = 0;
+
+ + + + + +Reserved fields for token contract +Cannot be updated by user + + +
const ECANNOT_UPDATE_RESERVED_PROPERTY: u64 = 32;
+
+ + + + + +There isn't any collection under this account + + +
const ECOLLECTIONS_NOT_PUBLISHED: u64 = 1;
+
+ + + + + +The collection already exists + + +
const ECOLLECTION_ALREADY_EXISTS: u64 = 3;
+
+ + + + + +The collection name is too long + + +
const ECOLLECTION_NAME_TOO_LONG: u64 = 25;
+
+ + + + + +Cannot find collection in creator's account + + +
const ECOLLECTION_NOT_PUBLISHED: u64 = 2;
+
+ + + + + +Exceeds the collection's maximal number of token_data + + +
const ECREATE_WOULD_EXCEED_COLLECTION_MAXIMUM: u64 = 4;
+
+ + + + + +Token is not burnable by creator + + +
const ECREATOR_CANNOT_BURN_TOKEN: u64 = 31;
+
+ + + + + +The field is not mutable + + +
const EFIELD_NOT_MUTABLE: u64 = 13;
+
+ + + + + +Withdraw capability doesn't have sufficient amount + + +
const EINSUFFICIENT_WITHDRAW_CAPABILITY_AMOUNT: u64 = 38;
+
+ + + + + +Collection or tokendata maximum must be larger than supply + + +
const EINVALID_MAXIMUM: u64 = 36;
+
+ + + + + +Royalty invalid if the numerator is larger than the denominator + + +
const EINVALID_ROYALTY_NUMERATOR_DENOMINATOR: u64 = 34;
+
+ + + + + +Cannot merge the two tokens with different token id + + +
const EINVALID_TOKEN_MERGE: u64 = 6;
+
+ + + + + +Exceeds the token data maximum allowed + + +
const EMINT_WOULD_EXCEED_TOKEN_MAXIMUM: u64 = 7;
+
+ + + + + +The NFT name is too long + + +
const ENFT_NAME_TOO_LONG: u64 = 26;
+
+ + + + + +Cannot split a token that only has 1 amount + + +
const ENFT_NOT_SPLITABLE: u64 = 18;
+
+ + + + + +No burn capability + + +
const ENO_BURN_CAPABILITY: u64 = 8;
+
+ + + + + +Cannot burn 0 Token + + +
const ENO_BURN_TOKEN_WITH_ZERO_AMOUNT: u64 = 29;
+
+ + + + + +Cannot deposit a Token with 0 amount + + +
const ENO_DEPOSIT_TOKEN_WITH_ZERO_AMOUNT: u64 = 28;
+
+ + + + + +No mint capability + + +
const ENO_MINT_CAPABILITY: u64 = 19;
+
+ + + + + +Not authorized to mutate + + +
const ENO_MUTATE_CAPABILITY: u64 = 14;
+
+ + + + + +Token not in the token store + + +
const ENO_TOKEN_IN_TOKEN_STORE: u64 = 15;
+
+ + + + + +Token is not burnable by owner + + +
const EOWNER_CANNOT_BURN_TOKEN: u64 = 30;
+
+ + + + + +The property is reserved by token standard + + +
const EPROPERTY_RESERVED_BY_STANDARD: u64 = 40;
+
+ + + + + +Royalty payee account does not exist + + +
const EROYALTY_PAYEE_ACCOUNT_DOES_NOT_EXIST: u64 = 35;
+
+ + + + + +TOKEN with 0 amount is not allowed + + +
const ETOKEN_CANNOT_HAVE_ZERO_AMOUNT: u64 = 33;
+
+ + + + + +TokenData already exists + + +
const ETOKEN_DATA_ALREADY_EXISTS: u64 = 9;
+
+ + + + + +TokenData not published + + +
const ETOKEN_DATA_NOT_PUBLISHED: u64 = 10;
+
+ + + + + +Token Properties count doesn't match + + +
const ETOKEN_PROPERTIES_COUNT_NOT_MATCH: u64 = 37;
+
+ + + + + +Cannot split token to an amount larger than its amount + + +
const ETOKEN_SPLIT_AMOUNT_LARGER_OR_EQUAL_TO_TOKEN_AMOUNT: u64 = 12;
+
+ + + + + +TokenStore doesn't exist + + +
const ETOKEN_STORE_NOT_PUBLISHED: u64 = 11;
+
+ + + + + +User didn't opt-in direct transfer + + +
const EUSER_NOT_OPT_IN_DIRECT_TRANSFER: u64 = 16;
+
+ + + + + +Withdraw proof expires + + +
const EWITHDRAW_PROOF_EXPIRES: u64 = 39;
+
+ + + + + +Cannot withdraw 0 token + + +
const EWITHDRAW_ZERO: u64 = 17;
+
+ + + + + + + +
const MAX_COLLECTION_NAME_LENGTH: u64 = 128;
+
+ + + + + + + +
const MAX_NFT_NAME_LENGTH: u64 = 128;
+
+ + + + + + + +
const TOKEN_DESCRIPTION_MUTABLE_IND: u64 = 3;
+
+ + + + + + + +
const TOKEN_MAX_MUTABLE_IND: u64 = 0;
+
+ + + + + + + +
const TOKEN_PROPERTY_MUTABLE: vector<u8> = [84, 79, 75, 69, 78, 95, 80, 82, 79, 80, 69, 82, 84, 89, 95, 77, 85, 84, 65, 84, 66, 76, 69];
+
+ + + + + + + +
const TOKEN_PROPERTY_MUTABLE_IND: u64 = 4;
+
+ + + + + + + +
const TOKEN_PROPERTY_VALUE_MUTABLE_IND: u64 = 5;
+
+ + + + + + + +
const TOKEN_ROYALTY_MUTABLE_IND: u64 = 2;
+
+ + + + + + + +
const TOKEN_URI_MUTABLE_IND: u64 = 1;
+
+ + + + + +## Function `create_collection_script` + +create an empty token collection with parameters + + +
public entry fun create_collection_script(creator: &signer, name: string::String, description: string::String, uri: string::String, maximum: u64, mutate_setting: vector<bool>)
+
+ + + +
+Implementation + + +
public entry fun create_collection_script(
+    creator: &signer,
+    name: String,
+    description: String,
+    uri: String,
+    maximum: u64,
+    mutate_setting: vector<bool>,
+) acquires Collections {
+    create_collection(
+        creator,
+        name,
+        description,
+        uri,
+        maximum,
+        mutate_setting
+    );
+}
+
+ + + +
+ + + +## Function `create_token_script` + +create token with raw inputs + + +
public entry fun create_token_script(account: &signer, collection: string::String, name: string::String, description: string::String, balance: u64, maximum: u64, uri: string::String, royalty_payee_address: address, royalty_points_denominator: u64, royalty_points_numerator: u64, mutate_setting: vector<bool>, property_keys: vector<string::String>, property_values: vector<vector<u8>>, property_types: vector<string::String>)
+
+ + + +
+Implementation + + +
public entry fun create_token_script(
+    account: &signer,
+    collection: String,
+    name: String,
+    description: String,
+    balance: u64,
+    maximum: u64,
+    uri: String,
+    royalty_payee_address: address,
+    royalty_points_denominator: u64,
+    royalty_points_numerator: u64,
+    mutate_setting: vector<bool>,
+    property_keys: vector<String>,
+    property_values: vector<vector<u8>>,
+    property_types: vector<String>
+) acquires Collections, TokenStore {
+    let token_mut_config = create_token_mutability_config(&mutate_setting);
+    let tokendata_id = create_tokendata(
+        account,
+        collection,
+        name,
+        description,
+        maximum,
+        uri,
+        royalty_payee_address,
+        royalty_points_denominator,
+        royalty_points_numerator,
+        token_mut_config,
+        property_keys,
+        property_values,
+        property_types
+    );
+
+    mint_token(
+        account,
+        tokendata_id,
+        balance,
+    );
+}
+
+ + + +
+ + + +## Function `mint_script` + +Mint more token from an existing token_data. Mint only adds more token to property_version 0 + + +
public entry fun mint_script(account: &signer, token_data_address: address, collection: string::String, name: string::String, amount: u64)
+
+ + + +
+Implementation + + +
public entry fun mint_script(
+    account: &signer,
+    token_data_address: address,
+    collection: String,
+    name: String,
+    amount: u64,
+) acquires Collections, TokenStore {
+    let token_data_id = create_token_data_id(
+        token_data_address,
+        collection,
+        name,
+    );
+    // only creator of the tokendata can mint more tokens for now
+    assert!(token_data_id.creator == signer::address_of(account), error::permission_denied(ENO_MINT_CAPABILITY));
+    mint_token(
+        account,
+        token_data_id,
+        amount,
+    );
+}
+
+ + + +
+ + + +## Function `mutate_token_properties` + +mutate the token property and save the new property in TokenStore +if the token property_version is 0, we will create a new property_version per token to generate a new token_id per token +if the token property_version is not 0, we will just update the propertyMap and use the existing token_id (property_version) + + +
public entry fun mutate_token_properties(account: &signer, token_owner: address, creator: address, collection_name: string::String, token_name: string::String, token_property_version: u64, amount: u64, keys: vector<string::String>, values: vector<vector<u8>>, types: vector<string::String>)
+
+ + + +
+Implementation + + +
public entry fun mutate_token_properties(
+    account: &signer,
+    token_owner: address,
+    creator: address,
+    collection_name: String,
+    token_name: String,
+    token_property_version: u64,
+    amount: u64,
+    keys: vector<String>,
+    values: vector<vector<u8>>,
+    types: vector<String>,
+) acquires Collections, TokenStore {
+    assert!(signer::address_of(account) == creator, error::not_found(ENO_MUTATE_CAPABILITY));
+    let i = 0;
+    let token_id = create_token_id_raw(
+        creator,
+        collection_name,
+        token_name,
+        token_property_version,
+    );
+    // give a new property_version for each token
+    while (i < amount) {
+        mutate_one_token(account, token_owner, token_id, keys, values, types);
+        i = i + 1;
+    };
+}
+
+ + + +
+ + + +## Function `direct_transfer_script` + + + +
public entry fun direct_transfer_script(sender: &signer, receiver: &signer, creators_address: address, collection: string::String, name: string::String, property_version: u64, amount: u64)
+
+ + + +
+Implementation + + +
public entry fun direct_transfer_script(
+    sender: &signer,
+    receiver: &signer,
+    creators_address: address,
+    collection: String,
+    name: String,
+    property_version: u64,
+    amount: u64,
+) acquires TokenStore {
+    let token_id = create_token_id_raw(creators_address, collection, name, property_version);
+    direct_transfer(sender, receiver, token_id, amount);
+}
+
+ + + +
+ + + +## Function `opt_in_direct_transfer` + + + +
public entry fun opt_in_direct_transfer(account: &signer, opt_in: bool)
+
+ + + +
+Implementation + + +
public entry fun opt_in_direct_transfer(account: &signer, opt_in: bool) acquires TokenStore {
+    let addr = signer::address_of(account);
+    initialize_token_store(account);
+    let opt_in_flag = &mut borrow_global_mut<TokenStore>(addr).direct_transfer;
+    *opt_in_flag = opt_in;
+    token_event_store::emit_token_opt_in_event(account, opt_in);
+}
+
+ + + +
+ + + +## Function `transfer_with_opt_in` + +Transfers `amount` of tokens from `from` to `to`. +The receiver `to` has to opt-in direct transfer first + + +
public entry fun transfer_with_opt_in(from: &signer, creator: address, collection_name: string::String, token_name: string::String, token_property_version: u64, to: address, amount: u64)
+
+ + + +
+Implementation + + +
public entry fun transfer_with_opt_in(
+    from: &signer,
+    creator: address,
+    collection_name: String,
+    token_name: String,
+    token_property_version: u64,
+    to: address,
+    amount: u64,
+) acquires TokenStore {
+    let token_id = create_token_id_raw(creator, collection_name, token_name, token_property_version);
+    transfer(from, token_id, to, amount);
+}
+
+ + + +
+ + + +## Function `burn_by_creator` + +Burn a token by creator when the token's BURNABLE_BY_CREATOR is true +The token is owned at address owner + + +
public entry fun burn_by_creator(creator: &signer, owner: address, collection: string::String, name: string::String, property_version: u64, amount: u64)
+
+ + + +
+Implementation + + +
public entry fun burn_by_creator(
+    creator: &signer,
+    owner: address,
+    collection: String,
+    name: String,
+    property_version: u64,
+    amount: u64,
+) acquires Collections, TokenStore {
+    let creator_address = signer::address_of(creator);
+    assert!(amount > 0, error::invalid_argument(ENO_BURN_TOKEN_WITH_ZERO_AMOUNT));
+    let token_id = create_token_id_raw(creator_address, collection, name, property_version);
+    let creator_addr = token_id.token_data_id.creator;
+    assert!(
+        exists<Collections>(creator_addr),
+        error::not_found(ECOLLECTIONS_NOT_PUBLISHED),
+    );
+
+    let collections = borrow_global_mut<Collections>(creator_address);
+    assert!(
+        table::contains(&collections.token_data, token_id.token_data_id),
+        error::not_found(ETOKEN_DATA_NOT_PUBLISHED),
+    );
+
+    let token_data = table::borrow_mut(
+        &mut collections.token_data,
+        token_id.token_data_id,
+    );
+
+    // The property should be explicitly set in the property_map for creator to burn the token
+    assert!(
+        property_map::contains_key(&token_data.default_properties, &string::utf8(BURNABLE_BY_CREATOR)),
+        error::permission_denied(ECREATOR_CANNOT_BURN_TOKEN)
+    );
+
+    let burn_by_creator_flag = property_map::read_bool(&token_data.default_properties, &string::utf8(BURNABLE_BY_CREATOR));
+    assert!(burn_by_creator_flag, error::permission_denied(ECREATOR_CANNOT_BURN_TOKEN));
+
+    // Burn the tokens.
+    let Token { id: _, amount: burned_amount, token_properties: _ } = withdraw_with_event_internal(owner, token_id, amount);
+    let token_store = borrow_global_mut<TokenStore>(owner);
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(BurnToken { id: token_id, amount: burned_amount });
+    };
+    event::emit_event<BurnTokenEvent>(
+        &mut token_store.burn_events,
+        BurnTokenEvent { id: token_id, amount: burned_amount }
+    );
+
+    if (token_data.maximum > 0) {
+        token_data.supply = token_data.supply - burned_amount;
+
+        // Delete the token_data if supply drops to 0.
+        if (token_data.supply == 0) {
+            destroy_token_data(table::remove(&mut collections.token_data, token_id.token_data_id));
+
+            // update the collection supply
+            let collection_data = table::borrow_mut(
+                &mut collections.collection_data,
+                token_id.token_data_id.collection
+            );
+            if (collection_data.maximum > 0) {
+                collection_data.supply = collection_data.supply - 1;
+                // delete the collection data if the collection supply equals 0
+                if (collection_data.supply == 0) {
+                    destroy_collection_data(table::remove(&mut collections.collection_data, collection_data.name));
+                };
+            };
+        };
+    };
+}
+
+ + + +
+ + + +## Function `burn` + +Burn a token by the token owner + + +
public entry fun burn(owner: &signer, creators_address: address, collection: string::String, name: string::String, property_version: u64, amount: u64)
+
+ + + +
+Implementation + + +
public entry fun burn(
+    owner: &signer,
+    creators_address: address,
+    collection: String,
+    name: String,
+    property_version: u64,
+    amount: u64
+) acquires Collections, TokenStore {
+    // Burn `amount` of the owner's tokens. The owner may only burn if the
+    // creator opted in by placing a true BURNABLE_BY_OWNER flag in the
+    // token's default properties. Aborts if the collection/token data does
+    // not exist or the flag is missing or false.
+    assert!(amount > 0, error::invalid_argument(ENO_BURN_TOKEN_WITH_ZERO_AMOUNT));
+    let token_id = create_token_id_raw(creators_address, collection, name, property_version);
+    let creator_addr = token_id.token_data_id.creator;
+    assert!(
+        exists<Collections>(creator_addr),
+        error::not_found(ECOLLECTIONS_NOT_PUBLISHED),
+    );
+
+    let collections = borrow_global_mut<Collections>(creator_addr);
+    assert!(
+        table::contains(&collections.token_data, token_id.token_data_id),
+        error::not_found(ETOKEN_DATA_NOT_PUBLISHED),
+    );
+
+    let token_data = table::borrow_mut(
+        &mut collections.token_data,
+        token_id.token_data_id,
+    );
+
+    // The BURNABLE_BY_OWNER property must exist and read as true for the
+    // owner to be allowed to burn.
+    assert!(
+        property_map::contains_key(&token_data.default_properties, &string::utf8(BURNABLE_BY_OWNER)),
+        error::permission_denied(EOWNER_CANNOT_BURN_TOKEN)
+    );
+    let burn_by_owner_flag = property_map::read_bool(&token_data.default_properties, &string::utf8(BURNABLE_BY_OWNER));
+    assert!(burn_by_owner_flag, error::permission_denied(EOWNER_CANNOT_BURN_TOKEN));
+
+    // Burn the tokens: withdraw them from the owner's store and destructure
+    // the Token value (dropping it removes the supply from circulation).
+    let Token { id: _, amount: burned_amount, token_properties: _ } = withdraw_token(owner, token_id, amount);
+    let token_store = borrow_global_mut<TokenStore>(signer::address_of(owner));
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(BurnToken { id: token_id, amount: burned_amount });
+    };
+    event::emit_event<BurnTokenEvent>(
+        &mut token_store.burn_events,
+        BurnTokenEvent { id: token_id, amount: burned_amount }
+    );
+
+    // Decrease the supply correspondingly by the amount of tokens burned.
+    // Re-borrow token_data here; the earlier borrow was released before the
+    // withdraw/event code above.
+    let token_data = table::borrow_mut(
+        &mut collections.token_data,
+        token_id.token_data_id,
+    );
+
+    // Only update the supply when we are tracking supply and maximum;
+    // maximum == 0 is reserved for unlimited token and collection with no tracking info.
+    if (token_data.maximum > 0) {
+        token_data.supply = token_data.supply - burned_amount;
+
+        // Delete the token_data if supply drops to 0.
+        if (token_data.supply == 0) {
+            destroy_token_data(table::remove(&mut collections.token_data, token_id.token_data_id));
+
+            // update the collection supply
+            let collection_data = table::borrow_mut(
+                &mut collections.collection_data,
+                token_id.token_data_id.collection
+            );
+
+            // only update and check the supply for tracked (maximum > 0) collections
+            if (collection_data.maximum > 0){
+                collection_data.supply = collection_data.supply - 1;
+                // delete the collection data if the collection supply equals 0
+                if (collection_data.supply == 0) {
+                    destroy_collection_data(table::remove(&mut collections.collection_data, collection_data.name));
+                };
+            };
+        };
+    };
+}
+
+ + + +
+ + + +## Function `mutate_collection_description` + + + +
public fun mutate_collection_description(creator: &signer, collection_name: string::String, description: string::String)
+
+ + + +
+Implementation + + +
public fun mutate_collection_description(creator: &signer, collection_name: String, description: String) acquires Collections {
+    // Only the creator of an existing collection may mutate it, and only
+    // when the description field was declared mutable at creation time.
+    let addr = signer::address_of(creator);
+    assert_collection_exists(addr, collection_name);
+    let all_collection_data = &mut borrow_global_mut<Collections>(addr).collection_data;
+    let collection_data = table::borrow_mut(all_collection_data, collection_name);
+    assert!(collection_data.mutability_config.description, error::permission_denied(EFIELD_NOT_MUTABLE));
+    // Emit the change event carrying the old value before overwriting it.
+    token_event_store::emit_collection_description_mutate_event(creator, collection_name, collection_data.description, description);
+    collection_data.description = description;
+}
+
+ + + +
+ + + +## Function `mutate_collection_uri` + + + +
public fun mutate_collection_uri(creator: &signer, collection_name: string::String, uri: string::String)
+
+ + + +
+Implementation + + +
public fun mutate_collection_uri(creator: &signer, collection_name: String, uri: String) acquires Collections {
+    // Enforce the global URI length cap before touching any state.
+    assert!(string::length(&uri) <= MAX_URI_LENGTH, error::invalid_argument(EURI_TOO_LONG));
+    let addr = signer::address_of(creator);
+    assert_collection_exists(addr, collection_name);
+    let all_collection_data = &mut borrow_global_mut<Collections>(addr).collection_data;
+    let collection_data = table::borrow_mut(all_collection_data, collection_name);
+    // The uri field must have been declared mutable at creation time.
+    assert!(collection_data.mutability_config.uri, error::permission_denied(EFIELD_NOT_MUTABLE));
+    // Emit the change event carrying both the previous and the new URI.
+    token_event_store::emit_collection_uri_mutate_event(creator, collection_name, collection_data.uri, uri);
+    collection_data.uri = uri;
+}
+
+ + + +
+ + + +## Function `mutate_collection_maximum` + + + +
public fun mutate_collection_maximum(creator: &signer, collection_name: string::String, maximum: u64)
+
+ + + +
+Implementation + + +
public fun mutate_collection_maximum(creator: &signer, collection_name: String, maximum: u64) acquires Collections {
+    let addr = signer::address_of(creator);
+    assert_collection_exists(addr, collection_name);
+    let all_collection_data = &mut borrow_global_mut<Collections>(addr).collection_data;
+    let collection_data = table::borrow_mut(all_collection_data, collection_name);
+    // maximum == 0 encodes "untracked/unlimited"; switching into or out of
+    // that mode is forbidden (cannot change maximum from 0 or to 0).
+    assert!(collection_data.maximum != 0 && maximum != 0, error::invalid_argument(EINVALID_MAXIMUM));
+    // The new cap must still cover everything already minted.
+    assert!(maximum >= collection_data.supply, error::invalid_argument(EINVALID_MAXIMUM));
+    assert!(collection_data.mutability_config.maximum, error::permission_denied(EFIELD_NOT_MUTABLE));
+    // Emit the change event carrying the old value before overwriting it.
+    token_event_store::emit_collection_maximum_mutate_event(creator, collection_name, collection_data.maximum, maximum);
+    collection_data.maximum = maximum;
+}
+
+ + + +
+ + + +## Function `mutate_tokendata_maximum` + + + +
public fun mutate_tokendata_maximum(creator: &signer, token_data_id: token::TokenDataId, maximum: u64)
+
+ + + +
+Implementation + + +
public fun mutate_tokendata_maximum(creator: &signer, token_data_id: TokenDataId, maximum: u64) acquires Collections {
+    assert_tokendata_exists(creator, token_data_id);
+    let token_data_table = &mut borrow_global_mut<Collections>(token_data_id.creator).token_data;
+    let token_data = table::borrow_mut(token_data_table, token_data_id);
+    // maximum == 0 encodes "untracked"; switching into or out of that mode
+    // is forbidden (cannot change maximum from 0 or to 0).
+    assert!(token_data.maximum != 0 && maximum != 0, error::invalid_argument(EINVALID_MAXIMUM));
+    // The new cap must still cover the tokens already minted.
+    assert!(maximum >= token_data.supply, error::invalid_argument(EINVALID_MAXIMUM));
+    assert!(token_data.mutability_config.maximum, error::permission_denied(EFIELD_NOT_MUTABLE));
+    // Emit the change event carrying the old value before overwriting it.
+    token_event_store::emit_token_maximum_mutate_event(creator, token_data_id.collection, token_data_id.name, token_data.maximum, maximum);
+    token_data.maximum = maximum;
+}
+
+ + + +
+ + + +## Function `mutate_tokendata_uri` + + + +
public fun mutate_tokendata_uri(creator: &signer, token_data_id: token::TokenDataId, uri: string::String)
+
+ + + +
+Implementation + + +
public fun mutate_tokendata_uri(
+    creator: &signer,
+    token_data_id: TokenDataId,
+    uri: String
+) acquires Collections {
+    // Validate the new URI length first, then confirm the token data exists
+    // and that its uri field was declared mutable.
+    assert!(string::length(&uri) <= MAX_URI_LENGTH, error::invalid_argument(EURI_TOO_LONG));
+    assert_tokendata_exists(creator, token_data_id);
+
+    let token_data_table = &mut borrow_global_mut<Collections>(token_data_id.creator).token_data;
+    let token_data = table::borrow_mut(token_data_table, token_data_id);
+    assert!(token_data.mutability_config.uri, error::permission_denied(EFIELD_NOT_MUTABLE));
+    // Emit the change event carrying both the previous and the new URI.
+    token_event_store::emit_token_uri_mutate_event(creator, token_data_id.collection, token_data_id.name, token_data.uri, uri);
+    token_data.uri = uri;
+}
+
+ + + +
+ + + +## Function `mutate_tokendata_royalty` + + + +
public fun mutate_tokendata_royalty(creator: &signer, token_data_id: token::TokenDataId, royalty: token::Royalty)
+
+ + + +
+Implementation + + +
public fun mutate_tokendata_royalty(creator: &signer, token_data_id: TokenDataId, royalty: Royalty) acquires Collections {
+    assert_tokendata_exists(creator, token_data_id);
+
+    let token_data_table = &mut borrow_global_mut<Collections>(token_data_id.creator).token_data;
+    let token_data = table::borrow_mut(token_data_table, token_data_id);
+    // The royalty field must have been declared mutable at creation time.
+    assert!(token_data.mutability_config.royalty, error::permission_denied(EFIELD_NOT_MUTABLE));
+
+    // Report both the previous and the new royalty settings in the event.
+    let old = &token_data.royalty;
+    token_event_store::emit_token_royalty_mutate_event(
+        creator,
+        token_data_id.collection,
+        token_data_id.name,
+        old.royalty_points_numerator,
+        old.royalty_points_denominator,
+        old.payee_address,
+        royalty.royalty_points_numerator,
+        royalty.royalty_points_denominator,
+        royalty.payee_address
+    );
+    token_data.royalty = royalty;
+}
+
+ + + +
+ + + +## Function `mutate_tokendata_description` + + + +
public fun mutate_tokendata_description(creator: &signer, token_data_id: token::TokenDataId, description: string::String)
+
+ + + +
+Implementation + + +
public fun mutate_tokendata_description(creator: &signer, token_data_id: TokenDataId, description: String) acquires Collections {
+    assert_tokendata_exists(creator, token_data_id);
+
+    let token_data_table = &mut borrow_global_mut<Collections>(token_data_id.creator).token_data;
+    let token_data = table::borrow_mut(token_data_table, token_data_id);
+    // The description field must have been declared mutable at creation time.
+    assert!(token_data.mutability_config.description, error::permission_denied(EFIELD_NOT_MUTABLE));
+    // Emit the change event carrying the old description before overwriting it.
+    token_event_store::emit_token_descrition_mutate_event(creator, token_data_id.collection, token_data_id.name, token_data.description, description);
+    token_data.description = description;
+}
+
+ + + +
+ + + +## Function `mutate_tokendata_property` + +Allow creator to mutate the default properties in TokenData + + +
public fun mutate_tokendata_property(creator: &signer, token_data_id: token::TokenDataId, keys: vector<string::String>, values: vector<vector<u8>>, types: vector<string::String>)
+
+ + + +
+Implementation + + +
public fun mutate_tokendata_property(
+    creator: &signer,
+    token_data_id: TokenDataId,
+    keys: vector<String>,
+    values: vector<vector<u8>>,
+    types: vector<String>,
+) acquires Collections {
+    // Mutate the default property map of a TokenData. `keys`, `values` and
+    // `types` are parallel vectors and must all have the same length.
+    assert_tokendata_exists(creator, token_data_id);
+    let key_len = vector::length(&keys);
+    let val_len = vector::length(&values);
+    let typ_len = vector::length(&types);
+    assert!(key_len == val_len, error::invalid_state(ETOKEN_PROPERTIES_COUNT_NOT_MATCH));
+    assert!(key_len == typ_len, error::invalid_state(ETOKEN_PROPERTIES_COUNT_NOT_MATCH));
+
+    let all_token_data = &mut borrow_global_mut<Collections>(token_data_id.creator).token_data;
+    let token_data = table::borrow_mut(all_token_data, token_data_id);
+    assert!(token_data.mutability_config.properties, error::permission_denied(EFIELD_NOT_MUTABLE));
+    let i: u64 = 0;
+    // Collect old and new values so a single mutation event can report both.
+    let old_values: vector<Option<PropertyValue>> = vector::empty();
+    let new_values: vector<PropertyValue> = vector::empty();
+    // Creators may not set reserved (framework-defined) property keys here.
+    assert_non_standard_reserved_property(&keys);
+    while (i < vector::length(&keys)){
+        let key = vector::borrow(&keys, i);
+        // Remember the previous value for this key (None if it was absent).
+        let old_pv = if (property_map::contains_key(&token_data.default_properties, key)) {
+            option::some(*property_map::borrow(&token_data.default_properties, key))
+        } else {
+            option::none<PropertyValue>()
+        };
+        vector::push_back(&mut old_values, old_pv);
+        let new_pv = property_map::create_property_value_raw(*vector::borrow(&values, i), *vector::borrow(&types, i));
+        vector::push_back(&mut new_values, new_pv);
+        // Update in place when the key already exists, otherwise add it.
+        if (option::is_some(&old_pv)) {
+            property_map::update_property_value(&mut token_data.default_properties, key, new_pv);
+        } else {
+            property_map::add(&mut token_data.default_properties, *key, new_pv);
+        };
+        i = i + 1;
+    };
+    token_event_store::emit_default_property_mutate_event(creator, token_data_id.collection, token_data_id.name, keys, old_values, new_values);
+}
+
+ + + +
+ + + +## Function `mutate_one_token` + +Mutate the token_properties of one token. + + +
public fun mutate_one_token(account: &signer, token_owner: address, token_id: token::TokenId, keys: vector<string::String>, values: vector<vector<u8>>, types: vector<string::String>): token::TokenId
+
+ + + +
+Implementation + + +
public fun mutate_one_token(
+    account: &signer,
+    token_owner: address,
+    token_id: TokenId,
+    keys: vector<String>,
+    values: vector<vector<u8>>,
+    types: vector<String>,
+): TokenId acquires Collections, TokenStore {
+    // Mutate the token_properties of one token held by `token_owner`.
+    // Only the creator may do this. Returns the (possibly new) TokenId:
+    // mutating a property_version-0 token mints a fresh property_version.
+    let creator = token_id.token_data_id.creator;
+    assert!(signer::address_of(account) == creator, error::permission_denied(ENO_MUTATE_CAPABILITY));
+    // validate if the properties is mutable
+    assert!(exists<Collections>(creator), error::not_found(ECOLLECTIONS_NOT_PUBLISHED));
+    let all_token_data = &mut borrow_global_mut<Collections>(
+        creator
+    ).token_data;
+
+    assert!(table::contains(all_token_data, token_id.token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
+    let token_data = table::borrow_mut(all_token_data, token_id.token_data_id);
+
+    // if default property is mutable, token property is always mutable;
+    // we only need to check TOKEN_PROPERTY_MUTABLE when default property is immutable
+    if (!token_data.mutability_config.properties) {
+        assert!(
+            property_map::contains_key(&token_data.default_properties, &string::utf8(TOKEN_PROPERTY_MUTABLE)),
+            error::permission_denied(EFIELD_NOT_MUTABLE)
+        );
+
+        let token_prop_mutable = property_map::read_bool(&token_data.default_properties, &string::utf8(TOKEN_PROPERTY_MUTABLE));
+        assert!(token_prop_mutable, error::permission_denied(EFIELD_NOT_MUTABLE));
+    };
+
+    // check if the property_version is 0 to determine if we need to update the property_version
+    if (token_id.property_version == 0) {
+        // Withdraw one fungible (version-0) token and re-issue it under a
+        // brand-new property_version so it becomes a distinct NFT.
+        let token = withdraw_with_event_internal(token_owner, token_id, 1);
+        // give a new property_version for each token
+        let cur_property_version = token_data.largest_property_version + 1;
+        let new_token_id = create_token_id(token_id.token_data_id, cur_property_version);
+        let new_token = Token {
+            id: new_token_id,
+            amount: 1,
+            token_properties: token_data.default_properties,
+        };
+        direct_deposit(token_owner, new_token);
+        update_token_property_internal(token_owner, new_token_id, keys, values, types);
+        if (std::features::module_event_migration_enabled()) {
+            event::emit(MutateTokenPropertyMap {
+                old_id: token_id,
+                new_id: new_token_id,
+                keys,
+                values,
+                types
+            });
+        };
+        event::emit_event<MutateTokenPropertyMapEvent>(
+            &mut borrow_global_mut<TokenStore>(token_owner).mutate_token_property_events,
+            MutateTokenPropertyMapEvent {
+                old_id: token_id,
+                new_id: new_token_id,
+                keys,
+                values,
+                types
+            },
+        );
+
+        token_data.largest_property_version = cur_property_version;
+        // burn the original property_version 0 token after mutation
+        let Token { id: _, amount: _, token_properties: _ } = token;
+        new_token_id
+    } else {
+        // only 1 copy for the token with property version bigger than 0,
+        // so mutate its property map in place and keep the same id
+        update_token_property_internal(token_owner, token_id, keys, values, types);
+        if (std::features::module_event_migration_enabled()) {
+            event::emit(MutateTokenPropertyMap {
+                old_id: token_id,
+                new_id: token_id,
+                keys,
+                values,
+                types
+            });
+        };
+        event::emit_event<MutateTokenPropertyMapEvent>(
+            &mut borrow_global_mut<TokenStore>(token_owner).mutate_token_property_events,
+            MutateTokenPropertyMapEvent {
+                old_id: token_id,
+                new_id: token_id,
+                keys,
+                values,
+                types
+            },
+        );
+        token_id
+    }
+}
+
+ + + +
+ + + +## Function `create_royalty` + + + +
public fun create_royalty(royalty_points_numerator: u64, royalty_points_denominator: u64, payee_address: address): token::Royalty
+
+ + + +
+Implementation + + +
public fun create_royalty(royalty_points_numerator: u64, royalty_points_denominator: u64, payee_address: address): Royalty {
+    // A royalty fraction greater than 1 would let the payee claim more than
+    // the full sale amount, so numerator must not exceed denominator.
+    assert!(royalty_points_numerator <= royalty_points_denominator, error::invalid_argument(EINVALID_ROYALTY_NUMERATOR_DENOMINATOR));
+    // The payee account must already exist on chain so royalty payments
+    // have a valid destination.
+    assert!(account::exists_at(payee_address), error::invalid_argument(EROYALTY_PAYEE_ACCOUNT_DOES_NOT_EXIST));
+    Royalty { royalty_points_numerator, royalty_points_denominator, payee_address }
+}
+
+ + + +
+ + + +## Function `deposit_token` + +Deposit the token balance into the owner's account and emit an event. + + +
public fun deposit_token(account: &signer, token: token::Token)
+
+ + + +
+Implementation + + +
public fun deposit_token(account: &signer, token: Token) acquires TokenStore {
+    // Create the account's TokenStore on demand, then deposit directly into it.
+    initialize_token_store(account);
+    direct_deposit(signer::address_of(account), token)
+}
+
+ + + +
+ + + +## Function `direct_deposit_with_opt_in` + +direct deposit if user opt in direct transfer + + +
public fun direct_deposit_with_opt_in(account_addr: address, token: token::Token)
+
+ + + +
+Implementation + + +
public fun direct_deposit_with_opt_in(account_addr: address, token: Token) acquires TokenStore {
+    // Depositing into an arbitrary address is only permitted when that
+    // address has explicitly opted in to direct transfers.
+    assert!(
+        borrow_global<TokenStore>(account_addr).direct_transfer,
+        error::permission_denied(EUSER_NOT_OPT_IN_DIRECT_TRANSFER)
+    );
+    direct_deposit(account_addr, token);
+}
+
+ + + +
+ + + +## Function `direct_transfer` + + + +
public fun direct_transfer(sender: &signer, receiver: &signer, token_id: token::TokenId, amount: u64)
+
+ + + +
+Implementation + + +
public fun direct_transfer(
+    sender: &signer,
+    receiver: &signer,
+    token_id: TokenId,
+    amount: u64,
+) acquires TokenStore {
+    // Both parties sign, so no opt-in check is needed: withdraw from the
+    // sender and hand the tokens straight to the receiver (deposit_token
+    // creates the receiver's TokenStore on demand).
+    deposit_token(receiver, withdraw_token(sender, token_id, amount));
+}
+
+ + + +
+ + + +## Function `initialize_token_store` + + + +
public fun initialize_token_store(account: &signer)
+
+ + + +
+Implementation + + +
public fun initialize_token_store(account: &signer) {
+    // Idempotent: publishing a second TokenStore would abort, so only
+    // publish when the account does not already hold one.
+    let addr = signer::address_of(account);
+    if (!exists<TokenStore>(addr)) {
+        move_to(
+            account,
+            TokenStore {
+                tokens: table::new(),
+                // Direct transfers are opt-in; start disabled.
+                direct_transfer: false,
+                deposit_events: account::new_event_handle<DepositEvent>(account),
+                withdraw_events: account::new_event_handle<WithdrawEvent>(account),
+                burn_events: account::new_event_handle<BurnTokenEvent>(account),
+                mutate_token_property_events: account::new_event_handle<MutateTokenPropertyMapEvent>(account),
+            },
+        );
+    }
+}
+
+ + + +
+ + + +## Function `merge` + + + +
public fun merge(dst_token: &mut token::Token, source_token: token::Token)
+
+ + + +
+Implementation + + +
public fun merge(dst_token: &mut Token, source_token: Token) {
+    // Tokens may only merge when they carry the exact same TokenId
+    // (including property_version); the source is consumed either way.
+    let Token { id, amount, token_properties: _ } = source_token;
+    assert!(&dst_token.id == &id, error::invalid_argument(EINVALID_TOKEN_MERGE));
+    dst_token.amount = dst_token.amount + amount;
+}
+
+ + + +
+ + + +## Function `split` + + + +
public fun split(dst_token: &mut token::Token, amount: u64): token::Token
+
+ + + +
+Implementation + + +
public fun split(dst_token: &mut Token, amount: u64): Token {
+    // Only fungible tokens (property_version == 0) may be split.
+    assert!(dst_token.id.property_version == 0, error::invalid_state(ENFT_NOT_SPLITABLE));
+    // The split-off amount must be a strict, non-zero portion of the balance.
+    assert!(dst_token.amount > amount, error::invalid_argument(ETOKEN_SPLIT_AMOUNT_LARGER_OR_EQUAL_TO_TOKEN_AMOUNT));
+    assert!(amount > 0, error::invalid_argument(ETOKEN_CANNOT_HAVE_ZERO_AMOUNT));
+    dst_token.amount = dst_token.amount - amount;
+    // The new piece shares the same id; version-0 tokens carry no
+    // per-token properties, so it starts with an empty property map.
+    Token { id: dst_token.id, amount, token_properties: property_map::empty() }
+}
+
+ + + +
+ + + +## Function `token_id` + + + +
public fun token_id(token: &token::Token): &token::TokenId
+
+ + + +
+Implementation + + +
public fun token_id(token: &Token): &TokenId {
+    // Accessor: borrow the TokenId of a Token without copying it.
+    &token.id
+}
+
+ + + +
+ + + +## Function `transfer` + +Transfers `amount` of tokens from the `from` account to the `to` address. + + +
public fun transfer(from: &signer, id: token::TokenId, to: address, amount: u64)
+
+ + + +
+Implementation + + +
public fun transfer(
+    from: &signer,
+    id: TokenId,
+    to: address,
+    amount: u64,
+) acquires TokenStore {
+    // Only the sender signs, so the recipient must have opted in to direct
+    // transfers; otherwise tokens could be pushed onto unwilling accounts.
+    assert!(
+        borrow_global<TokenStore>(to).direct_transfer,
+        error::permission_denied(EUSER_NOT_OPT_IN_DIRECT_TRANSFER)
+    );
+    direct_deposit(to, withdraw_token(from, id, amount));
+}
+
+ + + +
+ + + +## Function `create_withdraw_capability` + +Token owner can create this one-time withdraw capability with an expiration time + + +
public fun create_withdraw_capability(owner: &signer, token_id: token::TokenId, amount: u64, expiration_sec: u64): token::WithdrawCapability
+
+ + + +
+Implementation + + +
public fun create_withdraw_capability(
+    owner: &signer,
+    token_id: TokenId,
+    amount: u64,
+    expiration_sec: u64,
+): WithdrawCapability {
+    // The capability is bound to the signer's address at creation time and
+    // becomes unusable after `expiration_sec`.
+    let token_owner = signer::address_of(owner);
+    WithdrawCapability { token_owner, token_id, amount, expiration_sec }
+}
+
+ + + +
+ + + +## Function `withdraw_with_capability` + +Withdraw the token with a capability + + +
public fun withdraw_with_capability(withdraw_proof: token::WithdrawCapability): token::Token
+
+ + + +
+Implementation + + +
public fun withdraw_with_capability(
+    withdraw_proof: WithdrawCapability,
+): Token acquires TokenStore {
+    // Consume the one-time capability; unpack it into its components.
+    let WithdrawCapability { token_owner, token_id, amount, expiration_sec } = withdraw_proof;
+    // verify the delegation hasn't expired yet
+    assert!(timestamp::now_seconds() <= expiration_sec, error::invalid_argument(EWITHDRAW_PROOF_EXPIRES));
+    withdraw_with_event_internal(token_owner, token_id, amount)
+}
+
+ + + +
+ + + +## Function `partial_withdraw_with_capability` + +Withdraw the token with a capability. + + +
public fun partial_withdraw_with_capability(withdraw_proof: token::WithdrawCapability, withdraw_amount: u64): (token::Token, option::Option<token::WithdrawCapability>)
+
+ + + +
+Implementation + + +
public fun partial_withdraw_with_capability(
+    withdraw_proof: WithdrawCapability,
+    withdraw_amount: u64,
+): (Token, Option<WithdrawCapability>) acquires TokenStore {
+    // verify the delegation hasn't expired yet
+    assert!(timestamp::now_seconds() <= withdraw_proof.expiration_sec, error::invalid_argument(EWITHDRAW_PROOF_EXPIRES));
+    // cannot take more than the capability authorizes
+    assert!(withdraw_amount <= withdraw_proof.amount, error::invalid_argument(EINSUFFICIENT_WITHDRAW_CAPABILITY_AMOUNT));
+
+    let WithdrawCapability { token_owner, token_id, amount, expiration_sec } = withdraw_proof;
+    // If the full authorized amount is consumed the capability is spent;
+    // otherwise hand back a fresh capability for the remainder.
+    let remainder: Option<WithdrawCapability> = if (withdraw_amount == amount) {
+        option::none<WithdrawCapability>()
+    } else {
+        option::some(WithdrawCapability {
+            token_owner,
+            token_id,
+            amount: amount - withdraw_amount,
+            expiration_sec,
+        })
+    };
+
+    (
+        withdraw_with_event_internal(token_owner, token_id, withdraw_amount),
+        remainder
+    )
+}
+
+ + + +
+ + + +## Function `withdraw_token` + + + +
public fun withdraw_token(account: &signer, id: token::TokenId, amount: u64): token::Token
+
+ + + +
+Implementation + + +
public fun withdraw_token(
+    account: &signer,
+    id: TokenId,
+    amount: u64,
+): Token acquires TokenStore {
+    // Delegate to the internal withdraw, which also emits the withdraw event.
+    withdraw_with_event_internal(signer::address_of(account), id, amount)
+}
+
+ + + +
+ + + +## Function `create_collection` + +Create a new collection to hold tokens + + +
public fun create_collection(creator: &signer, name: string::String, description: string::String, uri: string::String, maximum: u64, mutate_setting: vector<bool>)
+
+ + + +
+Implementation + + +
public fun create_collection(
+    creator: &signer,
+    name: String,
+    description: String,
+    uri: String,
+    maximum: u64,
+    mutate_setting: vector<bool>
+) acquires Collections {
+    // Create a new collection under the creator's account. `maximum == 0`
+    // means unlimited/untracked supply; `mutate_setting` selects which
+    // fields remain mutable after creation.
+    assert!(string::length(&name) <= MAX_COLLECTION_NAME_LENGTH, error::invalid_argument(ECOLLECTION_NAME_TOO_LONG));
+    assert!(string::length(&uri) <= MAX_URI_LENGTH, error::invalid_argument(EURI_TOO_LONG));
+    let account_addr = signer::address_of(creator);
+    // Publish the creator's Collections resource on first use.
+    if (!exists<Collections>(account_addr)) {
+        move_to(
+            creator,
+            Collections {
+                collection_data: table::new(),
+                token_data: table::new(),
+                create_collection_events: account::new_event_handle<CreateCollectionEvent>(creator),
+                create_token_data_events: account::new_event_handle<CreateTokenDataEvent>(creator),
+                mint_token_events: account::new_event_handle<MintTokenEvent>(creator),
+            },
+        )
+    };
+
+    let collection_data = &mut borrow_global_mut<Collections>(account_addr).collection_data;
+
+    // Collection names are unique per creator.
+    assert!(
+        !table::contains(collection_data, name),
+        error::already_exists(ECOLLECTION_ALREADY_EXISTS),
+    );
+
+    let mutability_config = create_collection_mutability_config(&mutate_setting);
+    let collection = CollectionData {
+        description,
+        name: name,
+        uri,
+        supply: 0,
+        maximum,
+        mutability_config
+    };
+
+    table::add(collection_data, name, collection);
+    let collection_handle = borrow_global_mut<Collections>(account_addr);
+    // Emit the module event only when migration is enabled; the legacy
+    // handle-based event is always emitted for backward compatibility.
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            CreateCollection {
+                creator: account_addr,
+                collection_name: name,
+                uri,
+                description,
+                maximum,
+            }
+        );
+    };
+    event::emit_event<CreateCollectionEvent>(
+        &mut collection_handle.create_collection_events,
+        CreateCollectionEvent {
+            creator: account_addr,
+            collection_name: name,
+            uri,
+            description,
+            maximum,
+        }
+    );
+}
+
+ + + +
+ + + +## Function `check_collection_exists` + + + +
public fun check_collection_exists(creator: address, name: string::String): bool
+
+ + + +
+Implementation + + +
public fun check_collection_exists(creator: address, name: String): bool acquires Collections {
+    // An account without a Collections resource has never created any
+    // collection; that case aborts rather than returning false.
+    assert!(exists<Collections>(creator), error::not_found(ECOLLECTIONS_NOT_PUBLISHED));
+    table::contains(&borrow_global<Collections>(creator).collection_data, name)
+}
+
+ + + +
+ + + +## Function `check_tokendata_exists` + + + +
public fun check_tokendata_exists(creator: address, collection_name: string::String, token_name: string::String): bool
+
+ + + +
+Implementation + + +
public fun check_tokendata_exists(creator: address, collection_name: String, token_name: String): bool acquires Collections {
+    // An account without a Collections resource has never created any token
+    // data; that case aborts rather than returning false.
+    assert!(exists<Collections>(creator), error::not_found(ECOLLECTIONS_NOT_PUBLISHED));
+    // TokenDataId is the composite key (creator, collection name, token name).
+    let token_data_id = create_token_data_id(creator, collection_name, token_name);
+    table::contains(&borrow_global<Collections>(creator).token_data, token_data_id)
+}
+
+ + + +
+ + + +## Function `create_tokendata` + + + +
public fun create_tokendata(account: &signer, collection: string::String, name: string::String, description: string::String, maximum: u64, uri: string::String, royalty_payee_address: address, royalty_points_denominator: u64, royalty_points_numerator: u64, token_mutate_config: token::TokenMutabilityConfig, property_keys: vector<string::String>, property_values: vector<vector<u8>>, property_types: vector<string::String>): token::TokenDataId
+
+ + + +
+Implementation + + +
public fun create_tokendata(
+    account: &signer,
+    collection: String,
+    name: String,
+    description: String,
+    maximum: u64,
+    uri: String,
+    royalty_payee_address: address,
+    royalty_points_denominator: u64,
+    royalty_points_numerator: u64,
+    token_mutate_config: TokenMutabilityConfig,
+    property_keys: vector<String>,
+    property_values: vector<vector<u8>>,
+    property_types: vector<String>
+): TokenDataId acquires Collections {
+    // Create new TokenData under an existing collection of the signing
+    // creator and return its id. `maximum == 0` disables supply tracking.
+    assert!(string::length(&name) <= MAX_NFT_NAME_LENGTH, error::invalid_argument(ENFT_NAME_TOO_LONG));
+    assert!(string::length(&collection) <= MAX_COLLECTION_NAME_LENGTH, error::invalid_argument(ECOLLECTION_NAME_TOO_LONG));
+    assert!(string::length(&uri) <= MAX_URI_LENGTH, error::invalid_argument(EURI_TOO_LONG));
+    assert!(royalty_points_numerator <= royalty_points_denominator, error::invalid_argument(EINVALID_ROYALTY_NUMERATOR_DENOMINATOR));
+
+    let account_addr = signer::address_of(account);
+    assert!(
+        exists<Collections>(account_addr),
+        error::not_found(ECOLLECTIONS_NOT_PUBLISHED),
+    );
+    let collections = borrow_global_mut<Collections>(account_addr);
+
+    let token_data_id = create_token_data_id(account_addr, collection, name);
+
+    // The target collection must exist and the token name must be unused.
+    assert!(
+        table::contains(&collections.collection_data, token_data_id.collection),
+        error::not_found(ECOLLECTION_NOT_PUBLISHED),
+    );
+    assert!(
+        !table::contains(&collections.token_data, token_data_id),
+        error::already_exists(ETOKEN_DATA_ALREADY_EXISTS),
+    );
+
+    // NOTE: shadows the `collection` name parameter with the CollectionData entry.
+    let collection = table::borrow_mut(&mut collections.collection_data, token_data_id.collection);
+
+    // if collection maximum == 0, user don't want to enforce supply constraint.
+    // we don't track supply to make token creation parallelizable
+    if (collection.maximum > 0) {
+        collection.supply = collection.supply + 1;
+        assert!(
+            collection.maximum >= collection.supply,
+            error::invalid_argument(ECREATE_WOULD_EXCEED_COLLECTION_MAXIMUM),
+        );
+    };
+
+    let token_data = TokenData {
+        maximum,
+        largest_property_version: 0,
+        supply: 0,
+        uri,
+        royalty: create_royalty(royalty_points_numerator, royalty_points_denominator, royalty_payee_address),
+        name,
+        description,
+        default_properties: property_map::new(property_keys, property_values, property_types),
+        mutability_config: token_mutate_config,
+    };
+
+    table::add(&mut collections.token_data, token_data_id, token_data);
+    // Emit the module event only when migration is enabled; the legacy
+    // handle-based event is always emitted for backward compatibility.
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            CreateTokenData {
+                id: token_data_id,
+                description,
+                maximum,
+                uri,
+                royalty_payee_address,
+                royalty_points_denominator,
+                royalty_points_numerator,
+                name,
+                mutability_config: token_mutate_config,
+                property_keys,
+                property_values,
+                property_types,
+            }
+        );
+    };
+
+    event::emit_event<CreateTokenDataEvent>(
+        &mut collections.create_token_data_events,
+        CreateTokenDataEvent {
+            id: token_data_id,
+            description,
+            maximum,
+            uri,
+            royalty_payee_address,
+            royalty_points_denominator,
+            royalty_points_numerator,
+            name,
+            mutability_config: token_mutate_config,
+            property_keys,
+            property_values,
+            property_types,
+        },
+    );
+    token_data_id
+}
+
+ + + +
+ + + +## Function `get_collection_supply` + +return the number of distinct token_data_id created under this collection + + +
public fun get_collection_supply(creator_address: address, collection_name: string::String): option::Option<u64>
+
+ + + +
+Implementation + + +
public fun get_collection_supply(creator_address: address, collection_name: String): Option<u64> acquires Collections {
+    assert_collection_exists(creator_address, collection_name);
+    let collection_data = table::borrow_mut(&mut borrow_global_mut<Collections>(creator_address).collection_data, collection_name);
+
+    if (collection_data.maximum > 0) {
+        option::some(collection_data.supply)
+    } else {
+        option::none()
+    }
+}
+
+ + + +
+ + + +## Function `get_collection_description` + + + +
public fun get_collection_description(creator_address: address, collection_name: string::String): string::String
+
+ + + +
+Implementation + + +
public fun get_collection_description(creator_address: address, collection_name: String): String acquires Collections {
+    assert_collection_exists(creator_address, collection_name);
+    let collection_data = table::borrow_mut(&mut borrow_global_mut<Collections>(creator_address).collection_data, collection_name);
+    collection_data.description
+}
+
+ + + +
+ + + +## Function `get_collection_uri` + + + +
public fun get_collection_uri(creator_address: address, collection_name: string::String): string::String
+
+ + + +
+Implementation + + +
public fun get_collection_uri(creator_address: address, collection_name: String): String acquires Collections {
+    assert_collection_exists(creator_address, collection_name);
+    let collection_data = table::borrow_mut(&mut borrow_global_mut<Collections>(creator_address).collection_data, collection_name);
+    collection_data.uri
+}
+
+ + + +
+ + + +## Function `get_collection_maximum` + + + +
public fun get_collection_maximum(creator_address: address, collection_name: string::String): u64
+
+ + + +
+Implementation + + +
public fun get_collection_maximum(creator_address: address, collection_name: String): u64 acquires Collections {
+    assert_collection_exists(creator_address, collection_name);
+    let collection_data = table::borrow_mut(&mut borrow_global_mut<Collections>(creator_address).collection_data, collection_name);
+    collection_data.maximum
+}
+
+ + + +
+ + + +## Function `get_token_supply` + +return the number of distinct token_id created under this TokenData + + +
public fun get_token_supply(creator_address: address, token_data_id: token::TokenDataId): option::Option<u64>
+
+ + + +
+Implementation + + +
public fun get_token_supply(creator_address: address, token_data_id: TokenDataId): Option<u64> acquires Collections {
+    assert!(exists<Collections>(creator_address), error::not_found(ECOLLECTIONS_NOT_PUBLISHED));
+    let all_token_data = &borrow_global<Collections>(creator_address).token_data;
+    assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
+    let token_data = table::borrow(all_token_data, token_data_id);
+
+    if (token_data.maximum > 0) {
+        option::some(token_data.supply)
+    } else {
+        option::none<u64>()
+    }
+}
+
+ + + +
+ + + +## Function `get_tokendata_largest_property_version` + +return the largest_property_version of this TokenData + + +
public fun get_tokendata_largest_property_version(creator_address: address, token_data_id: token::TokenDataId): u64
+
+ + + +
+Implementation + + +
public fun get_tokendata_largest_property_version(creator_address: address, token_data_id: TokenDataId): u64 acquires Collections {
+    assert!(exists<Collections>(creator_address), error::not_found(ECOLLECTIONS_NOT_PUBLISHED));
+    let all_token_data = &borrow_global<Collections>(creator_address).token_data;
+    assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
+    table::borrow(all_token_data, token_data_id).largest_property_version
+}
+
+ + + +
+ + + +## Function `get_token_id` + +return the TokenId for a given Token + + +
public fun get_token_id(token: &token::Token): token::TokenId
+
+ + + +
+Implementation + + +
public fun get_token_id(token: &Token): TokenId {
+    token.id
+}
+
+ + + +
+ + + +## Function `get_direct_transfer` + + + +
public fun get_direct_transfer(receiver: address): bool
+
+ + + +
+Implementation + + +
public fun get_direct_transfer(receiver: address): bool acquires TokenStore {
+    if (!exists<TokenStore>(receiver)) {
+        return false
+    };
+
+    borrow_global<TokenStore>(receiver).direct_transfer
+}
+
+ + + +
+ + + +## Function `create_token_mutability_config` + + + +
public fun create_token_mutability_config(mutate_setting: &vector<bool>): token::TokenMutabilityConfig
+
+ + + +
+Implementation + + +
public fun create_token_mutability_config(mutate_setting: &vector<bool>): TokenMutabilityConfig {
+    TokenMutabilityConfig {
+        maximum: *vector::borrow(mutate_setting, TOKEN_MAX_MUTABLE_IND),
+        uri: *vector::borrow(mutate_setting, TOKEN_URI_MUTABLE_IND),
+        royalty: *vector::borrow(mutate_setting, TOKEN_ROYALTY_MUTABLE_IND),
+        description: *vector::borrow(mutate_setting, TOKEN_DESCRIPTION_MUTABLE_IND),
+        properties: *vector::borrow(mutate_setting, TOKEN_PROPERTY_MUTABLE_IND),
+    }
+}
+
+ + + +
+ + + +## Function `create_collection_mutability_config` + + + +
public fun create_collection_mutability_config(mutate_setting: &vector<bool>): token::CollectionMutabilityConfig
+
+ + + +
+Implementation + + +
public fun create_collection_mutability_config(mutate_setting: &vector<bool>): CollectionMutabilityConfig {
+    CollectionMutabilityConfig {
+        description: *vector::borrow(mutate_setting, COLLECTION_DESCRIPTION_MUTABLE_IND),
+        uri: *vector::borrow(mutate_setting, COLLECTION_URI_MUTABLE_IND),
+        maximum: *vector::borrow(mutate_setting, COLLECTION_MAX_MUTABLE_IND),
+    }
+}
+
+ + + +
+ + + +## Function `mint_token` + + + +
public fun mint_token(account: &signer, token_data_id: token::TokenDataId, amount: u64): token::TokenId
+
+ + + +
+Implementation + + +
public fun mint_token(
+    account: &signer,
+    token_data_id: TokenDataId,
+    amount: u64,
+): TokenId acquires Collections, TokenStore {
+    assert!(token_data_id.creator == signer::address_of(account), error::permission_denied(ENO_MINT_CAPABILITY));
+    let creator_addr = token_data_id.creator;
+    let all_token_data = &mut borrow_global_mut<Collections>(creator_addr).token_data;
+    assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
+    let token_data = table::borrow_mut(all_token_data, token_data_id);
+
+    if (token_data.maximum > 0) {
+        assert!(token_data.supply + amount <= token_data.maximum, error::invalid_argument(EMINT_WOULD_EXCEED_TOKEN_MAXIMUM));
+        token_data.supply = token_data.supply + amount;
+    };
+
+    // we add more tokens with property_version 0
+    let token_id = create_token_id(token_data_id, 0);
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(MintToken { id: token_data_id, amount })
+    };
+    event::emit_event<MintTokenEvent>(
+        &mut borrow_global_mut<Collections>(creator_addr).mint_token_events,
+        MintTokenEvent {
+            id: token_data_id,
+            amount,
+        }
+    );
+
+    deposit_token(account,
+        Token {
+            id: token_id,
+            amount,
+            token_properties: property_map::empty(), // same as default properties no need to store
+        }
+    );
+
+    token_id
+}
+
+ + + +
+ + + +## Function `mint_token_to` + +Create tokens and directly deposit them to the receiver's address. The receiver should have opted in to direct transfer. + + +
public fun mint_token_to(account: &signer, receiver: address, token_data_id: token::TokenDataId, amount: u64)
+
+ + + +
+Implementation + + +
public fun mint_token_to(
+    account: &signer,
+    receiver: address,
+    token_data_id: TokenDataId,
+    amount: u64,
+) acquires Collections, TokenStore {
+    assert!(exists<TokenStore>(receiver), error::not_found(ETOKEN_STORE_NOT_PUBLISHED));
+    let opt_in_transfer = borrow_global<TokenStore>(receiver).direct_transfer;
+    assert!(opt_in_transfer, error::permission_denied(EUSER_NOT_OPT_IN_DIRECT_TRANSFER));
+
+    assert!(token_data_id.creator == signer::address_of(account), error::permission_denied(ENO_MINT_CAPABILITY));
+    let creator_addr = token_data_id.creator;
+    let all_token_data = &mut borrow_global_mut<Collections>(creator_addr).token_data;
+    assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
+    let token_data = table::borrow_mut(all_token_data, token_data_id);
+
+    if (token_data.maximum > 0) {
+        assert!(token_data.supply + amount <= token_data.maximum, error::invalid_argument(EMINT_WOULD_EXCEED_TOKEN_MAXIMUM));
+        token_data.supply = token_data.supply + amount;
+    };
+
+    // we add more tokens with property_version 0
+    let token_id = create_token_id(token_data_id, 0);
+
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(MintToken { id: token_data_id, amount })
+    };
+    event::emit_event<MintTokenEvent>(
+        &mut borrow_global_mut<Collections>(creator_addr).mint_token_events,
+        MintTokenEvent {
+            id: token_data_id,
+            amount,
+        }
+    );
+
+    direct_deposit(receiver,
+        Token {
+            id: token_id,
+            amount,
+            token_properties: property_map::empty(), // same as default properties no need to store
+        }
+    );
+}
+
+ + + +
+ + + +## Function `create_token_id` + + + +
public fun create_token_id(token_data_id: token::TokenDataId, property_version: u64): token::TokenId
+
+ + + +
+Implementation + + +
public fun create_token_id(token_data_id: TokenDataId, property_version: u64): TokenId {
+    TokenId {
+        token_data_id,
+        property_version,
+    }
+}
+
+ + + +
+ + + +## Function `create_token_data_id` + + + +
public fun create_token_data_id(creator: address, collection: string::String, name: string::String): token::TokenDataId
+
+ + + +
+Implementation + + +
public fun create_token_data_id(
+    creator: address,
+    collection: String,
+    name: String,
+): TokenDataId {
+    assert!(string::length(&collection) <= MAX_COLLECTION_NAME_LENGTH, error::invalid_argument(ECOLLECTION_NAME_TOO_LONG));
+    assert!(string::length(&name) <= MAX_NFT_NAME_LENGTH, error::invalid_argument(ENFT_NAME_TOO_LONG));
+    TokenDataId { creator, collection, name }
+}
+
+ + + +
+ + + +## Function `create_token_id_raw` + + + +
public fun create_token_id_raw(creator: address, collection: string::String, name: string::String, property_version: u64): token::TokenId
+
+ + + +
+Implementation + + +
public fun create_token_id_raw(
+    creator: address,
+    collection: String,
+    name: String,
+    property_version: u64,
+): TokenId {
+    TokenId {
+        token_data_id: create_token_data_id(creator, collection, name),
+        property_version,
+    }
+}
+
+ + + +
+ + + +## Function `balance_of` + + + +
public fun balance_of(owner: address, id: token::TokenId): u64
+
+ + + +
+Implementation + + +
public fun balance_of(owner: address, id: TokenId): u64 acquires TokenStore {
+    if (!exists<TokenStore>(owner)) {
+        return 0
+    };
+    let token_store = borrow_global<TokenStore>(owner);
+    if (table::contains(&token_store.tokens, id)) {
+        table::borrow(&token_store.tokens, id).amount
+    } else {
+        0
+    }
+}
+
+ + + +
+ + + +## Function `has_token_store` + + + +
public fun has_token_store(owner: address): bool
+
+ + + +
+Implementation + + +
public fun has_token_store(owner: address): bool {
+    exists<TokenStore>(owner)
+}
+
+ + + +
+ + + +## Function `get_royalty` + + + +
public fun get_royalty(token_id: token::TokenId): token::Royalty
+
+ + + +
+Implementation + + +
public fun get_royalty(token_id: TokenId): Royalty acquires Collections {
+    let token_data_id = token_id.token_data_id;
+    get_tokendata_royalty(token_data_id)
+}
+
+ + + +
+ + + +## Function `get_royalty_numerator` + + + +
public fun get_royalty_numerator(royalty: &token::Royalty): u64
+
+ + + +
+Implementation + + +
public fun get_royalty_numerator(royalty: &Royalty): u64 {
+    royalty.royalty_points_numerator
+}
+
+ + + +
+ + + +## Function `get_royalty_denominator` + + + +
public fun get_royalty_denominator(royalty: &token::Royalty): u64
+
+ + + +
+Implementation + + +
public fun get_royalty_denominator(royalty: &Royalty): u64 {
+    royalty.royalty_points_denominator
+}
+
+ + + +
+ + + +## Function `get_royalty_payee` + + + +
public fun get_royalty_payee(royalty: &token::Royalty): address
+
+ + + +
+Implementation + + +
public fun get_royalty_payee(royalty: &Royalty): address {
+    royalty.payee_address
+}
+
+ + + +
+ + + +## Function `get_token_amount` + + + +
public fun get_token_amount(token: &token::Token): u64
+
+ + + +
+Implementation + + +
public fun get_token_amount(token: &Token): u64 {
+    token.amount
+}
+
+ + + +
+ + + +## Function `get_token_id_fields` + +return the creator address, collection name, token name and property_version + + +
public fun get_token_id_fields(token_id: &token::TokenId): (address, string::String, string::String, u64)
+
+ + + +
+Implementation + + +
public fun get_token_id_fields(token_id: &TokenId): (address, String, String, u64) {
+    (
+        token_id.token_data_id.creator,
+        token_id.token_data_id.collection,
+        token_id.token_data_id.name,
+        token_id.property_version,
+    )
+}
+
+ + + +
+ + + +## Function `get_token_data_id_fields` + + + +
public fun get_token_data_id_fields(token_data_id: &token::TokenDataId): (address, string::String, string::String)
+
+ + + +
+Implementation + + +
public fun get_token_data_id_fields(token_data_id: &TokenDataId): (address, String, String) {
+    (
+        token_data_id.creator,
+        token_data_id.collection,
+        token_data_id.name,
+    )
+}
+
+ + + +
+ + + +## Function `get_property_map` + +return a copy of the token property map. +if property_version = 0, return the default property map +if property_version > 0, return the property value stored at owner's token store + + +
public fun get_property_map(owner: address, token_id: token::TokenId): property_map::PropertyMap
+
+ + + +
+Implementation + + +
public fun get_property_map(owner: address, token_id: TokenId): PropertyMap acquires Collections, TokenStore {
+    assert!(balance_of(owner, token_id) > 0, error::not_found(EINSUFFICIENT_BALANCE));
+    // if property_version = 0, return default property map
+    if (token_id.property_version == 0) {
+        let creator_addr = token_id.token_data_id.creator;
+        let all_token_data = &borrow_global<Collections>(creator_addr).token_data;
+        assert!(table::contains(all_token_data, token_id.token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
+        let token_data = table::borrow(all_token_data, token_id.token_data_id);
+        token_data.default_properties
+    } else {
+        let tokens = &borrow_global<TokenStore>(owner).tokens;
+        table::borrow(tokens, token_id).token_properties
+    }
+}
+
+ + + +
+ + + +## Function `get_tokendata_maximum` + + + +
public fun get_tokendata_maximum(token_data_id: token::TokenDataId): u64
+
+ + + +
+Implementation + + +
public fun get_tokendata_maximum(token_data_id: TokenDataId): u64 acquires Collections {
+    let creator_address = token_data_id.creator;
+    assert!(exists<Collections>(creator_address), error::not_found(ECOLLECTIONS_NOT_PUBLISHED));
+    let all_token_data = &borrow_global<Collections>(creator_address).token_data;
+    assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
+
+    let token_data = table::borrow(all_token_data, token_data_id);
+    token_data.maximum
+}
+
+ + + +
+ + + +## Function `get_tokendata_uri` + + + +
public fun get_tokendata_uri(creator: address, token_data_id: token::TokenDataId): string::String
+
+ + + +
+Implementation + + +
public fun get_tokendata_uri(creator: address, token_data_id: TokenDataId): String acquires Collections {
+    assert!(exists<Collections>(creator), error::not_found(ECOLLECTIONS_NOT_PUBLISHED));
+    let all_token_data = &borrow_global<Collections>(creator).token_data;
+    assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
+
+    let token_data = table::borrow(all_token_data, token_data_id);
+    token_data.uri
+}
+
+ + + +
+ + + +## Function `get_tokendata_description` + + + +
public fun get_tokendata_description(token_data_id: token::TokenDataId): string::String
+
+ + + +
+Implementation + + +
public fun get_tokendata_description(token_data_id: TokenDataId): String acquires Collections {
+    let creator_address = token_data_id.creator;
+    assert!(exists<Collections>(creator_address), error::not_found(ECOLLECTIONS_NOT_PUBLISHED));
+    let all_token_data = &borrow_global<Collections>(creator_address).token_data;
+    assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
+
+    let token_data = table::borrow(all_token_data, token_data_id);
+    token_data.description
+}
+
+ + + +
+ + + +## Function `get_tokendata_royalty` + + + +
public fun get_tokendata_royalty(token_data_id: token::TokenDataId): token::Royalty
+
+ + + +
+Implementation + + +
public fun get_tokendata_royalty(token_data_id: TokenDataId): Royalty acquires Collections {
+    let creator_address = token_data_id.creator;
+    assert!(exists<Collections>(creator_address), error::not_found(ECOLLECTIONS_NOT_PUBLISHED));
+    let all_token_data = &borrow_global<Collections>(creator_address).token_data;
+    assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
+
+    let token_data = table::borrow(all_token_data, token_data_id);
+    token_data.royalty
+}
+
+ + + +
+ + + +## Function `get_tokendata_id` + +return the token_data_id from the token_id + + +
public fun get_tokendata_id(token_id: token::TokenId): token::TokenDataId
+
+ + + +
+Implementation + + +
public fun get_tokendata_id(token_id: TokenId): TokenDataId {
+    token_id.token_data_id
+}
+
+ + + +
+ + + +## Function `get_tokendata_mutability_config` + +return the mutation setting of the token + + +
public fun get_tokendata_mutability_config(token_data_id: token::TokenDataId): token::TokenMutabilityConfig
+
+ + + +
+Implementation + + +
public fun get_tokendata_mutability_config(token_data_id: TokenDataId): TokenMutabilityConfig acquires Collections {
+    let creator_addr = token_data_id.creator;
+    assert!(exists<Collections>(creator_addr), error::not_found(ECOLLECTIONS_NOT_PUBLISHED));
+    let all_token_data = &borrow_global<Collections>(creator_addr).token_data;
+    assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
+    table::borrow(all_token_data, token_data_id).mutability_config
+}
+
+ + + +
+ + + +## Function `get_token_mutability_maximum` + +return if the token's maximum is mutable + + +
public fun get_token_mutability_maximum(config: &token::TokenMutabilityConfig): bool
+
+ + + +
+Implementation + + +
public fun get_token_mutability_maximum(config: &TokenMutabilityConfig): bool {
+    config.maximum
+}
+
+ + + +
+ + + +## Function `get_token_mutability_royalty` + +return if the token royalty is mutable with a token mutability config + + +
public fun get_token_mutability_royalty(config: &token::TokenMutabilityConfig): bool
+
+ + + +
+Implementation + + +
public fun get_token_mutability_royalty(config: &TokenMutabilityConfig): bool {
+    config.royalty
+}
+
+ + + +
+ + + +## Function `get_token_mutability_uri` + +return if the token uri is mutable with a token mutability config + + +
public fun get_token_mutability_uri(config: &token::TokenMutabilityConfig): bool
+
+ + + +
+Implementation + + +
public fun get_token_mutability_uri(config: &TokenMutabilityConfig): bool {
+    config.uri
+}
+
+ + + +
+ + + +## Function `get_token_mutability_description` + +return if the token description is mutable with a token mutability config + + +
public fun get_token_mutability_description(config: &token::TokenMutabilityConfig): bool
+
+ + + +
+Implementation + + +
public fun get_token_mutability_description(config: &TokenMutabilityConfig): bool {
+    config.description
+}
+
+ + + +
+ + + +## Function `get_token_mutability_default_properties` + +return if the tokendata's default properties is mutable with a token mutability config + + +
public fun get_token_mutability_default_properties(config: &token::TokenMutabilityConfig): bool
+
+ + + +
+Implementation + + +
public fun get_token_mutability_default_properties(config: &TokenMutabilityConfig): bool {
+    config.properties
+}
+
+ + + +
+ + + +## Function `get_collection_mutability_config` + +return the collection mutation setting + + +
#[view]
+public fun get_collection_mutability_config(creator: address, collection_name: string::String): token::CollectionMutabilityConfig
+
+ + + +
+Implementation + + +
public fun get_collection_mutability_config(
+    creator: address,
+    collection_name: String
+): CollectionMutabilityConfig acquires Collections {
+    assert!(exists<Collections>(creator), error::not_found(ECOLLECTIONS_NOT_PUBLISHED));
+    let all_collection_data = &borrow_global<Collections>(creator).collection_data;
+    assert!(table::contains(all_collection_data, collection_name), error::not_found(ECOLLECTION_NOT_PUBLISHED));
+    table::borrow(all_collection_data, collection_name).mutability_config
+}
+
+ + + +
+ + + +## Function `get_collection_mutability_description` + +return if the collection description is mutable with a collection mutability config + + +
public fun get_collection_mutability_description(config: &token::CollectionMutabilityConfig): bool
+
+ + + +
+Implementation + + +
public fun get_collection_mutability_description(config: &CollectionMutabilityConfig): bool {
+    config.description
+}
+
+ + + +
+ + + +## Function `get_collection_mutability_uri` + +return if the collection uri is mutable with a collection mutability config + + +
public fun get_collection_mutability_uri(config: &token::CollectionMutabilityConfig): bool
+
+ + + +
+Implementation + + +
public fun get_collection_mutability_uri(config: &CollectionMutabilityConfig): bool {
+    config.uri
+}
+
+ + + +
+ + + +## Function `get_collection_mutability_maximum` + +return if the collection maximum is mutable with collection mutability config + + +
public fun get_collection_mutability_maximum(config: &token::CollectionMutabilityConfig): bool
+
+ + + +
+Implementation + + +
public fun get_collection_mutability_maximum(config: &CollectionMutabilityConfig): bool {
+    config.maximum
+}
+
+ + + +
+ + + +## Function `destroy_token_data` + + + +
fun destroy_token_data(token_data: token::TokenData)
+
+ + + +
+Implementation + + +
fun destroy_token_data(token_data: TokenData) {
+    let TokenData {
+        maximum: _,
+        largest_property_version: _,
+        supply: _,
+        uri: _,
+        royalty: _,
+        name: _,
+        description: _,
+        default_properties: _,
+        mutability_config: _,
+    } = token_data;
+}
+
+ + + +
+ + + +## Function `destroy_collection_data` + + + +
fun destroy_collection_data(collection_data: token::CollectionData)
+
+ + + +
+Implementation + + +
fun destroy_collection_data(collection_data: CollectionData) {
+    let CollectionData {
+        description: _,
+        name: _,
+        uri: _,
+        supply: _,
+        maximum: _,
+        mutability_config: _,
+    } = collection_data;
+}
+
+ + + +
+ + + +## Function `withdraw_with_event_internal` + + + +
fun withdraw_with_event_internal(account_addr: address, id: token::TokenId, amount: u64): token::Token
+
+ + + +
+Implementation + + +
fun withdraw_with_event_internal(
+    account_addr: address,
+    id: TokenId,
+    amount: u64,
+): Token acquires TokenStore {
+    // It does not make sense to withdraw 0 tokens.
+    assert!(amount > 0, error::invalid_argument(EWITHDRAW_ZERO));
+    // Make sure the account has sufficient tokens to withdraw.
+    assert!(balance_of(account_addr, id) >= amount, error::invalid_argument(EINSUFFICIENT_BALANCE));
+
+    assert!(
+        exists<TokenStore>(account_addr),
+        error::not_found(ETOKEN_STORE_NOT_PUBLISHED),
+    );
+
+    let token_store = borrow_global_mut<TokenStore>(account_addr);
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(Withdraw { id, amount })
+    };
+    event::emit_event<WithdrawEvent>(
+        &mut token_store.withdraw_events,
+        WithdrawEvent { id, amount }
+    );
+    let tokens = &mut borrow_global_mut<TokenStore>(account_addr).tokens;
+    assert!(
+        table::contains(tokens, id),
+        error::not_found(ENO_TOKEN_IN_TOKEN_STORE),
+    );
+    // balance > amount and amount > 0 indirectly asserted that balance > 0.
+    let balance = &mut table::borrow_mut(tokens, id).amount;
+    if (*balance > amount) {
+        *balance = *balance - amount;
+        Token { id, amount, token_properties: property_map::empty() }
+    } else {
+        table::remove(tokens, id)
+    }
+}
+
+ + + +
+ + + +## Function `update_token_property_internal` + + + +
fun update_token_property_internal(token_owner: address, token_id: token::TokenId, keys: vector<string::String>, values: vector<vector<u8>>, types: vector<string::String>)
+
+ + + +
+Implementation + + +
fun update_token_property_internal(
+    token_owner: address,
+    token_id: TokenId,
+    keys: vector<String>,
+    values: vector<vector<u8>>,
+    types: vector<String>,
+) acquires TokenStore {
+    let tokens = &mut borrow_global_mut<TokenStore>(token_owner).tokens;
+    assert!(table::contains(tokens, token_id), error::not_found(ENO_TOKEN_IN_TOKEN_STORE));
+
+    let value = &mut table::borrow_mut(tokens, token_id).token_properties;
+    assert_non_standard_reserved_property(&keys);
+    property_map::update_property_map(value, keys, values, types);
+}
+
+ + + +
+ + + +## Function `direct_deposit` + +Deposit the token balance into the recipient's account and emit an event. + + +
fun direct_deposit(account_addr: address, token: token::Token)
+
+ + + +
+Implementation + + +
fun direct_deposit(account_addr: address, token: Token) acquires TokenStore {
+    assert!(token.amount > 0, error::invalid_argument(ETOKEN_CANNOT_HAVE_ZERO_AMOUNT));
+    let token_store = borrow_global_mut<TokenStore>(account_addr);
+
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(Deposit { id: token.id, amount: token.amount });
+    };
+    event::emit_event<DepositEvent>(
+        &mut token_store.deposit_events,
+        DepositEvent { id: token.id, amount: token.amount },
+    );
+
+    assert!(
+        exists<TokenStore>(account_addr),
+        error::not_found(ETOKEN_STORE_NOT_PUBLISHED),
+    );
+
+    if (!table::contains(&token_store.tokens, token.id)) {
+        table::add(&mut token_store.tokens, token.id, token);
+    } else {
+        let recipient_token = table::borrow_mut(&mut token_store.tokens, token.id);
+        merge(recipient_token, token);
+    };
+}
+
+ + + +
+ + + +## Function `assert_collection_exists` + + + +
fun assert_collection_exists(creator_address: address, collection_name: string::String)
+
+ + + +
+Implementation + + +
fun assert_collection_exists(creator_address: address, collection_name: String) acquires Collections {
+    assert!(exists<Collections>(creator_address), error::not_found(ECOLLECTIONS_NOT_PUBLISHED));
+    let all_collection_data = &borrow_global<Collections>(creator_address).collection_data;
+    assert!(table::contains(all_collection_data, collection_name), error::not_found(ECOLLECTION_NOT_PUBLISHED));
+}
+
+ + + +
+ + + +## Function `assert_tokendata_exists` + + + +
fun assert_tokendata_exists(creator: &signer, token_data_id: token::TokenDataId)
+
+ + + +
+Implementation + + +
fun assert_tokendata_exists(creator: &signer, token_data_id: TokenDataId) acquires Collections {
+    let creator_addr = token_data_id.creator;
+    assert!(signer::address_of(creator) == creator_addr, error::permission_denied(ENO_MUTATE_CAPABILITY));
+    assert!(exists<Collections>(creator_addr), error::not_found(ECOLLECTIONS_NOT_PUBLISHED));
+    let all_token_data = &mut borrow_global_mut<Collections>(creator_addr).token_data;
+    assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
+}
+
+ + + +
+ + + +## Function `assert_non_standard_reserved_property` + + + +
fun assert_non_standard_reserved_property(keys: &vector<string::String>)
+
+ + + +
+Implementation + + +
fun assert_non_standard_reserved_property(keys: &vector<String>) {
+    vector::for_each_ref(keys, |key| {
+        let key: &String = key;
+        let length = string::length(key);
+        if (length >= 6) {
+            let prefix = string::sub_string(&*key, 0, 6);
+            assert!(prefix != string::utf8(b"TOKEN_"), error::permission_denied(EPROPERTY_RESERVED_BY_STANDARD));
+        };
+    });
+}
+
+ + + +
+ + + +## Function `initialize_token_script` + + + +
public entry fun initialize_token_script(_account: &signer)
+
+ + + +
+Implementation + + +
public entry fun initialize_token_script(_account: &signer) {
+    abort 0
+}
+
+ + + +
+ + + +## Function `initialize_token` + + + +
public fun initialize_token(_account: &signer, _token_id: token::TokenId)
+
+ + + +
+Implementation + + +
public fun initialize_token(_account: &signer, _token_id: TokenId) {
+    abort 0
+}
+
+ + + +
+ + + +## Specification + + + +
pragma verify = true;
+pragma aborts_if_is_strict;
+
+ + + + + +### Function `create_collection_script` + + +
public entry fun create_collection_script(creator: &signer, name: string::String, description: string::String, uri: string::String, maximum: u64, mutate_setting: vector<bool>)
+
+ + +The length of the name is up to MAX_COLLECTION_NAME_LENGTH; +The length of the uri is up to MAX_URI_LENGTH; + + +
pragma aborts_if_is_partial;
+include CreateCollectionAbortsIf;
+
+ + + + + +### Function `create_token_script` + + +
public entry fun create_token_script(account: &signer, collection: string::String, name: string::String, description: string::String, balance: u64, maximum: u64, uri: string::String, royalty_payee_address: address, royalty_points_denominator: u64, royalty_points_numerator: u64, mutate_setting: vector<bool>, property_keys: vector<string::String>, property_values: vector<vector<u8>>, property_types: vector<string::String>)
+
+ + +The length of 'mutate_setting' should be at least five. +The creator of the TokenDataId is the signer. +The token_data_id should exist in the creator's collections. +The sum of the supply and the minted amount should not exceed the maximum. + + +
pragma aborts_if_is_partial;
+let addr = signer::address_of(account);
+let token_data_id = spec_create_tokendata(addr, collection, name);
+let creator_addr = token_data_id.creator;
+let all_token_data = global<Collections>(creator_addr).token_data;
+let token_data = table::spec_get(all_token_data, token_data_id);
+aborts_if token_data_id.creator != addr;
+aborts_if !exists<Collections>(creator_addr);
+aborts_if balance <= 0;
+include CreateTokenMutabilityConfigAbortsIf;
+include CreateTokenMutabilityConfigAbortsIf;
+
+ + + + + + + +
fun spec_create_tokendata(
+   creator: address,
+   collection: String,
+   name: String): TokenDataId {
+   TokenDataId { creator, collection, name }
+}
+
+ + + + + +### Function `mint_script` + + +
public entry fun mint_script(account: &signer, token_data_address: address, collection: string::String, name: string::String, amount: u64)
+
+ + +Only the creator of the tokendata can mint tokens. + + +
pragma aborts_if_is_partial;
+let token_data_id = spec_create_token_data_id(
+    token_data_address,
+    collection,
+    name,
+);
+let addr = signer::address_of(account);
+let creator_addr = token_data_id.creator;
+let all_token_data = global<Collections>(creator_addr).token_data;
+let token_data = table::spec_get(all_token_data, token_data_id);
+aborts_if token_data_id.creator != signer::address_of(account);
+include CreateTokenDataIdAbortsIf{
+creator: token_data_address,
+collection: collection,
+name: name
+};
+include MintTokenAbortsIf {
+token_data_id: token_data_id
+};
+
+ + + + + +### Function `mutate_token_properties` + + +
public entry fun mutate_token_properties(account: &signer, token_owner: address, creator: address, collection_name: string::String, token_name: string::String, token_property_version: u64, amount: u64, keys: vector<string::String>, values: vector<vector<u8>>, types: vector<string::String>)
+
+ + +The signer is creator. + + +
pragma aborts_if_is_partial;
+let addr = signer::address_of(account);
+aborts_if addr != creator;
+include CreateTokenDataIdAbortsIf {
+    creator: creator,
+    collection: collection_name,
+    name: token_name
+};
+
+ + + + + +### Function `direct_transfer_script` + + +
public entry fun direct_transfer_script(sender: &signer, receiver: &signer, creators_address: address, collection: string::String, name: string::String, property_version: u64, amount: u64)
+
+ + + + +
pragma aborts_if_is_partial;
+include CreateTokenDataIdAbortsIf{
+    creator: creators_address,
+    collection: collection,
+    name: name
+};
+
+ + + + + +### Function `opt_in_direct_transfer` + + +
public entry fun opt_in_direct_transfer(account: &signer, opt_in: bool)
+
+ + + + +
pragma aborts_if_is_partial;
+let addr = signer::address_of(account);
+let account_addr = global<account::Account>(addr);
+aborts_if !exists<TokenStore>(addr) && !exists<account::Account>(addr);
+aborts_if !exists<TokenStore>(addr) && account_addr.guid_creation_num + 4 >= account::MAX_GUID_CREATION_NUM;
+aborts_if !exists<TokenStore>(addr) && account_addr.guid_creation_num + 4 > MAX_U64;
+aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account_addr.guid_creation_num + 9 > account::MAX_GUID_CREATION_NUM;
+aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account_addr.guid_creation_num + 9 > MAX_U64;
+aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && !exists<account::Account>(addr);
+
+ + + + + +### Function `transfer_with_opt_in` + + +
public entry fun transfer_with_opt_in(from: &signer, creator: address, collection_name: string::String, token_name: string::String, token_property_version: u64, to: address, amount: u64)
+
+ + + + +
pragma aborts_if_is_partial;
+include CreateTokenDataIdAbortsIf{
+    creator: creator,
+    collection: collection_name,
+    name: token_name
+};
+
+ + + + + +### Function `burn_by_creator` + + +
public entry fun burn_by_creator(creator: &signer, owner: address, collection: string::String, name: string::String, property_version: u64, amount: u64)
+
+ + + + +
pragma aborts_if_is_partial;
+let creator_address = signer::address_of(creator);
+let token_id = spec_create_token_id_raw(creator_address, collection, name, property_version);
+let creator_addr = token_id.token_data_id.creator;
+let collections = borrow_global_mut<Collections>(creator_address);
+let token_data = table::spec_get(
+    collections.token_data,
+    token_id.token_data_id,
+);
+aborts_if amount <= 0;
+aborts_if !exists<Collections>(creator_addr);
+aborts_if !table::spec_contains(collections.token_data, token_id.token_data_id);
+aborts_if !simple_map::spec_contains_key(token_data.default_properties.map, std::string::spec_utf8(BURNABLE_BY_CREATOR));
+
+ + + + + +### Function `burn` + + +
public entry fun burn(owner: &signer, creators_address: address, collection: string::String, name: string::String, property_version: u64, amount: u64)
+
+ + +The token_data_id should exist in token_data. + + +
pragma aborts_if_is_partial;
+let token_id = spec_create_token_id_raw(creators_address, collection, name, property_version);
+let creator_addr = token_id.token_data_id.creator;
+let collections = borrow_global_mut<Collections>(creator_addr);
+let token_data = table::spec_get(
+    collections.token_data,
+    token_id.token_data_id,
+);
+include CreateTokenDataIdAbortsIf {
+creator: creators_address
+};
+aborts_if amount <= 0;
+aborts_if !exists<Collections>(creator_addr);
+aborts_if !table::spec_contains(collections.token_data, token_id.token_data_id);
+aborts_if !simple_map::spec_contains_key(token_data.default_properties.map, std::string::spec_utf8(BURNABLE_BY_OWNER));
+aborts_if !string::spec_internal_check_utf8(BURNABLE_BY_OWNER);
+
+ + + + + + + +
fun spec_create_token_id_raw(
+   creator: address,
+   collection: String,
+   name: String,
+   property_version: u64,
+): TokenId {
+   let token_data_id = TokenDataId { creator, collection, name };
+   TokenId {
+       token_data_id,
+       property_version
+   }
+}
+
+ + + + + +### Function `mutate_collection_description` + + +
public fun mutate_collection_description(creator: &signer, collection_name: string::String, description: string::String)
+
+ + +The description of Collection is mutable. + + +
let addr = signer::address_of(creator);
+let account = global<account::Account>(addr);
+let collection_data = table::spec_get(global<Collections>(addr).collection_data, collection_name);
+include AssertCollectionExistsAbortsIf {
+    creator_address: addr,
+    collection_name: collection_name
+};
+aborts_if !collection_data.mutability_config.description;
+aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && !exists<account::Account>(addr);
+aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM;
+aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account.guid_creation_num + 9 > MAX_U64;
+
+ + + + + +### Function `mutate_collection_uri` + + +
public fun mutate_collection_uri(creator: &signer, collection_name: string::String, uri: string::String)
+
+ + +The uri of Collection is mutable. + + +
let addr = signer::address_of(creator);
+let account = global<account::Account>(addr);
+let collection_data = table::spec_get(global<Collections>(addr).collection_data, collection_name);
+aborts_if len(uri.bytes) > MAX_URI_LENGTH;
+include AssertCollectionExistsAbortsIf {
+    creator_address: addr,
+    collection_name: collection_name
+};
+aborts_if !collection_data.mutability_config.uri;
+aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && !exists<account::Account>(addr);
+aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM;
+aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account.guid_creation_num + 9 > MAX_U64;
+
+ + + + + +### Function `mutate_collection_maximum` + + +
public fun mutate_collection_maximum(creator: &signer, collection_name: string::String, maximum: u64)
+
+ + +Cannot change maximum from 0 and cannot change maximum to 0. +The maximum should be more than the supply. +The maximum of the Collection is mutable. + + +
let addr = signer::address_of(creator);
+let account = global<account::Account>(addr);
+let collection_data = table::spec_get(global<Collections>(addr).collection_data, collection_name);
+include AssertCollectionExistsAbortsIf {
+    creator_address: addr,
+    collection_name: collection_name
+};
+aborts_if collection_data.maximum == 0 || maximum == 0;
+aborts_if maximum < collection_data.supply;
+aborts_if !collection_data.mutability_config.maximum;
+aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && !exists<account::Account>(addr);
+aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM;
+aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account.guid_creation_num + 9 > MAX_U64;
+
+ + + + + +### Function `mutate_tokendata_maximum` + + +
public fun mutate_tokendata_maximum(creator: &signer, token_data_id: token::TokenDataId, maximum: u64)
+
+ + +Cannot change maximum from 0 and cannot change maximum to 0. +The maximum should be more than the supply. +The token maximum is mutable. + + +
let addr = signer::address_of(creator);
+let account = global<account::Account>(addr);
+let all_token_data = global<Collections>(token_data_id.creator).token_data;
+let token_data = table::spec_get(all_token_data, token_data_id);
+include AssertTokendataExistsAbortsIf;
+aborts_if token_data.maximum == 0 || maximum == 0;
+aborts_if maximum < token_data.supply;
+aborts_if !token_data.mutability_config.maximum;
+aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && !exists<account::Account>(addr);
+aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM;
+aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account.guid_creation_num + 9 > MAX_U64;
+
+ + + + + +### Function `mutate_tokendata_uri` + + +
public fun mutate_tokendata_uri(creator: &signer, token_data_id: token::TokenDataId, uri: string::String)
+
+ + +The length of uri should be less than MAX_URI_LENGTH. +The creator of token_data_id should exist in Collections. +The token uri is mutable. + + +
let addr = signer::address_of(creator);
+let account = global<account::Account>(addr);
+let all_token_data = global<Collections>(token_data_id.creator).token_data;
+let token_data = table::spec_get(all_token_data, token_data_id);
+include AssertTokendataExistsAbortsIf;
+aborts_if len(uri.bytes) > MAX_URI_LENGTH;
+aborts_if !token_data.mutability_config.uri;
+aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && !exists<account::Account>(addr);
+aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM;
+aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account.guid_creation_num + 9 > MAX_U64;
+
+ + + + + +### Function `mutate_tokendata_royalty` + + +
public fun mutate_tokendata_royalty(creator: &signer, token_data_id: token::TokenDataId, royalty: token::Royalty)
+
+ + +The token royalty is mutable + + +
include AssertTokendataExistsAbortsIf;
+let addr = signer::address_of(creator);
+let account = global<account::Account>(addr);
+let all_token_data = global<Collections>(token_data_id.creator).token_data;
+let token_data = table::spec_get(all_token_data, token_data_id);
+aborts_if !token_data.mutability_config.royalty;
+aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && !exists<account::Account>(addr);
+aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM;
+aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account.guid_creation_num + 9 > MAX_U64;
+
+ + + + + +### Function `mutate_tokendata_description` + + +
public fun mutate_tokendata_description(creator: &signer, token_data_id: token::TokenDataId, description: string::String)
+
+ + +The token description is mutable + + +
include AssertTokendataExistsAbortsIf;
+let addr = signer::address_of(creator);
+let account = global<account::Account>(addr);
+let all_token_data = global<Collections>(token_data_id.creator).token_data;
+let token_data = table::spec_get(all_token_data, token_data_id);
+aborts_if !token_data.mutability_config.description;
+aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && !exists<account::Account>(addr);
+aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM;
+aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account.guid_creation_num + 9 > MAX_U64;
+
+ + + + + +### Function `mutate_tokendata_property` + + +
public fun mutate_tokendata_property(creator: &signer, token_data_id: token::TokenDataId, keys: vector<string::String>, values: vector<vector<u8>>, types: vector<string::String>)
+
+ + +The property map is mutable + + +
pragma aborts_if_is_partial;
+let all_token_data = global<Collections>(token_data_id.creator).token_data;
+let token_data = table::spec_get(all_token_data, token_data_id);
+include AssertTokendataExistsAbortsIf;
+aborts_if len(keys) != len(values);
+aborts_if len(keys) != len(types);
+aborts_if !token_data.mutability_config.properties;
+
+ + + + + +### Function `mutate_one_token` + + +
public fun mutate_one_token(account: &signer, token_owner: address, token_id: token::TokenId, keys: vector<string::String>, values: vector<vector<u8>>, types: vector<string::String>): token::TokenId
+
+ + +The signer is creator. +The token_data_id should exist in token_data. +The property map is mutable. + + +
pragma aborts_if_is_partial;
+let creator = token_id.token_data_id.creator;
+let addr = signer::address_of(account);
+let all_token_data = global<Collections>(creator).token_data;
+let token_data = table::spec_get(all_token_data, token_id.token_data_id);
+aborts_if addr != creator;
+aborts_if !exists<Collections>(creator);
+aborts_if !table::spec_contains(all_token_data, token_id.token_data_id);
+aborts_if !token_data.mutability_config.properties && !simple_map::spec_contains_key(token_data.default_properties.map, std::string::spec_utf8(TOKEN_PROPERTY_MUTABLE));
+
+ + + + + +### Function `create_royalty` + + +
public fun create_royalty(royalty_points_numerator: u64, royalty_points_denominator: u64, payee_address: address): token::Royalty
+
+ + + + +
include CreateRoyaltyAbortsIf;
+
+ + +The royalty_points_numerator should less than royalty_points_denominator. + + + + + +
schema CreateRoyaltyAbortsIf {
+    royalty_points_numerator: u64;
+    royalty_points_denominator: u64;
+    payee_address: address;
+    aborts_if royalty_points_numerator > royalty_points_denominator;
+    aborts_if !exists<account::Account>(payee_address);
+}
+
+ + + + + +### Function `deposit_token` + + +
public fun deposit_token(account: &signer, token: token::Token)
+
+ + + + +
pragma verify = false;
+pragma aborts_if_is_partial;
+let account_addr = signer::address_of(account);
+include !exists<TokenStore>(account_addr) ==> InitializeTokenStore;
+let token_id = token.id;
+let token_amount = token.amount;
+include DirectDepositAbortsIf;
+
+ + + + + +### Function `direct_deposit_with_opt_in` + + +
public fun direct_deposit_with_opt_in(account_addr: address, token: token::Token)
+
+ + +The token can direct_transfer. + + +
let opt_in_transfer = global<TokenStore>(account_addr).direct_transfer;
+aborts_if !exists<TokenStore>(account_addr);
+aborts_if !opt_in_transfer;
+let token_id = token.id;
+let token_amount = token.amount;
+include DirectDepositAbortsIf;
+
+ + + + + +### Function `direct_transfer` + + +
public fun direct_transfer(sender: &signer, receiver: &signer, token_id: token::TokenId, amount: u64)
+
+ + +Cannot withdraw 0 tokens. +Make sure the account has sufficient tokens to withdraw. + + +
pragma verify = false;
+
+ + + + + +### Function `initialize_token_store` + + +
public fun initialize_token_store(account: &signer)
+
+ + + + +
include InitializeTokenStore;
+
+ + + + + + + +
schema InitializeTokenStore {
+    account: signer;
+    let addr = signer::address_of(account);
+    let account_addr = global<account::Account>(addr);
+    aborts_if !exists<TokenStore>(addr) && !exists<account::Account>(addr);
+    aborts_if !exists<TokenStore>(addr) && account_addr.guid_creation_num + 4 >= account::MAX_GUID_CREATION_NUM;
+    aborts_if !exists<TokenStore>(addr) && account_addr.guid_creation_num + 4 > MAX_U64;
+}
+
+ + + + + +### Function `merge` + + +
public fun merge(dst_token: &mut token::Token, source_token: token::Token)
+
+ + + + +
aborts_if dst_token.id != source_token.id;
+aborts_if dst_token.amount + source_token.amount > MAX_U64;
+
+ + + + + +### Function `split` + + +
public fun split(dst_token: &mut token::Token, amount: u64): token::Token
+
+ + + + +
aborts_if dst_token.id.property_version != 0;
+aborts_if dst_token.amount <= amount;
+aborts_if amount <= 0;
+
+ + + + + +### Function `transfer` + + +
public fun transfer(from: &signer, id: token::TokenId, to: address, amount: u64)
+
+ + + + +
let opt_in_transfer = global<TokenStore>(to).direct_transfer;
+let account_addr = signer::address_of(from);
+aborts_if !opt_in_transfer;
+pragma aborts_if_is_partial;
+include WithdrawWithEventInternalAbortsIf;
+
+ + + + + +### Function `withdraw_with_capability` + + +
public fun withdraw_with_capability(withdraw_proof: token::WithdrawCapability): token::Token
+
+ + + + +
let now_seconds = global<timestamp::CurrentTimeMicroseconds>(@aptos_framework).microseconds;
+aborts_if !exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+aborts_if now_seconds / timestamp::MICRO_CONVERSION_FACTOR > withdraw_proof.expiration_sec;
+include WithdrawWithEventInternalAbortsIf{
+account_addr: withdraw_proof.token_owner,
+id: withdraw_proof.token_id,
+amount: withdraw_proof.amount};
+
+ + + + + +### Function `partial_withdraw_with_capability` + + +
public fun partial_withdraw_with_capability(withdraw_proof: token::WithdrawCapability, withdraw_amount: u64): (token::Token, option::Option<token::WithdrawCapability>)
+
+ + + + +
let now_seconds = global<timestamp::CurrentTimeMicroseconds>(@aptos_framework).microseconds;
+aborts_if !exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+aborts_if now_seconds / timestamp::MICRO_CONVERSION_FACTOR > withdraw_proof.expiration_sec;
+aborts_if withdraw_amount > withdraw_proof.amount;
+include WithdrawWithEventInternalAbortsIf{
+    account_addr: withdraw_proof.token_owner,
+    id: withdraw_proof.token_id,
+    amount: withdraw_amount
+};
+
+ + + + + +### Function `withdraw_token` + + +
public fun withdraw_token(account: &signer, id: token::TokenId, amount: u64): token::Token
+
+ + +Cannot withdraw 0 tokens. +Make sure the account has sufficient tokens to withdraw. + + +
let account_addr = signer::address_of(account);
+include WithdrawWithEventInternalAbortsIf;
+
+ + + + + +### Function `create_collection` + + +
public fun create_collection(creator: &signer, name: string::String, description: string::String, uri: string::String, maximum: u64, mutate_setting: vector<bool>)
+
+ + +The length of the name is up to MAX_COLLECTION_NAME_LENGTH; +The length of the uri is up to MAX_URI_LENGTH; +The collection_data should not exist before you create it. + + +
pragma aborts_if_is_partial;
+let account_addr = signer::address_of(creator);
+aborts_if len(name.bytes) > 128;
+aborts_if len(uri.bytes) > 512;
+include CreateCollectionAbortsIf;
+
+ + + + + + + +
schema CreateCollectionAbortsIf {
+    creator: signer;
+    name: String;
+    description: String;
+    uri: String;
+    maximum: u64;
+    mutate_setting: vector<bool>;
+    let addr = signer::address_of(creator);
+    let account = global<account::Account>(addr);
+    let collection = global<Collections>(addr);
+    let b = !exists<Collections>(addr);
+    let collection_data = global<Collections>(addr).collection_data;
+    aborts_if b && !exists<account::Account>(addr);
+    aborts_if len(name.bytes) > MAX_COLLECTION_NAME_LENGTH;
+    aborts_if len(uri.bytes) > MAX_URI_LENGTH;
+    aborts_if b && account.guid_creation_num + 3 >= account::MAX_GUID_CREATION_NUM;
+    aborts_if b && account.guid_creation_num + 3 > MAX_U64;
+    include CreateCollectionMutabilityConfigAbortsIf;
+}
+
+ + + + + +### Function `check_collection_exists` + + +
public fun check_collection_exists(creator: address, name: string::String): bool
+
+ + + + +
aborts_if !exists<Collections>(creator);
+
+ + + + + +### Function `check_tokendata_exists` + + +
public fun check_tokendata_exists(creator: address, collection_name: string::String, token_name: string::String): bool
+
+ + +The length of collection should be less than MAX_COLLECTION_NAME_LENGTH. +The length of name should be less than MAX_NFT_NAME_LENGTH. + + +
aborts_if !exists<Collections>(creator);
+include CreateTokenDataIdAbortsIf {
+    creator: creator,
+    collection: collection_name,
+    name: token_name
+};
+
+ + + + + +### Function `create_tokendata` + + +
public fun create_tokendata(account: &signer, collection: string::String, name: string::String, description: string::String, maximum: u64, uri: string::String, royalty_payee_address: address, royalty_points_denominator: u64, royalty_points_numerator: u64, token_mutate_config: token::TokenMutabilityConfig, property_keys: vector<string::String>, property_values: vector<vector<u8>>, property_types: vector<string::String>): token::TokenDataId
+
+ + +The length of collection should be less than MAX_COLLECTION_NAME_LENGTH. +The length of name should be less than MAX_NFT_NAME_LENGTH. + + +
pragma verify = false;
+pragma aborts_if_is_partial;
+let account_addr = signer::address_of(account);
+let collections = global<Collections>(account_addr);
+let token_data_id = spec_create_token_data_id(account_addr, collection, name);
+let Collection = table::spec_get(collections.collection_data, token_data_id.collection);
+let length = len(property_keys);
+aborts_if len(name.bytes) > MAX_NFT_NAME_LENGTH;
+aborts_if len(collection.bytes) > MAX_COLLECTION_NAME_LENGTH;
+aborts_if len(uri.bytes) > MAX_URI_LENGTH;
+aborts_if royalty_points_numerator > royalty_points_denominator;
+aborts_if !exists<Collections>(account_addr);
+include CreateTokenDataIdAbortsIf {
+    creator: account_addr,
+    collection: collection,
+    name: name
+};
+aborts_if !table::spec_contains(collections.collection_data, collection);
+aborts_if table::spec_contains(collections.token_data, token_data_id);
+aborts_if Collection.maximum > 0 && Collection.supply + 1 > MAX_U64;
+aborts_if Collection.maximum > 0 && Collection.maximum < Collection.supply + 1;
+include CreateRoyaltyAbortsIf {
+    payee_address: royalty_payee_address
+};
+aborts_if length > property_map::MAX_PROPERTY_MAP_SIZE;
+aborts_if length != len(property_values);
+aborts_if length != len(property_types);
+
+ + + + + + + +
fun spec_create_token_data_id(
+   creator: address,
+   collection: String,
+   name: String,
+): TokenDataId {
+   TokenDataId { creator, collection, name }
+}
+
+ + + + + +### Function `get_collection_supply` + + +
public fun get_collection_supply(creator_address: address, collection_name: string::String): option::Option<u64>
+
+ + + + +
include AssertCollectionExistsAbortsIf;
+
+ + + + + +### Function `get_collection_description` + + +
public fun get_collection_description(creator_address: address, collection_name: string::String): string::String
+
+ + + + +
include AssertCollectionExistsAbortsIf;
+
+ + + + + +### Function `get_collection_uri` + + +
public fun get_collection_uri(creator_address: address, collection_name: string::String): string::String
+
+ + + + +
include AssertCollectionExistsAbortsIf;
+
+ + + + + +### Function `get_collection_maximum` + + +
public fun get_collection_maximum(creator_address: address, collection_name: string::String): u64
+
+ + + + +
include AssertCollectionExistsAbortsIf;
+
+ + + + + +### Function `get_token_supply` + + +
public fun get_token_supply(creator_address: address, token_data_id: token::TokenDataId): option::Option<u64>
+
+ + + + +
aborts_if !exists<Collections>(creator_address);
+let all_token_data = global<Collections>(creator_address).token_data;
+aborts_if !table::spec_contains(all_token_data, token_data_id);
+
+ + + + + +### Function `get_tokendata_largest_property_version` + + +
public fun get_tokendata_largest_property_version(creator_address: address, token_data_id: token::TokenDataId): u64
+
+ + + + +
aborts_if !exists<Collections>(creator_address);
+let all_token_data = global<Collections>(creator_address).token_data;
+aborts_if !table::spec_contains(all_token_data, token_data_id);
+
+ + + + + +### Function `create_token_mutability_config` + + +
public fun create_token_mutability_config(mutate_setting: &vector<bool>): token::TokenMutabilityConfig
+
+ + +The length of 'mutate_setting' should be more than five. +The mutate_setting should have a value. + + +
include CreateTokenMutabilityConfigAbortsIf;
+
+ + + + + + + +
schema CreateTokenMutabilityConfigAbortsIf {
+    mutate_setting: vector<bool>;
+    aborts_if len(mutate_setting) < 5;
+    aborts_if !vector::spec_contains(mutate_setting, mutate_setting[TOKEN_MAX_MUTABLE_IND]);
+    aborts_if !vector::spec_contains(mutate_setting, mutate_setting[TOKEN_URI_MUTABLE_IND]);
+    aborts_if !vector::spec_contains(mutate_setting, mutate_setting[TOKEN_ROYALTY_MUTABLE_IND]);
+    aborts_if !vector::spec_contains(mutate_setting, mutate_setting[TOKEN_DESCRIPTION_MUTABLE_IND]);
+    aborts_if !vector::spec_contains(mutate_setting, mutate_setting[TOKEN_PROPERTY_MUTABLE_IND]);
+}
+
+ + + + + +### Function `create_collection_mutability_config` + + +
public fun create_collection_mutability_config(mutate_setting: &vector<bool>): token::CollectionMutabilityConfig
+
+ + + + +
include CreateCollectionMutabilityConfigAbortsIf;
+
+ + + + + + + +
schema CreateCollectionMutabilityConfigAbortsIf {
+    mutate_setting: vector<bool>;
+    aborts_if len(mutate_setting) < 3;
+    aborts_if !vector::spec_contains(mutate_setting, mutate_setting[COLLECTION_DESCRIPTION_MUTABLE_IND]);
+    aborts_if !vector::spec_contains(mutate_setting, mutate_setting[COLLECTION_URI_MUTABLE_IND]);
+    aborts_if !vector::spec_contains(mutate_setting, mutate_setting[COLLECTION_MAX_MUTABLE_IND]);
+}
+
+ + + + + +### Function `mint_token` + + +
public fun mint_token(account: &signer, token_data_id: token::TokenDataId, amount: u64): token::TokenId
+
+ + +The creator of the TokenDataId is the signer. +The token_data_id should exist in the creator's collections. +The sum of the supply and the amount of minted Tokens is less than the maximum. + + +
pragma verify = false;
+
+ + + + + + + +
schema MintTokenAbortsIf {
+    account: signer;
+    token_data_id: TokenDataId;
+    amount: u64;
+    let addr = signer::address_of(account);
+    let creator_addr = token_data_id.creator;
+    let all_token_data = global<Collections>(creator_addr).token_data;
+    let token_data = table::spec_get(all_token_data, token_data_id);
+    aborts_if token_data_id.creator != addr;
+    aborts_if !table::spec_contains(all_token_data, token_data_id);
+    aborts_if token_data.maximum > 0 && token_data.supply + amount > token_data.maximum;
+    aborts_if !exists<Collections>(creator_addr);
+    aborts_if amount <= 0;
+    include InitializeTokenStore;
+    let token_id = create_token_id(token_data_id, 0);
+}
+
+ + + + + +### Function `mint_token_to` + + +
public fun mint_token_to(account: &signer, receiver: address, token_data_id: token::TokenDataId, amount: u64)
+
+ + + + +
let addr = signer::address_of(account);
+let opt_in_transfer = global<TokenStore>(receiver).direct_transfer;
+let creator_addr = token_data_id.creator;
+let all_token_data = global<Collections>(creator_addr).token_data;
+let token_data = table::spec_get(all_token_data, token_data_id);
+aborts_if !exists<TokenStore>(receiver);
+aborts_if !opt_in_transfer;
+aborts_if token_data_id.creator != addr;
+aborts_if !table::spec_contains(all_token_data, token_data_id);
+aborts_if token_data.maximum > 0 && token_data.supply + amount > token_data.maximum;
+aborts_if amount <= 0;
+aborts_if !exists<Collections>(creator_addr);
+let token_id = create_token_id(token_data_id, 0);
+include DirectDepositAbortsIf {
+    account_addr: receiver,
+    token_id: token_id,
+    token_amount: amount,
+};
+
+ + + + + +### Function `create_token_data_id` + + +
public fun create_token_data_id(creator: address, collection: string::String, name: string::String): token::TokenDataId
+
+ + +The length of collection should be less than MAX_COLLECTION_NAME_LENGTH. +The length of name should be less than MAX_NFT_NAME_LENGTH. + + +
include CreateTokenDataIdAbortsIf;
+
+ + + + + + + +
schema CreateTokenDataIdAbortsIf {
+    creator: address;
+    collection: String;
+    name: String;
+    aborts_if len(collection.bytes) > MAX_COLLECTION_NAME_LENGTH;
+    aborts_if len(name.bytes) > MAX_NFT_NAME_LENGTH;
+}
+
+ + + + + +### Function `create_token_id_raw` + + +
public fun create_token_id_raw(creator: address, collection: string::String, name: string::String, property_version: u64): token::TokenId
+
+ + +The length of collection should be less than MAX_COLLECTION_NAME_LENGTH. +The length of name should be less than MAX_NFT_NAME_LENGTH. + + +
include CreateTokenDataIdAbortsIf;
+
+ + + + + + + +
fun spec_balance_of(owner: address, id: TokenId): u64 {
+   let token_store = borrow_global<TokenStore>(owner);
+   if (!exists<TokenStore>(owner)) {
+       0
+   }
+   else if (table::spec_contains(token_store.tokens, id)) {
+       table::spec_get(token_store.tokens, id).amount
+   } else {
+       0
+   }
+}
+
+ + + + + +### Function `get_royalty` + + +
public fun get_royalty(token_id: token::TokenId): token::Royalty
+
+ + + + +
include GetTokendataRoyaltyAbortsIf {
+    token_data_id: token_id.token_data_id
+};
+
+ + + + + +### Function `get_property_map` + + +
public fun get_property_map(owner: address, token_id: token::TokenId): property_map::PropertyMap
+
+ + + + +
let creator_addr = token_id.token_data_id.creator;
+let all_token_data = global<Collections>(creator_addr).token_data;
+aborts_if spec_balance_of(owner, token_id) <= 0;
+aborts_if token_id.property_version == 0 && !table::spec_contains(all_token_data, token_id.token_data_id);
+aborts_if token_id.property_version == 0 && !exists<Collections>(creator_addr);
+
+ + + + + +### Function `get_tokendata_maximum` + + +
public fun get_tokendata_maximum(token_data_id: token::TokenDataId): u64
+
+ + + + +
let creator_address = token_data_id.creator;
+aborts_if !exists<Collections>(creator_address);
+let all_token_data = global<Collections>(creator_address).token_data;
+aborts_if !table::spec_contains(all_token_data, token_data_id);
+
+ + + + + +### Function `get_tokendata_uri` + + +
public fun get_tokendata_uri(creator: address, token_data_id: token::TokenDataId): string::String
+
+ + + + +
aborts_if !exists<Collections>(creator);
+let all_token_data = global<Collections>(creator).token_data;
+aborts_if !table::spec_contains(all_token_data, token_data_id);
+
+ + + + + +### Function `get_tokendata_description` + + +
public fun get_tokendata_description(token_data_id: token::TokenDataId): string::String
+
+ + + + +
let creator_address = token_data_id.creator;
+aborts_if !exists<Collections>(creator_address);
+let all_token_data = global<Collections>(creator_address).token_data;
+aborts_if !table::spec_contains(all_token_data, token_data_id);
+
+ + + + + +### Function `get_tokendata_royalty` + + +
public fun get_tokendata_royalty(token_data_id: token::TokenDataId): token::Royalty
+
+ + + + +
include GetTokendataRoyaltyAbortsIf;
+
+ + + + + + + +
schema GetTokendataRoyaltyAbortsIf {
+    token_data_id: TokenDataId;
+    let creator_address = token_data_id.creator;
+    let all_token_data = global<Collections>(creator_address).token_data;
+    aborts_if !exists<Collections>(creator_address);
+    aborts_if !table::spec_contains(all_token_data, token_data_id);
+}
+
+ + + + + +### Function `get_tokendata_mutability_config` + + +
public fun get_tokendata_mutability_config(token_data_id: token::TokenDataId): token::TokenMutabilityConfig
+
+ + + + +
let creator_addr = token_data_id.creator;
+let all_token_data = global<Collections>(creator_addr).token_data;
+aborts_if !exists<Collections>(creator_addr);
+aborts_if !table::spec_contains(all_token_data, token_data_id);
+
+ + + + + +### Function `get_collection_mutability_config` + + +
#[view]
+public fun get_collection_mutability_config(creator: address, collection_name: string::String): token::CollectionMutabilityConfig
+
+ + + + +
let all_collection_data = global<Collections>(creator).collection_data;
+aborts_if !exists<Collections>(creator);
+aborts_if !table::spec_contains(all_collection_data, collection_name);
+
+ + + + + +### Function `withdraw_with_event_internal` + + +
fun withdraw_with_event_internal(account_addr: address, id: token::TokenId, amount: u64): token::Token
+
+ + + + +
include WithdrawWithEventInternalAbortsIf;
+
+ + + + + + + +
schema WithdrawWithEventInternalAbortsIf {
+    account_addr: address;
+    id: TokenId;
+    amount: u64;
+    let tokens = global<TokenStore>(account_addr).tokens;
+    aborts_if amount <= 0;
+    aborts_if spec_balance_of(account_addr, id) < amount;
+    aborts_if !exists<TokenStore>(account_addr);
+    aborts_if !table::spec_contains(tokens, id);
+}
+
+ + + + + +### Function `update_token_property_internal` + + +
fun update_token_property_internal(token_owner: address, token_id: token::TokenId, keys: vector<string::String>, values: vector<vector<u8>>, types: vector<string::String>)
+
+ + + + +
pragma aborts_if_is_partial;
+let tokens = global<TokenStore>(token_owner).tokens;
+aborts_if !exists<TokenStore>(token_owner);
+aborts_if !table::spec_contains(tokens, token_id);
+
+ + + + + +### Function `direct_deposit` + + +
fun direct_deposit(account_addr: address, token: token::Token)
+
+ + + + +
let token_id = token.id;
+let token_amount = token.amount;
+include DirectDepositAbortsIf;
+
+ + + + + + + +
schema DirectDepositAbortsIf {
+    account_addr: address;
+    token_id: TokenId;
+    token_amount: u64;
+    let token_store = global<TokenStore>(account_addr);
+    let recipient_token = table::spec_get(token_store.tokens, token_id);
+    let b = table::spec_contains(token_store.tokens, token_id);
+    aborts_if token_amount <= 0;
+    aborts_if !exists<TokenStore>(account_addr);
+    aborts_if b && recipient_token.id != token_id;
+    aborts_if b && recipient_token.amount + token_amount > MAX_U64;
+}
+
+ + + + + +### Function `assert_collection_exists` + + +
fun assert_collection_exists(creator_address: address, collection_name: string::String)
+
+ + +The collection_name should exist in collection_data of the creator_address's Collections. + + +
include AssertCollectionExistsAbortsIf;
+
+ + + + + + + +
schema AssertCollectionExistsAbortsIf {
+    creator_address: address;
+    collection_name: String;
+    let all_collection_data = global<Collections>(creator_address).collection_data;
+    aborts_if !exists<Collections>(creator_address);
+    aborts_if !table::spec_contains(all_collection_data, collection_name);
+}
+
+ + + + + +### Function `assert_tokendata_exists` + + +
fun assert_tokendata_exists(creator: &signer, token_data_id: token::TokenDataId)
+
+ + +The creator of token_data_id should be signer. +The creator of token_data_id exists in Collections. +The token_data_id is in the all_token_data. + + +
include AssertTokendataExistsAbortsIf;
+
+ + + + + + + +
schema AssertTokendataExistsAbortsIf {
+    creator: signer;
+    token_data_id: TokenDataId;
+    let creator_addr = token_data_id.creator;
+    let addr = signer::address_of(creator);
+    aborts_if addr != creator_addr;
+    aborts_if !exists<Collections>(creator_addr);
+    let all_token_data = global<Collections>(creator_addr).token_data;
+    aborts_if !table::spec_contains(all_token_data, token_data_id);
+}
+
+ + + + + +### Function `assert_non_standard_reserved_property` + + +
fun assert_non_standard_reserved_property(keys: &vector<string::String>)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `initialize_token_script` + + +
public entry fun initialize_token_script(_account: &signer)
+
+ + +Deprecated function + + +
pragma verify = false;
+
+ + + + + +### Function `initialize_token` + + +
public fun initialize_token(_account: &signer, _token_id: token::TokenId)
+
+ + +Deprecated function + + +
pragma verify = false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-token/tests/compiler-v2-doc/token_coin_swap.md b/aptos-move/framework/aptos-token/tests/compiler-v2-doc/token_coin_swap.md new file mode 100644 index 0000000000000..5d66a8ffd04c8 --- /dev/null +++ b/aptos-move/framework/aptos-token/tests/compiler-v2-doc/token_coin_swap.md @@ -0,0 +1,633 @@ + + + +# Module `0x3::token_coin_swap` + +Deprecated module + + +- [Struct `TokenCoinSwap`](#0x3_token_coin_swap_TokenCoinSwap) +- [Resource `TokenListings`](#0x3_token_coin_swap_TokenListings) +- [Struct `TokenEscrow`](#0x3_token_coin_swap_TokenEscrow) +- [Resource `TokenStoreEscrow`](#0x3_token_coin_swap_TokenStoreEscrow) +- [Struct `TokenListingEvent`](#0x3_token_coin_swap_TokenListingEvent) +- [Struct `TokenSwapEvent`](#0x3_token_coin_swap_TokenSwapEvent) +- [Constants](#@Constants_0) +- [Function `does_listing_exist`](#0x3_token_coin_swap_does_listing_exist) +- [Function `exchange_coin_for_token`](#0x3_token_coin_swap_exchange_coin_for_token) +- [Function `list_token_for_swap`](#0x3_token_coin_swap_list_token_for_swap) +- [Function `initialize_token_listing`](#0x3_token_coin_swap_initialize_token_listing) +- [Function `initialize_token_store_escrow`](#0x3_token_coin_swap_initialize_token_store_escrow) +- [Function `deposit_token_to_escrow`](#0x3_token_coin_swap_deposit_token_to_escrow) +- [Function `withdraw_token_from_escrow_internal`](#0x3_token_coin_swap_withdraw_token_from_escrow_internal) +- [Function `withdraw_token_from_escrow`](#0x3_token_coin_swap_withdraw_token_from_escrow) +- [Function `cancel_token_listing`](#0x3_token_coin_swap_cancel_token_listing) +- [Specification](#@Specification_1) + + +
use 0x1::error;
+use 0x1::event;
+use 0x1::string;
+use 0x1::table;
+use 0x1::type_info;
+use 0x3::token;
+
+ + + + + +## Struct `TokenCoinSwap` + +TokenCoinSwap records a swap ask for swapping token_amount with CoinType with a minimal price per token + + +
struct TokenCoinSwap<CoinType> has drop, store
+
+ + + +
+Fields + + +
+
+token_amount: u64 +
+
+ +
+
+min_price_per_token: u64 +
+
+ +
+
+ + +
+ + + +## Resource `TokenListings` + +The listing of all tokens for swapping stored at token owner's account + + +
struct TokenListings<CoinType> has key
+
+ + + +
+Fields + + +
+
+listings: table::Table<token::TokenId, token_coin_swap::TokenCoinSwap<CoinType>> +
+
+ +
+
+listing_events: event::EventHandle<token_coin_swap::TokenListingEvent> +
+
+ +
+
+swap_events: event::EventHandle<token_coin_swap::TokenSwapEvent> +
+
+ +
+
+ + +
+ + + +## Struct `TokenEscrow` + +TokenEscrow holds the tokens that cannot be withdrawn or transferred + + +
struct TokenEscrow has store
+
+ + + +
+Fields + + +
+
+token: token::Token +
+
+ +
+
+locked_until_secs: u64 +
+
+ +
+
+ + +
+ + + +## Resource `TokenStoreEscrow` + +TokenStoreEscrow holds a map of token id to their tokenEscrow + + +
struct TokenStoreEscrow has key
+
+ + + +
+Fields + + +
+
+token_escrows: table::Table<token::TokenId, token_coin_swap::TokenEscrow> +
+
+ +
+
+ + +
+ + + +## Struct `TokenListingEvent` + + + +
struct TokenListingEvent has drop, store
+
+ + + +
+Fields + + +
+
+token_id: token::TokenId +
+
+ +
+
+amount: u64 +
+
+ +
+
+min_price: u64 +
+
+ +
+
+locked_until_secs: u64 +
+
+ +
+
+coin_type_info: type_info::TypeInfo +
+
+ +
+
+ + +
+ + + +## Struct `TokenSwapEvent` + + + +
struct TokenSwapEvent has drop, store
+
+ + + +
+Fields + + +
+
+token_id: token::TokenId +
+
+ +
+
+token_buyer: address +
+
+ +
+
+token_amount: u64 +
+
+ +
+
+coin_amount: u64 +
+
+ +
+
+coin_type_info: type_info::TypeInfo +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Deprecated module + + +
const EDEPRECATED_MODULE: u64 = 8;
+
+ + + + + +Not enough coin to buy token + + +
const ENOT_ENOUGH_COIN: u64 = 7;
+
+ + + + + +Token already listed + + +
const ETOKEN_ALREADY_LISTED: u64 = 1;
+
+ + + + + +Token buy amount doesn't match listing amount + + +
const ETOKEN_AMOUNT_NOT_MATCH: u64 = 6;
+
+ + + + + +Token cannot be moved out of escrow before the lockup time + + +
const ETOKEN_CANNOT_MOVE_OUT_OF_ESCROW_BEFORE_LOCKUP_TIME: u64 = 4;
+
+ + + + + +Token listing no longer exists + + +
const ETOKEN_LISTING_NOT_EXIST: u64 = 2;
+
+ + + + + +Token buy price doesn't match listing price + + +
const ETOKEN_MIN_PRICE_NOT_MATCH: u64 = 5;
+
+ + + + + +Token is not in escrow + + +
const ETOKEN_NOT_IN_ESCROW: u64 = 3;
+
+ + + + + +## Function `does_listing_exist` + + + +
public fun does_listing_exist<CoinType>(_token_owner: address, _token_id: token::TokenId): bool
+
+ + + +
+Implementation + + +
public fun does_listing_exist<CoinType>(
+    _token_owner: address,
+    _token_id: TokenId
+): bool {
+    abort error::invalid_argument(EDEPRECATED_MODULE)
+}
+
+ + + +
+ + + +## Function `exchange_coin_for_token` + +Coin owner withdraws coin to swap with tokens listed for swapping at the token owner's address. + + +
public fun exchange_coin_for_token<CoinType>(_coin_owner: &signer, _coin_amount: u64, _token_owner: address, _creators_address: address, _collection: string::String, _name: string::String, _property_version: u64, _token_amount: u64)
+
+ + + +
+Implementation + + +
public fun exchange_coin_for_token<CoinType>(
+    _coin_owner: &signer,
+    _coin_amount: u64,
+    _token_owner: address,
+    _creators_address: address,
+    _collection: String,
+    _name: String,
+    _property_version: u64,
+    _token_amount: u64,
+) {
+    abort error::invalid_argument(EDEPRECATED_MODULE)
+}
+
+ + + +
+ + + +## Function `list_token_for_swap` + +Token owner lists their token for swapping + + +
public entry fun list_token_for_swap<CoinType>(_token_owner: &signer, _creators_address: address, _collection: string::String, _name: string::String, _property_version: u64, _token_amount: u64, _min_coin_per_token: u64, _locked_until_secs: u64)
+
+ + + +
+Implementation + + +
public entry fun list_token_for_swap<CoinType>(
+    _token_owner: &signer,
+    _creators_address: address,
+    _collection: String,
+    _name: String,
+    _property_version: u64,
+    _token_amount: u64,
+    _min_coin_per_token: u64,
+    _locked_until_secs: u64
+) {
+    abort error::invalid_argument(EDEPRECATED_MODULE)
+}
+
+ + + +
+ + + +## Function `initialize_token_listing` + +Initialize the token listing for a token owner + + +
fun initialize_token_listing<CoinType>(_token_owner: &signer)
+
+ + + +
+Implementation + + +
fun initialize_token_listing<CoinType>(_token_owner: &signer) {
+    abort error::invalid_argument(EDEPRECATED_MODULE)
+}
+
+ + + +
+ + + +## Function `initialize_token_store_escrow` + +Initialize the token escrow + + +
fun initialize_token_store_escrow(_token_owner: &signer)
+
+ + + +
+Implementation + + +
fun initialize_token_store_escrow(_token_owner: &signer) {
+    abort error::invalid_argument(EDEPRECATED_MODULE)
+}
+
+ + + +
+ + + +## Function `deposit_token_to_escrow` + +Put the token into escrow that cannot be transferred or withdrawn by the owner. + + +
public fun deposit_token_to_escrow(_token_owner: &signer, _token_id: token::TokenId, _tokens: token::Token, _locked_until_secs: u64)
+
+ + + +
+Implementation + + +
public fun deposit_token_to_escrow(
+    _token_owner: &signer,
+    _token_id: TokenId,
+    _tokens: Token,
+    _locked_until_secs: u64
+) {
+    abort error::invalid_argument(EDEPRECATED_MODULE)
+}
+
+ + + +
+ + + +## Function `withdraw_token_from_escrow_internal` + +Private function for withdrawing tokens from an escrow stored in the token owner's address + + +
fun withdraw_token_from_escrow_internal(_token_owner_addr: address, _token_id: token::TokenId, _amount: u64): token::Token
+
+ + + +
+Implementation + + +
fun withdraw_token_from_escrow_internal(
+    _token_owner_addr: address,
+    _token_id: TokenId,
+    _amount: u64
+): Token {
+    abort error::invalid_argument(EDEPRECATED_MODULE)
+}
+
+ + + +
+ + + +## Function `withdraw_token_from_escrow` + +Withdraw tokens from the token escrow. It needs a signer to authorize + + +
public fun withdraw_token_from_escrow(_token_owner: &signer, _token_id: token::TokenId, _amount: u64): token::Token
+
+ + + +
+Implementation + + +
public fun withdraw_token_from_escrow(
+    _token_owner: &signer,
+    _token_id: TokenId,
+    _amount: u64
+): Token {
+    abort error::invalid_argument(EDEPRECATED_MODULE)
+}
+
+ + + +
+ + + +## Function `cancel_token_listing` + +Cancel token listing for a fixed amount + + +
public fun cancel_token_listing<CoinType>(_token_owner: &signer, _token_id: token::TokenId, _token_amount: u64)
+
+ + + +
+Implementation + + +
public fun cancel_token_listing<CoinType>(
+    _token_owner: &signer,
+    _token_id: TokenId,
+    _token_amount: u64,
+) {
+    abort error::invalid_argument(EDEPRECATED_MODULE)
+}
+
+ + + +
+ + + +## Specification + + + +
pragma verify = false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-token/tests/compiler-v2-doc/token_event_store.md b/aptos-move/framework/aptos-token/tests/compiler-v2-doc/token_event_store.md new file mode 100644 index 0000000000000..031fefa26208c --- /dev/null +++ b/aptos-move/framework/aptos-token/tests/compiler-v2-doc/token_event_store.md @@ -0,0 +1,1787 @@ + + + +# Module `0x3::token_event_store` + +This module provides utils to add and emit new token events that are not in token.move + + +- [Struct `CollectionDescriptionMutateEvent`](#0x3_token_event_store_CollectionDescriptionMutateEvent) +- [Struct `CollectionDescriptionMutate`](#0x3_token_event_store_CollectionDescriptionMutate) +- [Struct `CollectionUriMutateEvent`](#0x3_token_event_store_CollectionUriMutateEvent) +- [Struct `CollectionUriMutate`](#0x3_token_event_store_CollectionUriMutate) +- [Struct `CollectionMaxiumMutateEvent`](#0x3_token_event_store_CollectionMaxiumMutateEvent) +- [Struct `CollectionMaxiumMutate`](#0x3_token_event_store_CollectionMaxiumMutate) +- [Struct `OptInTransferEvent`](#0x3_token_event_store_OptInTransferEvent) +- [Struct `OptInTransfer`](#0x3_token_event_store_OptInTransfer) +- [Struct `UriMutationEvent`](#0x3_token_event_store_UriMutationEvent) +- [Struct `UriMutation`](#0x3_token_event_store_UriMutation) +- [Struct `DefaultPropertyMutateEvent`](#0x3_token_event_store_DefaultPropertyMutateEvent) +- [Struct `DefaultPropertyMutate`](#0x3_token_event_store_DefaultPropertyMutate) +- [Struct `DescriptionMutateEvent`](#0x3_token_event_store_DescriptionMutateEvent) +- [Struct `DescriptionMutate`](#0x3_token_event_store_DescriptionMutate) +- [Struct `RoyaltyMutateEvent`](#0x3_token_event_store_RoyaltyMutateEvent) +- [Struct `RoyaltyMutate`](#0x3_token_event_store_RoyaltyMutate) +- [Struct `MaxiumMutateEvent`](#0x3_token_event_store_MaxiumMutateEvent) +- [Struct `MaximumMutate`](#0x3_token_event_store_MaximumMutate) +- [Resource 
`TokenEventStoreV1`](#0x3_token_event_store_TokenEventStoreV1) +- [Function `initialize_token_event_store`](#0x3_token_event_store_initialize_token_event_store) +- [Function `emit_collection_uri_mutate_event`](#0x3_token_event_store_emit_collection_uri_mutate_event) +- [Function `emit_collection_description_mutate_event`](#0x3_token_event_store_emit_collection_description_mutate_event) +- [Function `emit_collection_maximum_mutate_event`](#0x3_token_event_store_emit_collection_maximum_mutate_event) +- [Function `emit_token_opt_in_event`](#0x3_token_event_store_emit_token_opt_in_event) +- [Function `emit_token_uri_mutate_event`](#0x3_token_event_store_emit_token_uri_mutate_event) +- [Function `emit_default_property_mutate_event`](#0x3_token_event_store_emit_default_property_mutate_event) +- [Function `emit_token_descrition_mutate_event`](#0x3_token_event_store_emit_token_descrition_mutate_event) +- [Function `emit_token_royalty_mutate_event`](#0x3_token_event_store_emit_token_royalty_mutate_event) +- [Function `emit_token_maximum_mutate_event`](#0x3_token_event_store_emit_token_maximum_mutate_event) +- [Specification](#@Specification_0) + - [Function `initialize_token_event_store`](#@Specification_0_initialize_token_event_store) + - [Function `emit_collection_uri_mutate_event`](#@Specification_0_emit_collection_uri_mutate_event) + - [Function `emit_collection_description_mutate_event`](#@Specification_0_emit_collection_description_mutate_event) + - [Function `emit_collection_maximum_mutate_event`](#@Specification_0_emit_collection_maximum_mutate_event) + - [Function `emit_token_opt_in_event`](#@Specification_0_emit_token_opt_in_event) + - [Function `emit_token_uri_mutate_event`](#@Specification_0_emit_token_uri_mutate_event) + - [Function `emit_default_property_mutate_event`](#@Specification_0_emit_default_property_mutate_event) + - [Function `emit_token_descrition_mutate_event`](#@Specification_0_emit_token_descrition_mutate_event) + - [Function 
`emit_token_royalty_mutate_event`](#@Specification_0_emit_token_royalty_mutate_event) + - [Function `emit_token_maximum_mutate_event`](#@Specification_0_emit_token_maximum_mutate_event) + + +
use 0x1::account;
+use 0x1::any;
+use 0x1::event;
+use 0x1::features;
+use 0x1::option;
+use 0x1::signer;
+use 0x1::string;
+use 0x3::property_map;
+
+ + + + + +## Struct `CollectionDescriptionMutateEvent` + +Event emitted when collection description is mutated + + +
struct CollectionDescriptionMutateEvent has drop, store
+
+ + + +
+Fields + + +
+
+creator_addr: address +
+
+ +
+
+collection_name: string::String +
+
+ +
+
+old_description: string::String +
+
+ +
+
+new_description: string::String +
+
+ +
+
+ + +
+ + + +## Struct `CollectionDescriptionMutate` + +Event emitted when collection description is mutated + + +
#[event]
+struct CollectionDescriptionMutate has drop, store
+
+ + + +
+Fields + + +
+
+creator_addr: address +
+
+ +
+
+collection_name: string::String +
+
+ +
+
+old_description: string::String +
+
+ +
+
+new_description: string::String +
+
+ +
+
+ + +
+ + + +## Struct `CollectionUriMutateEvent` + +Event emitted when collection uri is mutated + + +
struct CollectionUriMutateEvent has drop, store
+
+ + + +
+Fields + + +
+
+creator_addr: address +
+
+ +
+
+collection_name: string::String +
+
+ +
+
+old_uri: string::String +
+
+ +
+
+new_uri: string::String +
+
+ +
+
+ + +
+ + + +## Struct `CollectionUriMutate` + +Event emitted when collection uri is mutated + + +
#[event]
+struct CollectionUriMutate has drop, store
+
+ + + +
+Fields + + +
+
+creator_addr: address +
+
+ +
+
+collection_name: string::String +
+
+ +
+
+old_uri: string::String +
+
+ +
+
+new_uri: string::String +
+
+ +
+
+ + +
+ + + +## Struct `CollectionMaxiumMutateEvent` + +Event emitted when the collection maximum is mutated + + +
struct CollectionMaxiumMutateEvent has drop, store
+
+ + + +
+Fields + + +
+
+creator_addr: address +
+
+ +
+
+collection_name: string::String +
+
+ +
+
+old_maximum: u64 +
+
+ +
+
+new_maximum: u64 +
+
+ +
+
+ + +
+ + + +## Struct `CollectionMaxiumMutate` + +Event emitted when the collection maximum is mutated + + +
#[event]
+struct CollectionMaxiumMutate has drop, store
+
+ + + +
+Fields + + +
+
+creator_addr: address +
+
+ +
+
+collection_name: string::String +
+
+ +
+
+old_maximum: u64 +
+
+ +
+
+new_maximum: u64 +
+
+ +
+
+ + +
+ + + +## Struct `OptInTransferEvent` + +Event emitted when a user opts in to the direct transfer + + +
struct OptInTransferEvent has drop, store
+
+ + + +
+Fields + + +
+
+opt_in: bool +
+
+ True if the user opt in, false if the user opt-out +
+
+ + +
+ + + +## Struct `OptInTransfer` + +Event emitted when a user opts in to the direct transfer + + +
#[event]
+struct OptInTransfer has drop, store
+
+ + + +
+Fields + + +
+
+account_address: address +
+
+ +
+
+opt_in: bool +
+
+ True if the user opt in, false if the user opt-out +
+
+ + +
+ + + +## Struct `UriMutationEvent` + +Event emitted when the tokendata uri mutates + + +
struct UriMutationEvent has drop, store
+
+ + + +
+Fields + + +
+
+creator: address +
+
+ +
+
+collection: string::String +
+
+ +
+
+token: string::String +
+
+ +
+
+old_uri: string::String +
+
+ +
+
+new_uri: string::String +
+
+ +
+
+ + +
+ + + +## Struct `UriMutation` + +Event emitted when the tokendata uri mutates + + +
#[event]
+struct UriMutation has drop, store
+
+ + + +
+Fields + + +
+
+creator: address +
+
+ +
+
+collection: string::String +
+
+ +
+
+token: string::String +
+
+ +
+
+old_uri: string::String +
+
+ +
+
+new_uri: string::String +
+
+ +
+
+ + +
+ + + +## Struct `DefaultPropertyMutateEvent` + +Event emitted when mutating the default the token properties stored at tokendata + + +
struct DefaultPropertyMutateEvent has drop, store
+
+ + + +
+Fields + + +
+
+creator: address +
+
+ +
+
+collection: string::String +
+
+ +
+
+token: string::String +
+
+ +
+
+keys: vector<string::String> +
+
+ +
+
+old_values: vector<option::Option<property_map::PropertyValue>> +
+
+ we allow upsert so the old values might be none +
+
+new_values: vector<property_map::PropertyValue> +
+
+ +
+
+ + +
+ + + +## Struct `DefaultPropertyMutate` + +Event emitted when mutating the default the token properties stored at tokendata + + +
#[event]
+struct DefaultPropertyMutate has drop, store
+
+ + + +
+Fields + + +
+
+creator: address +
+
+ +
+
+collection: string::String +
+
+ +
+
+token: string::String +
+
+ +
+
+keys: vector<string::String> +
+
+ +
+
+old_values: vector<option::Option<property_map::PropertyValue>> +
+
+ we allow upsert so the old values might be none +
+
+new_values: vector<property_map::PropertyValue> +
+
+ +
+
+ + +
+ + + +## Struct `DescriptionMutateEvent` + +Event emitted when the tokendata description is mutated + + +
struct DescriptionMutateEvent has drop, store
+
+ + + +
+Fields + + +
+
+creator: address +
+
+ +
+
+collection: string::String +
+
+ +
+
+token: string::String +
+
+ +
+
+old_description: string::String +
+
+ +
+
+new_description: string::String +
+
+ +
+
+ + +
+ + + +## Struct `DescriptionMutate` + +Event emitted when the tokendata description is mutated + + +
#[event]
+struct DescriptionMutate has drop, store
+
+ + + +
+Fields + + +
+
+creator: address +
+
+ +
+
+collection: string::String +
+
+ +
+
+token: string::String +
+
+ +
+
+old_description: string::String +
+
+ +
+
+new_description: string::String +
+
+ +
+
+ + +
+ + + +## Struct `RoyaltyMutateEvent` + +Event emitted when the token royalty is mutated + + +
struct RoyaltyMutateEvent has drop, store
+
+ + + +
+Fields + + +
+
+creator: address +
+
+ +
+
+collection: string::String +
+
+ +
+
+token: string::String +
+
+ +
+
+old_royalty_numerator: u64 +
+
+ +
+
+old_royalty_denominator: u64 +
+
+ +
+
+old_royalty_payee_addr: address +
+
+ +
+
+new_royalty_numerator: u64 +
+
+ +
+
+new_royalty_denominator: u64 +
+
+ +
+
+new_royalty_payee_addr: address +
+
+ +
+
+ + +
+ + + +## Struct `RoyaltyMutate` + +Event emitted when the token royalty is mutated + + +
#[event]
+struct RoyaltyMutate has drop, store
+
+ + + +
+Fields + + +
+
+creator: address +
+
+ +
+
+collection: string::String +
+
+ +
+
+token: string::String +
+
+ +
+
+old_royalty_numerator: u64 +
+
+ +
+
+old_royalty_denominator: u64 +
+
+ +
+
+old_royalty_payee_addr: address +
+
+ +
+
+new_royalty_numerator: u64 +
+
+ +
+
+new_royalty_denominator: u64 +
+
+ +
+
+new_royalty_payee_addr: address +
+
+ +
+
+ + +
+ + + +## Struct `MaxiumMutateEvent` + +Event emitted when the token maximum is mutated + + +
struct MaxiumMutateEvent has drop, store
+
+ + + +
+Fields + + +
+
+creator: address +
+
+ +
+
+collection: string::String +
+
+ +
+
+token: string::String +
+
+ +
+
+old_maximum: u64 +
+
+ +
+
+new_maximum: u64 +
+
+ +
+
+ + +
+ + + +## Struct `MaximumMutate` + +Event emitted when the token maximum is mutated + + +
#[event]
+struct MaximumMutate has drop, store
+
+ + + +
+Fields + + +
+
+creator: address +
+
+ +
+
+collection: string::String +
+
+ +
+
+token: string::String +
+
+ +
+
+old_maximum: u64 +
+
+ +
+
+new_maximum: u64 +
+
+ +
+
+ + +
+ + + +## Resource `TokenEventStoreV1` + + + +
struct TokenEventStoreV1 has key
+
+ + + +
+Fields + + +
+
+collection_uri_mutate_events: event::EventHandle<token_event_store::CollectionUriMutateEvent> +
+
+ collection mutation events +
+
+collection_maximum_mutate_events: event::EventHandle<token_event_store::CollectionMaxiumMutateEvent> +
+
+ +
+
+collection_description_mutate_events: event::EventHandle<token_event_store::CollectionDescriptionMutateEvent> +
+
+ +
+
+opt_in_events: event::EventHandle<token_event_store::OptInTransferEvent> +
+
+ token transfer opt-in event +
+
+uri_mutate_events: event::EventHandle<token_event_store::UriMutationEvent> +
+
+ token mutation events +
+
+default_property_mutate_events: event::EventHandle<token_event_store::DefaultPropertyMutateEvent> +
+
+ +
+
+description_mutate_events: event::EventHandle<token_event_store::DescriptionMutateEvent> +
+
+ +
+
+royalty_mutate_events: event::EventHandle<token_event_store::RoyaltyMutateEvent> +
+
+ +
+
+maximum_mutate_events: event::EventHandle<token_event_store::MaxiumMutateEvent> +
+
+ +
+
+extension: option::Option<any::Any> +
+
+ This is for adding new events in future +
+
+ + +
+ + + +## Function `initialize_token_event_store` + + + +
fun initialize_token_event_store(acct: &signer)
+
+ + + +
+Implementation + + +
fun initialize_token_event_store(acct: &signer){
+    if (!exists<TokenEventStoreV1>(signer::address_of(acct))) {
+        move_to(acct, TokenEventStoreV1 {
+            collection_uri_mutate_events: account::new_event_handle<CollectionUriMutateEvent>(acct),
+            collection_maximum_mutate_events: account::new_event_handle<CollectionMaxiumMutateEvent>(acct),
+            collection_description_mutate_events: account::new_event_handle<CollectionDescriptionMutateEvent>(acct),
+            opt_in_events: account::new_event_handle<OptInTransferEvent>(acct),
+            uri_mutate_events: account::new_event_handle<UriMutationEvent>(acct),
+            default_property_mutate_events: account::new_event_handle<DefaultPropertyMutateEvent>(acct),
+            description_mutate_events: account::new_event_handle<DescriptionMutateEvent>(acct),
+            royalty_mutate_events: account::new_event_handle<RoyaltyMutateEvent>(acct),
+            maximum_mutate_events: account::new_event_handle<MaxiumMutateEvent>(acct),
+            extension: option::none<Any>(),
+        });
+    };
+}
+
+ + + +
+ + + +## Function `emit_collection_uri_mutate_event` + +Emit the collection uri mutation event + + +
public(friend) fun emit_collection_uri_mutate_event(creator: &signer, collection: string::String, old_uri: string::String, new_uri: string::String)
+
+ + + +
+Implementation + + +
public(friend) fun emit_collection_uri_mutate_event(creator: &signer, collection: String, old_uri: String, new_uri: String) acquires TokenEventStoreV1 {
+    let event = CollectionUriMutateEvent {
+        creator_addr: signer::address_of(creator),
+        collection_name: collection,
+        old_uri,
+        new_uri,
+    };
+    initialize_token_event_store(creator);
+    let token_event_store = borrow_global_mut<TokenEventStoreV1>(signer::address_of(creator));
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            CollectionUriMutate {
+                creator_addr: signer::address_of(creator),
+                collection_name: collection,
+                old_uri,
+                new_uri,
+            }
+        );
+    };
+    event::emit_event<CollectionUriMutateEvent>(
+        &mut token_event_store.collection_uri_mutate_events,
+        event,
+    );
+}
+
+ + + +
+ + + +## Function `emit_collection_description_mutate_event` + +Emit the collection description mutation event + + +
public(friend) fun emit_collection_description_mutate_event(creator: &signer, collection: string::String, old_description: string::String, new_description: string::String)
+
+ + + +
+Implementation + + +
public(friend) fun emit_collection_description_mutate_event(creator: &signer, collection: String, old_description: String, new_description: String) acquires TokenEventStoreV1 {
+    let event = CollectionDescriptionMutateEvent {
+        creator_addr: signer::address_of(creator),
+        collection_name: collection,
+        old_description,
+        new_description,
+    };
+    initialize_token_event_store(creator);
+    let token_event_store = borrow_global_mut<TokenEventStoreV1>(signer::address_of(creator));
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            CollectionDescriptionMutate {
+                creator_addr: signer::address_of(creator),
+                collection_name: collection,
+                old_description,
+                new_description,
+            }
+        );
+    };
+    event::emit_event<CollectionDescriptionMutateEvent>(
+        &mut token_event_store.collection_description_mutate_events,
+        event,
+    );
+}
+
+ + + +
+ + + +## Function `emit_collection_maximum_mutate_event` + +Emit the collection maximum mutation event + + +
public(friend) fun emit_collection_maximum_mutate_event(creator: &signer, collection: string::String, old_maximum: u64, new_maximum: u64)
+
+ + + +
+Implementation + + +
public(friend) fun emit_collection_maximum_mutate_event(creator: &signer, collection: String, old_maximum: u64, new_maximum: u64) acquires TokenEventStoreV1 {
+    let event = CollectionMaxiumMutateEvent {
+        creator_addr: signer::address_of(creator),
+        collection_name: collection,
+        old_maximum,
+        new_maximum,
+    };
+    initialize_token_event_store(creator);
+    let token_event_store = borrow_global_mut<TokenEventStoreV1>(signer::address_of(creator));
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            CollectionMaxiumMutate {
+                creator_addr: signer::address_of(creator),
+                collection_name: collection,
+                old_maximum,
+                new_maximum,
+            }
+        );
+    };
+    event::emit_event<CollectionMaxiumMutateEvent>(
+        &mut token_event_store.collection_maximum_mutate_events,
+        event,
+    );
+}
+
+ + + +
+ + + +## Function `emit_token_opt_in_event` + +Emit the direct opt-in event + + +
public(friend) fun emit_token_opt_in_event(account: &signer, opt_in: bool)
+
+ + + +
+Implementation + + +
public(friend) fun emit_token_opt_in_event(account: &signer, opt_in: bool) acquires TokenEventStoreV1 {
+    let opt_in_event = OptInTransferEvent {
+      opt_in,
+    };
+    initialize_token_event_store(account);
+    let token_event_store = borrow_global_mut<TokenEventStoreV1>(signer::address_of(account));
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            OptInTransfer {
+                account_address: signer::address_of(account),
+                opt_in,
+            });
+    };
+    event::emit_event<OptInTransferEvent>(
+        &mut token_event_store.opt_in_events,
+        opt_in_event,
+    );
+}
+
+ + + +
+ + + +## Function `emit_token_uri_mutate_event` + +Emit URI mutation event + + +
public(friend) fun emit_token_uri_mutate_event(creator: &signer, collection: string::String, token: string::String, old_uri: string::String, new_uri: string::String)
+
+ + + +
+Implementation + + +
public(friend) fun emit_token_uri_mutate_event(
+    creator: &signer,
+    collection: String,
+    token: String,
+    old_uri: String,
+    new_uri: String,
+) acquires TokenEventStoreV1 {
+    let creator_addr = signer::address_of(creator);
+
+    let event = UriMutationEvent {
+        creator: creator_addr,
+        collection,
+        token,
+        old_uri,
+        new_uri,
+    };
+
+    initialize_token_event_store(creator);
+    let token_event_store = borrow_global_mut<TokenEventStoreV1>(creator_addr);
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            UriMutation {
+                creator: creator_addr,
+                collection,
+                token,
+                old_uri,
+                new_uri,
+            });
+    };
+    event::emit_event<UriMutationEvent>(
+        &mut token_event_store.uri_mutate_events,
+        event,
+    );
+}
+
+ + + +
+ + + +## Function `emit_default_property_mutate_event` + +Emit tokendata property map mutation event + + +
public(friend) fun emit_default_property_mutate_event(creator: &signer, collection: string::String, token: string::String, keys: vector<string::String>, old_values: vector<option::Option<property_map::PropertyValue>>, new_values: vector<property_map::PropertyValue>)
+
+ + + +
+Implementation + + +
public(friend) fun emit_default_property_mutate_event(
+    creator: &signer,
+    collection: String,
+    token: String,
+    keys: vector<String>,
+    old_values: vector<Option<PropertyValue>>,
+    new_values: vector<PropertyValue>,
+) acquires TokenEventStoreV1 {
+    let creator_addr = signer::address_of(creator);
+
+    let event = DefaultPropertyMutateEvent {
+        creator: creator_addr,
+        collection,
+        token,
+        keys,
+        old_values,
+        new_values,
+    };
+
+    initialize_token_event_store(creator);
+    let token_event_store = borrow_global_mut<TokenEventStoreV1>(creator_addr);
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            DefaultPropertyMutate {
+                creator: creator_addr,
+                collection,
+                token,
+                keys,
+                old_values,
+                new_values,
+            });
+    };
+    event::emit_event<DefaultPropertyMutateEvent>(
+        &mut token_event_store.default_property_mutate_events,
+        event,
+    );
+}
+
+ + + +
+ + + +## Function `emit_token_descrition_mutate_event` + +Emit description mutation event + + +
public(friend) fun emit_token_descrition_mutate_event(creator: &signer, collection: string::String, token: string::String, old_description: string::String, new_description: string::String)
+
+ + + +
+Implementation + + +
public(friend) fun emit_token_descrition_mutate_event(
+    creator: &signer,
+    collection: String,
+    token: String,
+    old_description: String,
+    new_description: String,
+) acquires TokenEventStoreV1 {
+    let creator_addr = signer::address_of(creator);
+
+    let event = DescriptionMutateEvent {
+        creator: creator_addr,
+        collection,
+        token,
+        old_description,
+        new_description,
+    };
+
+    initialize_token_event_store(creator);
+    let token_event_store = borrow_global_mut<TokenEventStoreV1>(creator_addr);
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            DescriptionMutate {
+                creator: creator_addr,
+                collection,
+                token,
+                old_description,
+                new_description,
+            });
+    };
+    event::emit_event<DescriptionMutateEvent>(
+        &mut token_event_store.description_mutate_events,
+        event,
+    );
+}
+
+ + + +
+ + + +## Function `emit_token_royalty_mutate_event` + +Emit royalty mutation event + + +
public(friend) fun emit_token_royalty_mutate_event(creator: &signer, collection: string::String, token: string::String, old_royalty_numerator: u64, old_royalty_denominator: u64, old_royalty_payee_addr: address, new_royalty_numerator: u64, new_royalty_denominator: u64, new_royalty_payee_addr: address)
+
+ + + +
+Implementation + + +
public(friend) fun emit_token_royalty_mutate_event(
+    creator: &signer,
+    collection: String,
+    token: String,
+    old_royalty_numerator: u64,
+    old_royalty_denominator: u64,
+    old_royalty_payee_addr: address,
+    new_royalty_numerator: u64,
+    new_royalty_denominator: u64,
+    new_royalty_payee_addr: address,
+) acquires TokenEventStoreV1 {
+    let creator_addr = signer::address_of(creator);
+    let event = RoyaltyMutateEvent {
+        creator: creator_addr,
+        collection,
+        token,
+        old_royalty_numerator,
+        old_royalty_denominator,
+        old_royalty_payee_addr,
+        new_royalty_numerator,
+        new_royalty_denominator,
+        new_royalty_payee_addr,
+    };
+
+    initialize_token_event_store(creator);
+    let token_event_store = borrow_global_mut<TokenEventStoreV1>(creator_addr);
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            RoyaltyMutate {
+                creator: creator_addr,
+                collection,
+                token,
+                old_royalty_numerator,
+                old_royalty_denominator,
+                old_royalty_payee_addr,
+                new_royalty_numerator,
+                new_royalty_denominator,
+                new_royalty_payee_addr,
+            });
+    };
+    event::emit_event<RoyaltyMutateEvent>(
+        &mut token_event_store.royalty_mutate_events,
+        event,
+    );
+}
+
+ + + +
+ + + +## Function `emit_token_maximum_mutate_event` + +Emit maximum mutation event + + +
public(friend) fun emit_token_maximum_mutate_event(creator: &signer, collection: string::String, token: string::String, old_maximum: u64, new_maximum: u64)
+
+ + + +
+Implementation + + +
public(friend) fun emit_token_maximum_mutate_event(
+    creator: &signer,
+    collection: String,
+    token: String,
+    old_maximum: u64,
+    new_maximum: u64,
+) acquires TokenEventStoreV1 {
+    let creator_addr = signer::address_of(creator);
+
+    let event = MaxiumMutateEvent {
+        creator: creator_addr,
+        collection,
+        token,
+        old_maximum,
+        new_maximum,
+    };
+
+    initialize_token_event_store(creator);
+    let token_event_store =  borrow_global_mut<TokenEventStoreV1>(creator_addr);
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            MaximumMutate {
+                creator: creator_addr,
+                collection,
+                token,
+                old_maximum,
+                new_maximum,
+            });
+    };
+    event::emit_event<MaxiumMutateEvent>(
+        &mut token_event_store.maximum_mutate_events,
+        event,
+    );
+}
+
+ + + +
+ + + +## Specification + + + +
pragma verify = true;
+pragma aborts_if_is_strict;
+
+ + + + + +### Function `initialize_token_event_store` + + +
fun initialize_token_event_store(acct: &signer)
+
+ + + + +
pragma verify = true;
+let addr = signer::address_of(acct);
+include InitializeTokenEventStoreAbortsIf {creator : acct};
+
+ + +Adjust the overflow value according to the +number of registered events + + + + + +
schema InitializeTokenEventStoreAbortsIf {
+    creator: &signer;
+    let addr = signer::address_of(creator);
+    let account = global<Account>(addr);
+    aborts_if !exists<TokenEventStoreV1>(addr) && !exists<Account>(addr);
+    aborts_if !exists<TokenEventStoreV1>(addr) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM;
+    aborts_if !exists<TokenEventStoreV1>(addr) && account.guid_creation_num + 9 > MAX_U64;
+}
+
+ + + + + + + +
schema TokenEventStoreAbortsIf {
+    creator: &signer;
+    let addr = signer::address_of(creator);
+    let account = global<Account>(addr);
+    aborts_if !exists<Account>(addr);
+    aborts_if account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM;
+    aborts_if account.guid_creation_num + 9 > MAX_U64;
+}
+
+ + + + + +### Function `emit_collection_uri_mutate_event` + + +
public(friend) fun emit_collection_uri_mutate_event(creator: &signer, collection: string::String, old_uri: string::String, new_uri: string::String)
+
+ + + + +
include InitializeTokenEventStoreAbortsIf;
+
+ + + + + +### Function `emit_collection_description_mutate_event` + + +
public(friend) fun emit_collection_description_mutate_event(creator: &signer, collection: string::String, old_description: string::String, new_description: string::String)
+
+ + + + +
include InitializeTokenEventStoreAbortsIf;
+
+ + + + + +### Function `emit_collection_maximum_mutate_event` + + +
public(friend) fun emit_collection_maximum_mutate_event(creator: &signer, collection: string::String, old_maximum: u64, new_maximum: u64)
+
+ + + + +
include InitializeTokenEventStoreAbortsIf;
+
+ + + + + +### Function `emit_token_opt_in_event` + + +
public(friend) fun emit_token_opt_in_event(account: &signer, opt_in: bool)
+
+ + + + +
include InitializeTokenEventStoreAbortsIf {creator : account};
+
+ + + + + +### Function `emit_token_uri_mutate_event` + + +
public(friend) fun emit_token_uri_mutate_event(creator: &signer, collection: string::String, token: string::String, old_uri: string::String, new_uri: string::String)
+
+ + + + +
include InitializeTokenEventStoreAbortsIf;
+
+ + + + + +### Function `emit_default_property_mutate_event` + + +
public(friend) fun emit_default_property_mutate_event(creator: &signer, collection: string::String, token: string::String, keys: vector<string::String>, old_values: vector<option::Option<property_map::PropertyValue>>, new_values: vector<property_map::PropertyValue>)
+
+ + + + +
include InitializeTokenEventStoreAbortsIf;
+
+ + + + + +### Function `emit_token_descrition_mutate_event` + + +
public(friend) fun emit_token_descrition_mutate_event(creator: &signer, collection: string::String, token: string::String, old_description: string::String, new_description: string::String)
+
+ + + + +
include InitializeTokenEventStoreAbortsIf;
+
+ + + + + +### Function `emit_token_royalty_mutate_event` + + +
public(friend) fun emit_token_royalty_mutate_event(creator: &signer, collection: string::String, token: string::String, old_royalty_numerator: u64, old_royalty_denominator: u64, old_royalty_payee_addr: address, new_royalty_numerator: u64, new_royalty_denominator: u64, new_royalty_payee_addr: address)
+
+ + + + +
include InitializeTokenEventStoreAbortsIf;
+
+ + + + + +### Function `emit_token_maximum_mutate_event` + + +
public(friend) fun emit_token_maximum_mutate_event(creator: &signer, collection: string::String, token: string::String, old_maximum: u64, new_maximum: u64)
+
+ + + + +
include InitializeTokenEventStoreAbortsIf;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-token/tests/compiler-v2-doc/token_transfers.md b/aptos-move/framework/aptos-token/tests/compiler-v2-doc/token_transfers.md new file mode 100644 index 0000000000000..db16be95cd33f --- /dev/null +++ b/aptos-move/framework/aptos-token/tests/compiler-v2-doc/token_transfers.md @@ -0,0 +1,953 @@ + + + +# Module `0x3::token_transfers` + +This module provides the foundation for transferring of Tokens + + +- [Resource `PendingClaims`](#0x3_token_transfers_PendingClaims) +- [Struct `TokenOfferId`](#0x3_token_transfers_TokenOfferId) +- [Struct `TokenOffer`](#0x3_token_transfers_TokenOffer) +- [Struct `TokenOfferEvent`](#0x3_token_transfers_TokenOfferEvent) +- [Struct `TokenCancelOfferEvent`](#0x3_token_transfers_TokenCancelOfferEvent) +- [Struct `TokenCancelOffer`](#0x3_token_transfers_TokenCancelOffer) +- [Struct `TokenClaimEvent`](#0x3_token_transfers_TokenClaimEvent) +- [Struct `TokenClaim`](#0x3_token_transfers_TokenClaim) +- [Constants](#@Constants_0) +- [Function `initialize_token_transfers`](#0x3_token_transfers_initialize_token_transfers) +- [Function `create_token_offer_id`](#0x3_token_transfers_create_token_offer_id) +- [Function `offer_script`](#0x3_token_transfers_offer_script) +- [Function `offer`](#0x3_token_transfers_offer) +- [Function `claim_script`](#0x3_token_transfers_claim_script) +- [Function `claim`](#0x3_token_transfers_claim) +- [Function `cancel_offer_script`](#0x3_token_transfers_cancel_offer_script) +- [Function `cancel_offer`](#0x3_token_transfers_cancel_offer) +- [Specification](#@Specification_1) + - [Function `initialize_token_transfers`](#@Specification_1_initialize_token_transfers) + - [Function `create_token_offer_id`](#@Specification_1_create_token_offer_id) + - [Function `offer_script`](#@Specification_1_offer_script) + - [Function `offer`](#@Specification_1_offer) + - [Function `claim_script`](#@Specification_1_claim_script) + - [Function 
`claim`](#@Specification_1_claim) + - [Function `cancel_offer_script`](#@Specification_1_cancel_offer_script) + - [Function `cancel_offer`](#@Specification_1_cancel_offer) + + +
use 0x1::account;
+use 0x1::error;
+use 0x1::event;
+use 0x1::features;
+use 0x1::signer;
+use 0x1::string;
+use 0x1::table;
+use 0x3::token;
+
+ + + + + +## Resource `PendingClaims` + + + +
struct PendingClaims has key
+
+ + + +
+Fields + + +
+
+pending_claims: table::Table<token_transfers::TokenOfferId, token::Token> +
+
+ +
+
+offer_events: event::EventHandle<token_transfers::TokenOfferEvent> +
+
+ +
+
+cancel_offer_events: event::EventHandle<token_transfers::TokenCancelOfferEvent> +
+
+ +
+
+claim_events: event::EventHandle<token_transfers::TokenClaimEvent> +
+
+ +
+
+ + +
+ + + +## Struct `TokenOfferId` + + + +
#[event]
+struct TokenOfferId has copy, drop, store
+
+ + + +
+Fields + + +
+
+to_addr: address +
+
+ +
+
+token_id: token::TokenId +
+
+ +
+
+ + +
+ + + +## Struct `TokenOffer` + + + +
#[event]
+struct TokenOffer has drop, store
+
+ + + +
+Fields + + +
+
+to_address: address +
+
+ +
+
+token_id: token::TokenId +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `TokenOfferEvent` + + + +
#[event]
+struct TokenOfferEvent has drop, store
+
+ + + +
+Fields + + +
+
+to_address: address +
+
+ +
+
+token_id: token::TokenId +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `TokenCancelOfferEvent` + + + +
#[event]
+struct TokenCancelOfferEvent has drop, store
+
+ + + +
+Fields + + +
+
+to_address: address +
+
+ +
+
+token_id: token::TokenId +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `TokenCancelOffer` + + + +
#[event]
+struct TokenCancelOffer has drop, store
+
+ + + +
+Fields + + +
+
+to_address: address +
+
+ +
+
+token_id: token::TokenId +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `TokenClaimEvent` + + + +
#[event]
+struct TokenClaimEvent has drop, store
+
+ + + +
+Fields + + +
+
+to_address: address +
+
+ +
+
+token_id: token::TokenId +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `TokenClaim` + + + +
#[event]
+struct TokenClaim has drop, store
+
+ + + +
+Fields + + +
+
+to_address: address +
+
+ +
+
+token_id: token::TokenId +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Token offer doesn't exist + + +
const ETOKEN_OFFER_NOT_EXIST: u64 = 1;
+
+ + + + + +## Function `initialize_token_transfers` + + + +
fun initialize_token_transfers(account: &signer)
+
+ + + +
+Implementation + + +
fun initialize_token_transfers(account: &signer) {
+    move_to(
+        account,
+        PendingClaims {
+            pending_claims: table::new<TokenOfferId, Token>(),
+            offer_events: account::new_event_handle<TokenOfferEvent>(account),
+            cancel_offer_events: account::new_event_handle<TokenCancelOfferEvent>(account),
+            claim_events: account::new_event_handle<TokenClaimEvent>(account),
+        }
+    )
+}
+
+ + + +
+ + + +## Function `create_token_offer_id` + + + +
fun create_token_offer_id(to_addr: address, token_id: token::TokenId): token_transfers::TokenOfferId
+
+ + + +
+Implementation + + +
fun create_token_offer_id(to_addr: address, token_id: TokenId): TokenOfferId {
+    TokenOfferId {
+        to_addr,
+        token_id
+    }
+}
+
+ + + +
+ + + +## Function `offer_script` + + + +
public entry fun offer_script(sender: signer, receiver: address, creator: address, collection: string::String, name: string::String, property_version: u64, amount: u64)
+
+ + + +
+Implementation + + +
public entry fun offer_script(
+    sender: signer,
+    receiver: address,
+    creator: address,
+    collection: String,
+    name: String,
+    property_version: u64,
+    amount: u64,
+) acquires PendingClaims {
+    let token_id = token::create_token_id_raw(creator, collection, name, property_version);
+    offer(&sender, receiver, token_id, amount);
+}
+
+ + + +
+ + + +## Function `offer` + + + +
public fun offer(sender: &signer, receiver: address, token_id: token::TokenId, amount: u64)
+
+ + + +
+Implementation + + +
public fun offer(
+    sender: &signer,
+    receiver: address,
+    token_id: TokenId,
+    amount: u64,
+) acquires PendingClaims {
+    let sender_addr = signer::address_of(sender);
+    if (!exists<PendingClaims>(sender_addr)) {
+        initialize_token_transfers(sender)
+    };
+
+    let pending_claims =
+        &mut borrow_global_mut<PendingClaims>(sender_addr).pending_claims;
+    let token_offer_id = create_token_offer_id(receiver, token_id);
+    let token = token::withdraw_token(sender, token_id, amount);
+    if (!table::contains(pending_claims, token_offer_id)) {
+        table::add(pending_claims, token_offer_id, token);
+    } else {
+        let dst_token = table::borrow_mut(pending_claims, token_offer_id);
+        token::merge(dst_token, token);
+    };
+
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            TokenOffer {
+                to_address: receiver,
+                token_id,
+                amount,
+            }
+        )
+    };
+    event::emit_event<TokenOfferEvent>(
+        &mut borrow_global_mut<PendingClaims>(sender_addr).offer_events,
+        TokenOfferEvent {
+            to_address: receiver,
+            token_id,
+            amount,
+        },
+    );
+}
+
+ + + +
+ + + +## Function `claim_script` + + + +
public entry fun claim_script(receiver: signer, sender: address, creator: address, collection: string::String, name: string::String, property_version: u64)
+
+ + + +
+Implementation + + +
public entry fun claim_script(
+    receiver: signer,
+    sender: address,
+    creator: address,
+    collection: String,
+    name: String,
+    property_version: u64,
+) acquires PendingClaims {
+    let token_id = token::create_token_id_raw(creator, collection, name, property_version);
+    claim(&receiver, sender, token_id);
+}
+
+ + + +
+ + + +## Function `claim` + + + +
public fun claim(receiver: &signer, sender: address, token_id: token::TokenId)
+
+ + + +
+Implementation + + +
public fun claim(
+    receiver: &signer,
+    sender: address,
+    token_id: TokenId,
+) acquires PendingClaims {
+    assert!(exists<PendingClaims>(sender), ETOKEN_OFFER_NOT_EXIST);
+    let pending_claims =
+        &mut borrow_global_mut<PendingClaims>(sender).pending_claims;
+    let token_offer_id = create_token_offer_id(signer::address_of(receiver), token_id);
+    assert!(table::contains(pending_claims, token_offer_id), error::not_found(ETOKEN_OFFER_NOT_EXIST));
+    let tokens = table::remove(pending_claims, token_offer_id);
+    let amount = token::get_token_amount(&tokens);
+    token::deposit_token(receiver, tokens);
+
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            TokenClaim {
+                to_address: signer::address_of(receiver),
+                token_id,
+                amount,
+            }
+        )
+    };
+    event::emit_event<TokenClaimEvent>(
+        &mut borrow_global_mut<PendingClaims>(sender).claim_events,
+        TokenClaimEvent {
+            to_address: signer::address_of(receiver),
+            token_id,
+            amount,
+        },
+    );
+}
+
+ + + +
+ + + +## Function `cancel_offer_script` + + + +
public entry fun cancel_offer_script(sender: signer, receiver: address, creator: address, collection: string::String, name: string::String, property_version: u64)
+
+ + + +
+Implementation + + +
public entry fun cancel_offer_script(
+    sender: signer,
+    receiver: address,
+    creator: address,
+    collection: String,
+    name: String,
+    property_version: u64,
+) acquires PendingClaims {
+    let token_id = token::create_token_id_raw(creator, collection, name, property_version);
+    cancel_offer(&sender, receiver, token_id);
+}
+
+ + + +
+ + + +## Function `cancel_offer` + + + +
public fun cancel_offer(sender: &signer, receiver: address, token_id: token::TokenId)
+
+ + + +
+Implementation + + +
public fun cancel_offer(
+    sender: &signer,
+    receiver: address,
+    token_id: TokenId,
+) acquires PendingClaims {
+    let sender_addr = signer::address_of(sender);
+    let token_offer_id = create_token_offer_id(receiver, token_id);
+    assert!(exists<PendingClaims>(sender_addr), ETOKEN_OFFER_NOT_EXIST);
+    let pending_claims =
+        &mut borrow_global_mut<PendingClaims>(sender_addr).pending_claims;
+    let token = table::remove(pending_claims, token_offer_id);
+    let amount = token::get_token_amount(&token);
+    token::deposit_token(sender, token);
+
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            TokenCancelOffer {
+                to_address: receiver,
+                token_id,
+                amount,
+            },
+        )
+    };
+    event::emit_event<TokenCancelOfferEvent>(
+        &mut borrow_global_mut<PendingClaims>(sender_addr).cancel_offer_events,
+        TokenCancelOfferEvent {
+            to_address: receiver,
+            token_id,
+            amount,
+        },
+    );
+}
+
+ + + +
+ + + +## Specification + + + +
pragma verify = true;
+pragma aborts_if_is_strict;
+
+ + + + + +### Function `initialize_token_transfers` + + +
fun initialize_token_transfers(account: &signer)
+
+ + + + +
include InitializeTokenTransfersAbortsIf;
+
+ + +Aborts according to the error code conditions below. + + + + + +
schema InitializeTokenTransfersAbortsIf {
+    account: &signer;
+    let addr = signer::address_of(account);
+    aborts_if exists<PendingClaims>(addr);
+    let account = global<Account>(addr);
+    aborts_if !exists<Account>(addr);
+    aborts_if account.guid_creation_num + 3 >= account::MAX_GUID_CREATION_NUM;
+    aborts_if account.guid_creation_num + 3 > MAX_U64;
+}
+
+ + + + + +### Function `create_token_offer_id` + + +
fun create_token_offer_id(to_addr: address, token_id: token::TokenId): token_transfers::TokenOfferId
+
+ + + + +
aborts_if false;
+
+ + + + + +### Function `offer_script` + + +
public entry fun offer_script(sender: signer, receiver: address, creator: address, collection: string::String, name: string::String, property_version: u64, amount: u64)
+
+ + + + +
pragma verify = false;
+let token_id = token::create_token_id_raw(creator, collection, name, property_version);
+
+ + + + + +### Function `offer` + + +
public fun offer(sender: &signer, receiver: address, token_id: token::TokenId, amount: u64)
+
+ + + + +
pragma verify = false;
+let sender_addr = signer::address_of(sender);
+include !exists<PendingClaims>(sender_addr) ==> InitializeTokenTransfersAbortsIf{account : sender};
+let pending_claims = global<PendingClaims>(sender_addr).pending_claims;
+let token_offer_id = create_token_offer_id(receiver, token_id);
+let tokens = global<TokenStore>(sender_addr).tokens;
+aborts_if amount <= 0;
+aborts_if token::spec_balance_of(sender_addr, token_id) < amount;
+aborts_if !exists<TokenStore>(sender_addr);
+aborts_if !table::spec_contains(tokens, token_id);
+aborts_if !table::spec_contains(pending_claims, token_offer_id);
+let a = table::spec_contains(pending_claims, token_offer_id);
+let dst_token = table::spec_get(pending_claims, token_offer_id);
+aborts_if dst_token.amount + spce_get(signer::address_of(sender), token_id, amount) > MAX_U64;
+
+ + +Get the amount from the sender's token. + + + + + +
fun spce_get(
+   account_addr: address,
+   id: TokenId,
+   amount: u64
+): u64 {
+   use aptos_token::token::{TokenStore};
+   use aptos_std::table::{Self};
+   let tokens = global<TokenStore>(account_addr).tokens;
+   let balance = table::spec_get(tokens, id).amount;
+   if (balance > amount) {
+       amount
+   } else {
+       table::spec_get(tokens, id).amount
+   }
+}
+
+ + + + + +### Function `claim_script` + + +
public entry fun claim_script(receiver: signer, sender: address, creator: address, collection: string::String, name: string::String, property_version: u64)
+
+ + + + +
pragma aborts_if_is_partial;
+let token_id = token::create_token_id_raw(creator, collection, name, property_version);
+aborts_if !exists<PendingClaims>(sender);
+let pending_claims = global<PendingClaims>(sender).pending_claims;
+let token_offer_id = create_token_offer_id(signer::address_of(receiver), token_id);
+aborts_if !table::spec_contains(pending_claims, token_offer_id);
+let tokens = table::spec_get(pending_claims, token_offer_id);
+include token::InitializeTokenStore{account: receiver };
+let account_addr = signer::address_of(receiver);
+let token = tokens;
+let token_store = global<TokenStore>(account_addr);
+let recipient_token = table::spec_get(token_store.tokens, token.id);
+let b = table::spec_contains(token_store.tokens, token.id);
+aborts_if token.amount <= 0;
+
+ + + + + +### Function `claim` + + +
public fun claim(receiver: &signer, sender: address, token_id: token::TokenId)
+
+ + + + +
pragma aborts_if_is_partial;
+aborts_if !exists<PendingClaims>(sender);
+let pending_claims = global<PendingClaims>(sender).pending_claims;
+let token_offer_id = create_token_offer_id(signer::address_of(receiver), token_id);
+aborts_if !table::spec_contains(pending_claims, token_offer_id);
+let tokens = table::spec_get(pending_claims, token_offer_id);
+include token::InitializeTokenStore{account: receiver };
+let account_addr = signer::address_of(receiver);
+let token = tokens;
+let token_store = global<TokenStore>(account_addr);
+let recipient_token = table::spec_get(token_store.tokens, token.id);
+let b = table::spec_contains(token_store.tokens, token.id);
+aborts_if token.amount <= 0;
+
+ + + + + +### Function `cancel_offer_script` + + +
public entry fun cancel_offer_script(sender: signer, receiver: address, creator: address, collection: string::String, name: string::String, property_version: u64)
+
+ + + + +
pragma aborts_if_is_partial;
+let token_id = token::create_token_id_raw(creator, collection, name, property_version);
+let sender_addr = signer::address_of(sender);
+aborts_if !exists<PendingClaims>(sender_addr);
+let pending_claims = global<PendingClaims>(sender_addr).pending_claims;
+let token_offer_id = create_token_offer_id(receiver, token_id);
+aborts_if !table::spec_contains(pending_claims, token_offer_id);
+include token::InitializeTokenStore{account: sender };
+let dst_token = table::spec_get(pending_claims, token_offer_id);
+let account_addr = sender_addr;
+let token = dst_token;
+let token_store = global<TokenStore>(account_addr);
+let recipient_token = table::spec_get(token_store.tokens, token.id);
+let b = table::spec_contains(token_store.tokens, token.id);
+aborts_if token.amount <= 0;
+
+ + + + + +### Function `cancel_offer` + + +
public fun cancel_offer(sender: &signer, receiver: address, token_id: token::TokenId)
+
+ + + + +
pragma aborts_if_is_partial;
+let sender_addr = signer::address_of(sender);
+aborts_if !exists<PendingClaims>(sender_addr);
+let pending_claims = global<PendingClaims>(sender_addr).pending_claims;
+let token_offer_id = create_token_offer_id(receiver, token_id);
+aborts_if !table::spec_contains(pending_claims, token_offer_id);
+include token::InitializeTokenStore{account: sender };
+let dst_token = table::spec_get(pending_claims, token_offer_id);
+let account_addr = sender_addr;
+let token = dst_token;
+let token_store = global<TokenStore>(account_addr);
+let recipient_token = table::spec_get(token_store.tokens, token.id);
+let b = table::spec_contains(token_store.tokens, token.id);
+aborts_if token.amount <= 0;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs b/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs index ad305a018ee0d..040e5dfadccb3 100644 --- a/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs +++ b/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs @@ -109,7 +109,7 @@ pub enum EntryFunctionCall { /// Here is an example attack if we don't ask for the second signature `cap_update_table`: /// Alice has rotated her account `addr_a` to `new_addr_a`. As a result, the following entry is created, to help Alice when recovering her wallet: /// `OriginatingAddress[new_addr_a]` -> `addr_a` - /// Alice has had bad day: her laptop blew up and she needs to reset her account on a new one. + /// Alice has had a bad day: her laptop blew up and she needs to reset her account on a new one. /// (Fortunately, she still has her secret key `new_sk_a` associated with her new address `new_addr_a`, so she can do this.) /// /// But Bob likes to mess with Alice. @@ -259,6 +259,67 @@ pub enum EntryFunctionCall { rpc_port: Vec, }, + /// This can be called to install or update a set of JWKs for a federated OIDC provider. This function should + /// be invoked to intially install a set of JWKs or to update a set of JWKs when a keypair is rotated. + /// + /// The `iss` parameter is the value of the `iss` claim on the JWTs that are to be verified by the JWK set. + /// `kid_vec`, `alg_vec`, `e_vec`, `n_vec` are String vectors of the JWK attributes `kid`, `alg`, `e` and `n` respectively. + /// See https://datatracker.ietf.org/doc/html/rfc7517#section-4 for more details about the JWK attributes aforementioned. 
+ /// + /// For the example JWK set snapshot below containing 2 keys for Google found at https://www.googleapis.com/oauth2/v3/certs - + /// ```json + /// { + /// "keys": [ + /// { + /// "alg": "RS256", + /// "use": "sig", + /// "kty": "RSA", + /// "n": "wNHgGSG5B5xOEQNFPW2p_6ZxZbfPoAU5VceBUuNwQWLop0ohW0vpoZLU1tAsq_S9s5iwy27rJw4EZAOGBR9oTRq1Y6Li5pDVJfmzyRNtmWCWndR-bPqhs_dkJU7MbGwcvfLsN9FSHESFrS9sfGtUX-lZfLoGux23TKdYV9EE-H-NDASxrVFUk2GWc3rL6UEMWrMnOqV9-tghybDU3fcRdNTDuXUr9qDYmhmNegYjYu4REGjqeSyIG1tuQxYpOBH-tohtcfGY-oRTS09kgsSS9Q5BRM4qqCkGP28WhlSf4ui0-norS0gKMMI1P_ZAGEsLn9p2TlYMpewvIuhjJs1thw", + /// "kid": "d7b939771a7800c413f90051012d975981916d71", + /// "e": "AQAB" + /// }, + /// { + /// "kty": "RSA", + /// "kid": "b2620d5e7f132b52afe8875cdf3776c064249d04", + /// "alg": "RS256", + /// "n": "pi22xDdK2fz5gclIbDIGghLDYiRO56eW2GUcboeVlhbAuhuT5mlEYIevkxdPOg5n6qICePZiQSxkwcYMIZyLkZhSJ2d2M6Szx2gDtnAmee6o_tWdroKu0DjqwG8pZU693oLaIjLku3IK20lTs6-2TeH-pUYMjEqiFMhn-hb7wnvH_FuPTjgz9i0rEdw_Hf3Wk6CMypaUHi31y6twrMWq1jEbdQNl50EwH-RQmQ9bs3Wm9V9t-2-_Jzg3AT0Ny4zEDU7WXgN2DevM8_FVje4IgztNy29XUkeUctHsr-431_Iu23JIy6U4Kxn36X3RlVUKEkOMpkDD3kd81JPW4Ger_w", + /// "e": "AQAB", + /// "use": "sig" + /// } + /// ] + /// } + /// ``` + /// + /// We can call update_federated_jwk_set for Google's `iss` - "https://accounts.google.com" and for each vector + /// argument `kid_vec`, `alg_vec`, `e_vec`, `n_vec`, we set in index 0 the corresponding attribute in the first JWK and we set in index 1 the + /// the corresponding attribute in the second JWK as shown below. 
+ /// + /// ```move + /// use std::string::utf8; + /// supra_framework::jwks::update_federated_jwk_set( + /// jwk_owner, + /// b"https://accounts.google.com", + /// vector[utf8(b"d7b939771a7800c413f90051012d975981916d71"), utf8(b"b2620d5e7f132b52afe8875cdf3776c064249d04")], + /// vector[utf8(b"RS256"), utf8(b"RS256")], + /// vector[utf8(b"AQAB"), utf8(b"AQAB")], + /// vector[ + /// utf8(b"wNHgGSG5B5xOEQNFPW2p_6ZxZbfPoAU5VceBUuNwQWLop0ohW0vpoZLU1tAsq_S9s5iwy27rJw4EZAOGBR9oTRq1Y6Li5pDVJfmzyRNtmWCWndR-bPqhs_dkJU7MbGwcvfLsN9FSHESFrS9sfGtUX-lZfLoGux23TKdYV9EE-H-NDASxrVFUk2GWc3rL6UEMWrMnOqV9-tghybDU3fcRdNTDuXUr9qDYmhmNegYjYu4REGjqeSyIG1tuQxYpOBH-tohtcfGY-oRTS09kgsSS9Q5BRM4qqCkGP28WhlSf4ui0-norS0gKMMI1P_ZAGEsLn9p2TlYMpewvIuhjJs1thw"), + /// utf8(b"pi22xDdK2fz5gclIbDIGghLDYiRO56eW2GUcboeVlhbAuhuT5mlEYIevkxdPOg5n6qICePZiQSxkwcYMIZyLkZhSJ2d2M6Szx2gDtnAmee6o_tWdroKu0DjqwG8pZU693oLaIjLku3IK20lTs6-2TeH-pUYMjEqiFMhn-hb7wnvH_FuPTjgz9i0rEdw_Hf3Wk6CMypaUHi31y6twrMWq1jEbdQNl50EwH-RQmQ9bs3Wm9V9t-2-_Jzg3AT0Ny4zEDU7WXgN2DevM8_FVje4IgztNy29XUkeUctHsr-431_Iu23JIy6U4Kxn36X3RlVUKEkOMpkDD3kd81JPW4Ger_w") + /// ] + /// ) + /// ``` + /// + /// See AIP-96 for more details about federated keyless - https://github.com/aptos-foundation/AIPs/blob/main/aips/aip-96.md + /// + /// NOTE: Currently only RSA keys are supported. + JwksUpdateFederatedJwkSet { + iss: Vec, + kid_vec: Vec>, + alg_vec: Vec>, + e_vec: Vec>, + n_vec: Vec>, + }, + /// Withdraw an `amount` of coin `CoinType` from `account` and burn it. ManagedCoinBurn { coin_type: TypeTag, @@ -838,6 +899,17 @@ pub enum EntryFunctionCall { auth_key: AccountAddress, }, + /// SUPRA Primary Fungible Store specific specialized functions, + /// Utilized internally once migration of SUPRA to FungibleAsset is complete. + /// Convenient function to transfer SUPRA to a recipient account that might not exist. + /// This would create the recipient SUPRA PFS first, which also registers it to receive SUPRA, before transferring. 
+ /// TODO: once migration is complete, rename to just "transfer_only" and make it an entry function (for cheapest way + /// to transfer SUPRA) - if we want to allow SUPRA PFS without account itself + SupraAccountFungibleTransferOnly { + to: AccountAddress, + amount: u64, + }, + /// Set whether `account` can receive direct transfers of coins that they have not explicitly registered to receive. SupraAccountSetAllowDirectCoinTransfers { allow: bool, @@ -1287,6 +1359,13 @@ impl EntryFunctionCall { network_port, rpc_port, ), + JwksUpdateFederatedJwkSet { + iss, + kid_vec, + alg_vec, + e_vec, + n_vec, + } => jwks_update_federated_jwk_set(iss, kid_vec, alg_vec, e_vec, n_vec), ManagedCoinBurn { coin_type, amount } => managed_coin_burn(coin_type, amount), ManagedCoinInitialize { coin_type, @@ -1663,6 +1742,9 @@ impl EntryFunctionCall { amounts, } => supra_account_batch_transfer_coins(coin_type, recipients, amounts), SupraAccountCreateAccount { auth_key } => supra_account_create_account(auth_key), + SupraAccountFungibleTransferOnly { to, amount } => { + supra_account_fungible_transfer_only(to, amount) + }, SupraAccountSetAllowDirectCoinTransfers { allow } => { supra_account_set_allow_direct_coin_transfers(allow) }, @@ -2004,7 +2086,7 @@ pub fn account_revoke_signer_capability( /// Here is an example attack if we don't ask for the second signature `cap_update_table`: /// Alice has rotated her account `addr_a` to `new_addr_a`. As a result, the following entry is created, to help Alice when recovering her wallet: /// `OriginatingAddress[new_addr_a]` -> `addr_a` -/// Alice has had bad day: her laptop blew up and she needs to reset her account on a new one. +/// Alice has had a bad day: her laptop blew up and she needs to reset her account on a new one. /// (Fortunately, she still has her secret key `new_sk_a` associated with her new address `new_addr_a`, so she can do this.) /// /// But Bob likes to mess with Alice. 
@@ -2434,6 +2516,86 @@ pub fn committee_map_upsert_committee_member_bulk( )) } +/// This can be called to install or update a set of JWKs for a federated OIDC provider. This function should +/// be invoked to initially install a set of JWKs or to update a set of JWKs when a keypair is rotated. +/// +/// The `iss` parameter is the value of the `iss` claim on the JWTs that are to be verified by the JWK set. +/// `kid_vec`, `alg_vec`, `e_vec`, `n_vec` are String vectors of the JWK attributes `kid`, `alg`, `e` and `n` respectively. +/// See https://datatracker.ietf.org/doc/html/rfc7517#section-4 for more details about the JWK attributes aforementioned. +/// +/// For the example JWK set snapshot below containing 2 keys for Google found at https://www.googleapis.com/oauth2/v3/certs - +/// ```json +/// { +/// "keys": [ +/// { +/// "alg": "RS256", +/// "use": "sig", +/// "kty": "RSA", +/// "n": "wNHgGSG5B5xOEQNFPW2p_6ZxZbfPoAU5VceBUuNwQWLop0ohW0vpoZLU1tAsq_S9s5iwy27rJw4EZAOGBR9oTRq1Y6Li5pDVJfmzyRNtmWCWndR-bPqhs_dkJU7MbGwcvfLsN9FSHESFrS9sfGtUX-lZfLoGux23TKdYV9EE-H-NDASxrVFUk2GWc3rL6UEMWrMnOqV9-tghybDU3fcRdNTDuXUr9qDYmhmNegYjYu4REGjqeSyIG1tuQxYpOBH-tohtcfGY-oRTS09kgsSS9Q5BRM4qqCkGP28WhlSf4ui0-norS0gKMMI1P_ZAGEsLn9p2TlYMpewvIuhjJs1thw", +/// "kid": "d7b939771a7800c413f90051012d975981916d71", +/// "e": "AQAB" +/// }, +/// { +/// "kty": "RSA", +/// "kid": "b2620d5e7f132b52afe8875cdf3776c064249d04", +/// "alg": "RS256", +/// "n": "pi22xDdK2fz5gclIbDIGghLDYiRO56eW2GUcboeVlhbAuhuT5mlEYIevkxdPOg5n6qICePZiQSxkwcYMIZyLkZhSJ2d2M6Szx2gDtnAmee6o_tWdroKu0DjqwG8pZU693oLaIjLku3IK20lTs6-2TeH-pUYMjEqiFMhn-hb7wnvH_FuPTjgz9i0rEdw_Hf3Wk6CMypaUHi31y6twrMWq1jEbdQNl50EwH-RQmQ9bs3Wm9V9t-2-_Jzg3AT0Ny4zEDU7WXgN2DevM8_FVje4IgztNy29XUkeUctHsr-431_Iu23JIy6U4Kxn36X3RlVUKEkOMpkDD3kd81JPW4Ger_w", +/// "e": "AQAB", +/// "use": "sig" +/// } +/// ] +/// } +/// ``` +/// +/// We can call update_federated_jwk_set for Google's `iss` - "https://accounts.google.com" and for each vector +/// argument `kid_vec`, 
`alg_vec`, `e_vec`, `n_vec`, we set in index 0 the corresponding attribute in the first JWK and we set in index 1 the +/// corresponding attribute in the second JWK as shown below. +/// +/// ```move +/// use std::string::utf8; +/// supra_framework::jwks::update_federated_jwk_set( +/// jwk_owner, +/// b"https://accounts.google.com", +/// vector[utf8(b"d7b939771a7800c413f90051012d975981916d71"), utf8(b"b2620d5e7f132b52afe8875cdf3776c064249d04")], +/// vector[utf8(b"RS256"), utf8(b"RS256")], +/// vector[utf8(b"AQAB"), utf8(b"AQAB")], +/// vector[ +/// utf8(b"wNHgGSG5B5xOEQNFPW2p_6ZxZbfPoAU5VceBUuNwQWLop0ohW0vpoZLU1tAsq_S9s5iwy27rJw4EZAOGBR9oTRq1Y6Li5pDVJfmzyRNtmWCWndR-bPqhs_dkJU7MbGwcvfLsN9FSHESFrS9sfGtUX-lZfLoGux23TKdYV9EE-H-NDASxrVFUk2GWc3rL6UEMWrMnOqV9-tghybDU3fcRdNTDuXUr9qDYmhmNegYjYu4REGjqeSyIG1tuQxYpOBH-tohtcfGY-oRTS09kgsSS9Q5BRM4qqCkGP28WhlSf4ui0-norS0gKMMI1P_ZAGEsLn9p2TlYMpewvIuhjJs1thw"), +/// utf8(b"pi22xDdK2fz5gclIbDIGghLDYiRO56eW2GUcboeVlhbAuhuT5mlEYIevkxdPOg5n6qICePZiQSxkwcYMIZyLkZhSJ2d2M6Szx2gDtnAmee6o_tWdroKu0DjqwG8pZU693oLaIjLku3IK20lTs6-2TeH-pUYMjEqiFMhn-hb7wnvH_FuPTjgz9i0rEdw_Hf3Wk6CMypaUHi31y6twrMWq1jEbdQNl50EwH-RQmQ9bs3Wm9V9t-2-_Jzg3AT0Ny4zEDU7WXgN2DevM8_FVje4IgztNy29XUkeUctHsr-431_Iu23JIy6U4Kxn36X3RlVUKEkOMpkDD3kd81JPW4Ger_w") +/// ] +/// ) +/// ``` +/// +/// See AIP-96 for more details about federated keyless - https://github.com/aptos-foundation/AIPs/blob/main/aips/aip-96.md +/// +/// NOTE: Currently only RSA keys are supported. 
+pub fn jwks_update_federated_jwk_set( + iss: Vec, + kid_vec: Vec>, + alg_vec: Vec>, + e_vec: Vec>, + n_vec: Vec>, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::new([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ]), + ident_str!("jwks").to_owned(), + ), + ident_str!("update_federated_jwk_set").to_owned(), + vec![], + vec![ + bcs::to_bytes(&iss).unwrap(), + bcs::to_bytes(&kid_vec).unwrap(), + bcs::to_bytes(&alg_vec).unwrap(), + bcs::to_bytes(&e_vec).unwrap(), + bcs::to_bytes(&n_vec).unwrap(), + ], + )) +} + /// Withdraw an `amount` of coin `CoinType` from `account` and burn it. pub fn managed_coin_burn(coin_type: TypeTag, amount: u64) -> TransactionPayload { TransactionPayload::EntryFunction(EntryFunction::new( @@ -4214,6 +4376,27 @@ pub fn supra_account_create_account(auth_key: AccountAddress) -> TransactionPayl )) } +/// SUPRA Primary Fungible Store specific specialized functions, +/// Utilized internally once migration of SUPRA to FungibleAsset is complete. +/// Convenient function to transfer SUPRA to a recipient account that might not exist. +/// This would create the recipient SUPRA PFS first, which also registers it to receive SUPRA, before transferring. 
+/// TODO: once migration is complete, rename to just "transfer_only" and make it an entry function (for cheapest way +/// to transfer SUPRA) - if we want to allow SUPRA PFS without account itself +pub fn supra_account_fungible_transfer_only(to: AccountAddress, amount: u64) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::new([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ]), + ident_str!("supra_account").to_owned(), + ), + ident_str!("fungible_transfer_only").to_owned(), + vec![], + vec![bcs::to_bytes(&to).unwrap(), bcs::to_bytes(&amount).unwrap()], + )) +} + /// Set whether `account` can receive direct transfers of coins that they have not explicitly registered to receive. pub fn supra_account_set_allow_direct_coin_transfers(allow: bool) -> TransactionPayload { TransactionPayload::EntryFunction(EntryFunction::new( @@ -5406,6 +5589,22 @@ mod decoder { } } + pub fn jwks_update_federated_jwk_set( + payload: &TransactionPayload, + ) -> Option { + if let TransactionPayload::EntryFunction(script) = payload { + Some(EntryFunctionCall::JwksUpdateFederatedJwkSet { + iss: bcs::from_bytes(script.args().get(0)?).ok()?, + kid_vec: bcs::from_bytes(script.args().get(1)?).ok()?, + alg_vec: bcs::from_bytes(script.args().get(2)?).ok()?, + e_vec: bcs::from_bytes(script.args().get(3)?).ok()?, + n_vec: bcs::from_bytes(script.args().get(4)?).ok()?, + }) + } else { + None + } + } + pub fn managed_coin_burn(payload: &TransactionPayload) -> Option { if let TransactionPayload::EntryFunction(script) = payload { Some(EntryFunctionCall::ManagedCoinBurn { @@ -6457,6 +6656,19 @@ mod decoder { } } + pub fn supra_account_fungible_transfer_only( + payload: &TransactionPayload, + ) -> Option { + if let TransactionPayload::EntryFunction(script) = payload { + Some(EntryFunctionCall::SupraAccountFungibleTransferOnly { + to: bcs::from_bytes(script.args().get(0)?).ok()?, + amount: 
bcs::from_bytes(script.args().get(1)?).ok()?, + }) + } else { + None + } + } + pub fn supra_account_set_allow_direct_coin_transfers( payload: &TransactionPayload, ) -> Option { @@ -7082,6 +7294,10 @@ static SCRIPT_FUNCTION_DECODER_MAP: once_cell::sync::Lazy TransactionPayload { coin_transfer( - aptos_types::utility_coin::SUPRA_COIN_TYPE.clone(), + SupraCoinType::type_tag(), to, amount, ) diff --git a/aptos-move/framework/move-stdlib/doc/acl.md b/aptos-move/framework/move-stdlib/doc/acl.md index 5310909afc734..66bf91f9ac9ea 100644 --- a/aptos-move/framework/move-stdlib/doc/acl.md +++ b/aptos-move/framework/move-stdlib/doc/acl.md @@ -114,7 +114,7 @@ Return an empty ACL. Add the address to the ACL. -
public fun add(acl: &mut acl::ACL, addr: address)
+
public fun add(self: &mut acl::ACL, addr: address)
 
@@ -123,9 +123,9 @@ Add the address to the ACL. Implementation -
public fun add(acl: &mut ACL, addr: address) {
-    assert!(!vector::contains(&mut acl.list, &addr), error::invalid_argument(ECONTAIN));
-    vector::push_back(&mut acl.list, addr);
+
public fun add(self: &mut ACL, addr: address) {
+    assert!(!vector::contains(&mut self.list, &addr), error::invalid_argument(ECONTAIN));
+    vector::push_back(&mut self.list, addr);
 }
 
@@ -140,7 +140,7 @@ Add the address to the ACL. Remove the address from the ACL. -
public fun remove(acl: &mut acl::ACL, addr: address)
+
public fun remove(self: &mut acl::ACL, addr: address)
 
@@ -149,10 +149,10 @@ Remove the address from the ACL. Implementation -
public fun remove(acl: &mut ACL, addr: address) {
-    let (found, index) = vector::index_of(&mut acl.list, &addr);
+
public fun remove(self: &mut ACL, addr: address) {
+    let (found, index) = vector::index_of(&mut self.list, &addr);
     assert!(found, error::invalid_argument(ENOT_CONTAIN));
-    vector::remove(&mut acl.list, index);
+    vector::remove(&mut self.list, index);
 }
 
@@ -167,7 +167,7 @@ Remove the address from the ACL. Return true iff the ACL contains the address. -
public fun contains(acl: &acl::ACL, addr: address): bool
+
public fun contains(self: &acl::ACL, addr: address): bool
 
@@ -176,8 +176,8 @@ Return true iff the ACL contains the address. Implementation -
public fun contains(acl: &ACL, addr: address): bool {
-    vector::contains(&acl.list, &addr)
+
public fun contains(self: &ACL, addr: address): bool {
+    vector::contains(&self.list, &addr)
 }
 
@@ -192,7 +192,7 @@ Return true iff the ACL contains the address. assert! that the ACL has the address. -
public fun assert_contains(acl: &acl::ACL, addr: address)
+
public fun assert_contains(self: &acl::ACL, addr: address)
 
@@ -201,8 +201,8 @@ assert! that the ACL has the address. Implementation -
public fun assert_contains(acl: &ACL, addr: address) {
-    assert!(contains(acl, addr), error::invalid_argument(ENOT_CONTAIN));
+
public fun assert_contains(self: &ACL, addr: address) {
+    assert!(contains(self, addr), error::invalid_argument(ENOT_CONTAIN));
 }
 
@@ -245,8 +245,8 @@ assert! that the ACL has the address. -
fun spec_contains(acl: ACL, addr: address): bool {
-   exists a in acl.list: a == addr
+
fun spec_contains(self: ACL, addr: address): bool {
+   exists a in self.list: a == addr
 }
 
@@ -257,14 +257,14 @@ assert! that the ACL has the address. ### Function `add` -
public fun add(acl: &mut acl::ACL, addr: address)
+
public fun add(self: &mut acl::ACL, addr: address)
 
-
aborts_if spec_contains(acl, addr) with error::INVALID_ARGUMENT;
-ensures spec_contains(acl, addr);
+
aborts_if spec_contains(self, addr) with error::INVALID_ARGUMENT;
+ensures spec_contains(self, addr);
 
@@ -274,14 +274,14 @@ assert! that the ACL has the address. ### Function `remove` -
public fun remove(acl: &mut acl::ACL, addr: address)
+
public fun remove(self: &mut acl::ACL, addr: address)
 
-
aborts_if !spec_contains(acl, addr) with error::INVALID_ARGUMENT;
-ensures !spec_contains(acl, addr);
+
aborts_if !spec_contains(self, addr) with error::INVALID_ARGUMENT;
+ensures !spec_contains(self, addr);
 
@@ -291,13 +291,13 @@ assert! that the ACL has the address. ### Function `contains` -
public fun contains(acl: &acl::ACL, addr: address): bool
+
public fun contains(self: &acl::ACL, addr: address): bool
 
-
ensures result == spec_contains(acl, addr);
+
ensures result == spec_contains(self, addr);
 
@@ -307,13 +307,13 @@ assert! that the ACL has the address. ### Function `assert_contains` -
public fun assert_contains(acl: &acl::ACL, addr: address)
+
public fun assert_contains(self: &acl::ACL, addr: address)
 
-
aborts_if !spec_contains(acl, addr) with error::INVALID_ARGUMENT;
+
aborts_if !spec_contains(self, addr) with error::INVALID_ARGUMENT;
 
diff --git a/aptos-move/framework/move-stdlib/doc/bcs.md b/aptos-move/framework/move-stdlib/doc/bcs.md index e01e1d89e1a28..9bdcf7f45f7d3 100644 --- a/aptos-move/framework/move-stdlib/doc/bcs.md +++ b/aptos-move/framework/move-stdlib/doc/bcs.md @@ -10,7 +10,9 @@ details on BCS. - [Function `to_bytes`](#0x1_bcs_to_bytes) +- [Function `serialized_size`](#0x1_bcs_serialized_size) - [Specification](#@Specification_0) + - [Function `serialized_size`](#@Specification_0_serialized_size)
@@ -21,7 +23,8 @@ details on BCS. ## Function `to_bytes` -Return the binary representation of v in BCS (Binary Canonical Serialization) format +Returns the binary representation of v in BCS (Binary Canonical Serialization) format. +Aborts with 0x1c5 error code if serialization fails.
public fun to_bytes<MoveValue>(v: &MoveValue): vector<u8>
@@ -38,6 +41,30 @@ Return the binary representation of v in BCS (Binary Canonical Seri
 
 
 
+
+
+
+
+## Function `serialized_size`
+
+Returns the size of the binary representation of v in BCS (Binary Canonical Serialization) format.
+Aborts with 0x1c5 error code if there is a failure when calculating serialized size.
+
+
+
public fun serialized_size<MoveValue>(v: &MoveValue): u64
+
+ + + +
+Implementation + + +
native public fun serialized_size<MoveValue>(v: &MoveValue): u64;
+
+ + +
@@ -56,4 +83,21 @@ Native function which is defined in the prover's prelude.
+ + + +### Function `serialized_size` + + +
public fun serialized_size<MoveValue>(v: &MoveValue): u64
+
+ + + + +
pragma opaque;
+ensures result == len(serialize(v));
+
+ + [move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/move-stdlib/doc/bit_vector.md b/aptos-move/framework/move-stdlib/doc/bit_vector.md index 873873de4e484..baeb685729360 100644 --- a/aptos-move/framework/move-stdlib/doc/bit_vector.md +++ b/aptos-move/framework/move-stdlib/doc/bit_vector.md @@ -155,10 +155,10 @@ The maximum allowed bitvector size ## Function `set` -Set the bit at bit_index in the bitvector regardless of its previous state. +Set the bit at bit_index in the self regardless of its previous state. -
public fun set(bitvector: &mut bit_vector::BitVector, bit_index: u64)
+
public fun set(self: &mut bit_vector::BitVector, bit_index: u64)
 
@@ -167,9 +167,9 @@ Set the bit at bit_index in the bitvector regardless o Implementation -
public fun set(bitvector: &mut BitVector, bit_index: u64) {
-    assert!(bit_index < vector::length(&bitvector.bit_field), EINDEX);
-    let x = vector::borrow_mut(&mut bitvector.bit_field, bit_index);
+
public fun set(self: &mut BitVector, bit_index: u64) {
+    assert!(bit_index < vector::length(&self.bit_field), EINDEX);
+    let x = vector::borrow_mut(&mut self.bit_field, bit_index);
     *x = true;
 }
 
@@ -182,10 +182,10 @@ Set the bit at bit_index in the bitvector regardless o ## Function `unset` -Unset the bit at bit_index in the bitvector regardless of its previous state. +Unset the bit at bit_index in the self regardless of its previous state. -
public fun unset(bitvector: &mut bit_vector::BitVector, bit_index: u64)
+
public fun unset(self: &mut bit_vector::BitVector, bit_index: u64)
 
@@ -194,9 +194,9 @@ Unset the bit at bit_index in the bitvector regardless Implementation -
public fun unset(bitvector: &mut BitVector, bit_index: u64) {
-    assert!(bit_index < vector::length(&bitvector.bit_field), EINDEX);
-    let x = vector::borrow_mut(&mut bitvector.bit_field, bit_index);
+
public fun unset(self: &mut BitVector, bit_index: u64) {
+    assert!(bit_index < vector::length(&self.bit_field), EINDEX);
+    let x = vector::borrow_mut(&mut self.bit_field, bit_index);
     *x = false;
 }
 
@@ -209,11 +209,11 @@ Unset the bit at bit_index in the bitvector regardless ## Function `shift_left` -Shift the bitvector left by amount. If amount is greater than the +Shift the self left by amount. If amount is greater than the bitvector's length the bitvector will be zeroed out. -
public fun shift_left(bitvector: &mut bit_vector::BitVector, amount: u64)
+
public fun shift_left(self: &mut bit_vector::BitVector, amount: u64)
 
@@ -222,24 +222,24 @@ bitvector's length the bitvector will be zeroed out. Implementation -
public fun shift_left(bitvector: &mut BitVector, amount: u64) {
-    if (amount >= bitvector.length) {
-        vector::for_each_mut(&mut bitvector.bit_field, |elem| {
+
public fun shift_left(self: &mut BitVector, amount: u64) {
+    if (amount >= self.length) {
+        vector::for_each_mut(&mut self.bit_field, |elem| {
             *elem = false;
         });
     } else {
         let i = amount;
 
-        while (i < bitvector.length) {
-            if (is_index_set(bitvector, i)) set(bitvector, i - amount)
-            else unset(bitvector, i - amount);
+        while (i < self.length) {
+            if (is_index_set(self, i)) set(self, i - amount)
+            else unset(self, i - amount);
             i = i + 1;
         };
 
-        i = bitvector.length - amount;
+        i = self.length - amount;
 
-        while (i < bitvector.length) {
-            unset(bitvector, i);
+        while (i < self.length) {
+            unset(self, i);
             i = i + 1;
         };
     }
@@ -254,11 +254,11 @@ bitvector's length the bitvector will be zeroed out.
 
 ## Function `is_index_set`
 
-Return the value of the bit at bit_index in the bitvector. true
+Return the value of the bit at bit_index in the self. true
 represents "1" and false represents a 0
 
 
-
public fun is_index_set(bitvector: &bit_vector::BitVector, bit_index: u64): bool
+
public fun is_index_set(self: &bit_vector::BitVector, bit_index: u64): bool
 
@@ -267,9 +267,9 @@ represents "1" and false represents a 0 Implementation -
public fun is_index_set(bitvector: &BitVector, bit_index: u64): bool {
-    assert!(bit_index < vector::length(&bitvector.bit_field), EINDEX);
-    *vector::borrow(&bitvector.bit_field, bit_index)
+
public fun is_index_set(self: &BitVector, bit_index: u64): bool {
+    assert!(bit_index < vector::length(&self.bit_field), EINDEX);
+    *vector::borrow(&self.bit_field, bit_index)
 }
 
@@ -284,7 +284,7 @@ represents "1" and false represents a 0 Return the length (number of usable bits) of this bitvector -
public fun length(bitvector: &bit_vector::BitVector): u64
+
public fun length(self: &bit_vector::BitVector): u64
 
@@ -293,8 +293,8 @@ Return the length (number of usable bits) of this bitvector Implementation -
public fun length(bitvector: &BitVector): u64 {
-    vector::length(&bitvector.bit_field)
+
public fun length(self: &BitVector): u64 {
+    vector::length(&self.bit_field)
 }
 
@@ -311,7 +311,7 @@ including) start_index in the bitvector. If there is n sequence, then 0 is returned. -
public fun longest_set_sequence_starting_at(bitvector: &bit_vector::BitVector, start_index: u64): u64
+
public fun longest_set_sequence_starting_at(self: &bit_vector::BitVector, start_index: u64): u64
 
@@ -320,22 +320,22 @@ sequence, then 0 is returned. Implementation -
public fun longest_set_sequence_starting_at(bitvector: &BitVector, start_index: u64): u64 {
-    assert!(start_index < bitvector.length, EINDEX);
+
public fun longest_set_sequence_starting_at(self: &BitVector, start_index: u64): u64 {
+    assert!(start_index < self.length, EINDEX);
     let index = start_index;
 
     // Find the greatest index in the vector such that all indices less than it are set.
     while ({
         spec {
             invariant index >= start_index;
-            invariant index == start_index || is_index_set(bitvector, index - 1);
-            invariant index == start_index || index - 1 < vector::length(bitvector.bit_field);
-            invariant forall j in start_index..index: is_index_set(bitvector, j);
-            invariant forall j in start_index..index: j < vector::length(bitvector.bit_field);
+            invariant index == start_index || is_index_set(self, index - 1);
+            invariant index == start_index || index - 1 < vector::length(self.bit_field);
+            invariant forall j in start_index..index: is_index_set(self, j);
+            invariant forall j in start_index..index: j < vector::length(self.bit_field);
         };
-        index < bitvector.length
+        index < self.length
     }) {
-        if (!is_index_set(bitvector, index)) break;
+        if (!is_index_set(self, index)) break;
         index = index + 1;
     };
 
@@ -354,7 +354,7 @@ sequence, then 0 is returned.
 
 
 
#[verify_only]
-public fun shift_left_for_verification_only(bitvector: &mut bit_vector::BitVector, amount: u64)
+public fun shift_left_for_verification_only(self: &mut bit_vector::BitVector, amount: u64)
 
@@ -363,19 +363,19 @@ sequence, then 0 is returned. Implementation -
public fun shift_left_for_verification_only(bitvector: &mut BitVector, amount: u64) {
-    if (amount >= bitvector.length) {
-        let len = vector::length(&bitvector.bit_field);
+
public fun shift_left_for_verification_only(self: &mut BitVector, amount: u64) {
+    if (amount >= self.length) {
+        let len = vector::length(&self.bit_field);
         let i = 0;
         while ({
             spec {
-                invariant len == bitvector.length;
-                invariant forall k in 0..i: !bitvector.bit_field[k];
-                invariant forall k in i..bitvector.length: bitvector.bit_field[k] == old(bitvector).bit_field[k];
+                invariant len == self.length;
+                invariant forall k in 0..i: !self.bit_field[k];
+                invariant forall k in i..self.length: self.bit_field[k] == old(self).bit_field[k];
             };
             i < len
         }) {
-            let elem = vector::borrow_mut(&mut bitvector.bit_field, i);
+            let elem = vector::borrow_mut(&mut self.bit_field, i);
             *elem = false;
             i = i + 1;
         };
@@ -385,30 +385,30 @@ sequence, then 0 is returned.
         while ({
             spec {
                 invariant i >= amount;
-                invariant bitvector.length == old(bitvector).length;
-                invariant forall j in amount..i: old(bitvector).bit_field[j] == bitvector.bit_field[j - amount];
-                invariant forall j in (i-amount)..bitvector.length : old(bitvector).bit_field[j] == bitvector.bit_field[j];
-                invariant forall k in 0..i-amount: bitvector.bit_field[k] == old(bitvector).bit_field[k + amount];
+                invariant self.length == old(self).length;
+                invariant forall j in amount..i: old(self).bit_field[j] == self.bit_field[j - amount];
+                invariant forall j in (i-amount)..self.length : old(self).bit_field[j] == self.bit_field[j];
+                invariant forall k in 0..i-amount: self.bit_field[k] == old(self).bit_field[k + amount];
             };
-            i < bitvector.length
+            i < self.length
         }) {
-            if (is_index_set(bitvector, i)) set(bitvector, i - amount)
-            else unset(bitvector, i - amount);
+            if (is_index_set(self, i)) set(self, i - amount)
+            else unset(self, i - amount);
             i = i + 1;
         };
 
 
-        i = bitvector.length - amount;
+        i = self.length - amount;
 
         while ({
             spec {
-                invariant forall j in bitvector.length - amount..i: !bitvector.bit_field[j];
-                invariant forall k in 0..bitvector.length - amount: bitvector.bit_field[k] == old(bitvector).bit_field[k + amount];
-                invariant i >= bitvector.length - amount;
+                invariant forall j in self.length - amount..i: !self.bit_field[j];
+                invariant forall k in 0..self.length - amount: self.bit_field[k] == old(self).bit_field[k + amount];
+                invariant i >= self.length - amount;
             };
-            i < bitvector.length
+            i < self.length
         }) {
-            unset(bitvector, i);
+            unset(self, i);
             i = i + 1;
         }
     }
@@ -492,14 +492,14 @@ sequence, then 0 is returned.
 ### Function `set`
 
 
-
public fun set(bitvector: &mut bit_vector::BitVector, bit_index: u64)
+
public fun set(self: &mut bit_vector::BitVector, bit_index: u64)
 
include SetAbortsIf;
-ensures bitvector.bit_field[bit_index];
+ensures self.bit_field[bit_index];
 
@@ -509,9 +509,9 @@ sequence, then 0 is returned.
schema SetAbortsIf {
-    bitvector: BitVector;
+    self: BitVector;
     bit_index: u64;
-    aborts_if bit_index >= length(bitvector) with EINDEX;
+    aborts_if bit_index >= length(self) with EINDEX;
 }
 
@@ -522,14 +522,14 @@ sequence, then 0 is returned. ### Function `unset` -
public fun unset(bitvector: &mut bit_vector::BitVector, bit_index: u64)
+
public fun unset(self: &mut bit_vector::BitVector, bit_index: u64)
 
include UnsetAbortsIf;
-ensures !bitvector.bit_field[bit_index];
+ensures !self.bit_field[bit_index];
 
@@ -539,9 +539,9 @@ sequence, then 0 is returned.
schema UnsetAbortsIf {
-    bitvector: BitVector;
+    self: BitVector;
     bit_index: u64;
-    aborts_if bit_index >= length(bitvector) with EINDEX;
+    aborts_if bit_index >= length(self) with EINDEX;
 }
 
@@ -552,7 +552,7 @@ sequence, then 0 is returned. ### Function `shift_left` -
public fun shift_left(bitvector: &mut bit_vector::BitVector, amount: u64)
+
public fun shift_left(self: &mut bit_vector::BitVector, amount: u64)
 
@@ -568,14 +568,14 @@ sequence, then 0 is returned. ### Function `is_index_set` -
public fun is_index_set(bitvector: &bit_vector::BitVector, bit_index: u64): bool
+
public fun is_index_set(self: &bit_vector::BitVector, bit_index: u64): bool
 
include IsIndexSetAbortsIf;
-ensures result == bitvector.bit_field[bit_index];
+ensures result == self.bit_field[bit_index];
 
@@ -585,9 +585,9 @@ sequence, then 0 is returned.
schema IsIndexSetAbortsIf {
-    bitvector: BitVector;
+    self: BitVector;
     bit_index: u64;
-    aborts_if bit_index >= length(bitvector) with EINDEX;
+    aborts_if bit_index >= length(self) with EINDEX;
 }
 
@@ -597,11 +597,11 @@ sequence, then 0 is returned. -
fun spec_is_index_set(bitvector: BitVector, bit_index: u64): bool {
-   if (bit_index >= length(bitvector)) {
+
fun spec_is_index_set(self: BitVector, bit_index: u64): bool {
+   if (bit_index >= length(self)) {
        false
    } else {
-       bitvector.bit_field[bit_index]
+       self.bit_field[bit_index]
    }
 }
 
@@ -613,14 +613,14 @@ sequence, then 0 is returned. ### Function `longest_set_sequence_starting_at` -
public fun longest_set_sequence_starting_at(bitvector: &bit_vector::BitVector, start_index: u64): u64
+
public fun longest_set_sequence_starting_at(self: &bit_vector::BitVector, start_index: u64): u64
 
-
aborts_if start_index >= bitvector.length;
-ensures forall i in start_index..result: is_index_set(bitvector, i);
+
aborts_if start_index >= self.length;
+ensures forall i in start_index..result: is_index_set(self, i);
 
@@ -631,18 +631,18 @@ sequence, then 0 is returned.
#[verify_only]
-public fun shift_left_for_verification_only(bitvector: &mut bit_vector::BitVector, amount: u64)
+public fun shift_left_for_verification_only(self: &mut bit_vector::BitVector, amount: u64)
 
aborts_if false;
-ensures amount >= bitvector.length ==> (forall k in 0..bitvector.length: !bitvector.bit_field[k]);
-ensures amount < bitvector.length ==>
-    (forall i in bitvector.length - amount..bitvector.length: !bitvector.bit_field[i]);
-ensures amount < bitvector.length ==>
-    (forall i in 0..bitvector.length - amount: bitvector.bit_field[i] == old(bitvector).bit_field[i + amount]);
+ensures amount >= self.length ==> (forall k in 0..self.length: !self.bit_field[k]);
+ensures amount < self.length ==>
+    (forall i in self.length - amount..self.length: !self.bit_field[i]);
+ensures amount < self.length ==>
+    (forall i in 0..self.length - amount: self.bit_field[i] == old(self).bit_field[i + amount]);
 
diff --git a/aptos-move/framework/move-stdlib/doc/features.md b/aptos-move/framework/move-stdlib/doc/features.md index f80e36c15bb98..07594120e5a32 100644 --- a/aptos-move/framework/move-stdlib/doc/features.md +++ b/aptos-move/framework/move-stdlib/doc/features.md @@ -3,8 +3,8 @@ # Module `0x1::features` -Defines feature flags for Supra. Those are used in Supra specific implementations of features in -the Move stdlib, the Supra stdlib, and the Supra framework. +Defines feature flags for Aptos. Those are used in Aptos specific implementations of features in +the Move stdlib, the Aptos stdlib, and the Aptos framework. ============================================================================================ Feature Flag Definitions @@ -129,6 +129,10 @@ return true. - [Function `default_to_concurrent_fungible_balance_enabled`](#0x1_features_default_to_concurrent_fungible_balance_enabled) - [Function `get_abort_if_multisig_payload_mismatch_feature`](#0x1_features_get_abort_if_multisig_payload_mismatch_feature) - [Function `abort_if_multisig_payload_mismatch_enabled`](#0x1_features_abort_if_multisig_payload_mismatch_enabled) +- [Function `get_transaction_simulation_enhancement_feature`](#0x1_features_get_transaction_simulation_enhancement_feature) +- [Function `transaction_simulation_enhancement_enabled`](#0x1_features_transaction_simulation_enhancement_enabled) +- [Function `get_collection_owner_feature`](#0x1_features_get_collection_owner_feature) +- [Function `is_collection_owner_enabled`](#0x1_features_is_collection_owner_enabled) - [Function `change_feature_flags`](#0x1_features_change_feature_flags) - [Function `change_feature_flags_internal`](#0x1_features_change_feature_flags_internal) - [Function `change_feature_flags_for_next_epoch`](#0x1_features_change_feature_flags_for_next_epoch) @@ -352,6 +356,15 @@ Lifetime: transient + + + + +
const COLLECTION_OWNER: u64 = 79;
+
+ + + Whether gas fees are collected and distributed to the block proposers. @@ -867,6 +880,20 @@ Lifetime: transient + + +Whether the simulation enhancement is enabled. This enables the simulation without an authentication check, +the sponsored transaction simulation when the fee payer is set to 0x0, and the multisig transaction +simulation consistent with the execution. + +Lifetime: transient + + +
const TRANSACTION_SIMULATION_ENHANCEMENT: u64 = 78;
+
+ + + Whether during upgrade compatibility checking, friend functions should be treated similar like @@ -3152,6 +3179,98 @@ Lifetime: transient + + + + +## Function `get_transaction_simulation_enhancement_feature` + + + +
public fun get_transaction_simulation_enhancement_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_transaction_simulation_enhancement_feature(): u64 { TRANSACTION_SIMULATION_ENHANCEMENT }
+
+ + + +
+ + + +## Function `transaction_simulation_enhancement_enabled` + + + +
public fun transaction_simulation_enhancement_enabled(): bool
+
+ + + +
+Implementation + + +
public fun transaction_simulation_enhancement_enabled(): bool acquires Features {
+    is_enabled(TRANSACTION_SIMULATION_ENHANCEMENT)
+}
+
+ + + +
+ + + +## Function `get_collection_owner_feature` + + + +
public fun get_collection_owner_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_collection_owner_feature(): u64 { COLLECTION_OWNER }
+
+ + + +
+ + + +## Function `is_collection_owner_enabled` + + + +
public fun is_collection_owner_enabled(): bool
+
+ + + +
+Implementation + + +
public fun is_collection_owner_enabled(): bool acquires Features {
+    is_enabled(COLLECTION_OWNER)
+}
+
+ + +
@@ -3604,6 +3723,17 @@ Helper to check whether a feature flag is enabled. + + + + +
fun spec_simulation_enhancement_enabled(): bool {
+   spec_is_enabled(TRANSACTION_SIMULATION_ENHANCEMENT)
+}
+
+ + + ### Function `abort_if_multisig_payload_mismatch_enabled` diff --git a/aptos-move/framework/move-stdlib/doc/fixed_point32.md b/aptos-move/framework/move-stdlib/doc/fixed_point32.md index ee8010e510ccc..1c17e78c9dcba 100644 --- a/aptos-move/framework/move-stdlib/doc/fixed_point32.md +++ b/aptos-move/framework/move-stdlib/doc/fixed_point32.md @@ -291,7 +291,7 @@ adding or subtracting FixedPoint32 values, can be done using the raw values directly. -
public fun get_raw_value(num: fixed_point32::FixedPoint32): u64
+
public fun get_raw_value(self: fixed_point32::FixedPoint32): u64
 
@@ -300,8 +300,8 @@ values directly. Implementation -
public fun get_raw_value(num: FixedPoint32): u64 {
-    num.value
+
public fun get_raw_value(self: FixedPoint32): u64 {
+    self.value
 }
 
@@ -316,7 +316,7 @@ values directly. Returns true if the ratio is zero. -
public fun is_zero(num: fixed_point32::FixedPoint32): bool
+
public fun is_zero(self: fixed_point32::FixedPoint32): bool
 
@@ -325,8 +325,8 @@ Returns true if the ratio is zero. Implementation -
public fun is_zero(num: FixedPoint32): bool {
-    num.value == 0
+
public fun is_zero(self: FixedPoint32): bool {
+    self.value == 0
 }
 
@@ -426,7 +426,7 @@ Create a fixedpoint value from a u64 value. Returns the largest integer less than or equal to a given number. -
public fun floor(num: fixed_point32::FixedPoint32): u64
+
public fun floor(self: fixed_point32::FixedPoint32): u64
 
@@ -435,8 +435,8 @@ Returns the largest integer less than or equal to a given number. Implementation -
public fun floor(num: FixedPoint32): u64 {
-    num.value >> 32
+
public fun floor(self: FixedPoint32): u64 {
+    self.value >> 32
 }
 
@@ -451,7 +451,7 @@ Returns the largest integer less than or equal to a given number. Rounds up the given FixedPoint32 to the next largest integer. -
public fun ceil(num: fixed_point32::FixedPoint32): u64
+
public fun ceil(self: fixed_point32::FixedPoint32): u64
 
@@ -460,9 +460,9 @@ Rounds up the given FixedPoint32 to the next largest integer. Implementation -
public fun ceil(num: FixedPoint32): u64 {
-    let floored_num = floor(num) << 32;
-    if (num.value == floored_num) {
+
public fun ceil(self: FixedPoint32): u64 {
+    let floored_num = floor(self) << 32;
+    if (self.value == floored_num) {
         return floored_num >> 32
     };
     let val = ((floored_num as u128) + (1 << 32));
@@ -481,7 +481,7 @@ Rounds up the given FixedPoint32 to the next largest integer.
 Returns the value of a FixedPoint32 to the nearest integer.
 
 
-
public fun round(num: fixed_point32::FixedPoint32): u64
+
public fun round(self: fixed_point32::FixedPoint32): u64
 
@@ -490,13 +490,13 @@ Returns the value of a FixedPoint32 to the nearest integer. Implementation -
public fun round(num: FixedPoint32): u64 {
-    let floored_num = floor(num) << 32;
+
public fun round(self: FixedPoint32): u64 {
+    let floored_num = floor(self) << 32;
     let boundary = floored_num + ((1 << 32) / 2);
-    if (num.value < boundary) {
+    if (self.value < boundary) {
         floored_num >> 32
     } else {
-        ceil(num)
+        ceil(self)
     }
 }
 
@@ -780,7 +780,7 @@ Returns the value of a FixedPoint32 to the nearest integer. ### Function `floor` -
public fun floor(num: fixed_point32::FixedPoint32): u64
+
public fun floor(self: fixed_point32::FixedPoint32): u64
 
@@ -788,7 +788,7 @@ Returns the value of a FixedPoint32 to the nearest integer.
pragma opaque;
 aborts_if false;
-ensures result == spec_floor(num);
+ensures result == spec_floor(self);
 
@@ -797,12 +797,12 @@ Returns the value of a FixedPoint32 to the nearest integer. -
fun spec_floor(val: FixedPoint32): u64 {
-   let fractional = val.value % (1 << 32);
+
fun spec_floor(self: FixedPoint32): u64 {
+   let fractional = self.value % (1 << 32);
    if (fractional == 0) {
-       val.value >> 32
+       self.value >> 32
    } else {
-       (val.value - fractional) >> 32
+       (self.value - fractional) >> 32
    }
 }
 
@@ -814,7 +814,7 @@ Returns the value of a FixedPoint32 to the nearest integer. ### Function `ceil` -
public fun ceil(num: fixed_point32::FixedPoint32): u64
+
public fun ceil(self: fixed_point32::FixedPoint32): u64
 
@@ -823,7 +823,7 @@ Returns the value of a FixedPoint32 to the nearest integer.
pragma verify_duration_estimate = 120;
 pragma opaque;
 aborts_if false;
-ensures result == spec_ceil(num);
+ensures result == spec_ceil(self);
 
@@ -832,13 +832,13 @@ Returns the value of a FixedPoint32 to the nearest integer. -
fun spec_ceil(val: FixedPoint32): u64 {
-   let fractional = val.value % (1 << 32);
+
fun spec_ceil(self: FixedPoint32): u64 {
+   let fractional = self.value % (1 << 32);
    let one = 1 << 32;
    if (fractional == 0) {
-       val.value >> 32
+       self.value >> 32
    } else {
-       (val.value - fractional + one) >> 32
+       (self.value - fractional + one) >> 32
    }
 }
 
@@ -850,7 +850,7 @@ Returns the value of a FixedPoint32 to the nearest integer. ### Function `round` -
public fun round(num: fixed_point32::FixedPoint32): u64
+
public fun round(self: fixed_point32::FixedPoint32): u64
 
@@ -859,7 +859,7 @@ Returns the value of a FixedPoint32 to the nearest integer.
pragma verify_duration_estimate = 120;
 pragma opaque;
 aborts_if false;
-ensures result == spec_round(num);
+ensures result == spec_round(self);
 
@@ -868,14 +868,14 @@ Returns the value of a FixedPoint32 to the nearest integer. -
fun spec_round(val: FixedPoint32): u64 {
-   let fractional = val.value % (1 << 32);
+
fun spec_round(self: FixedPoint32): u64 {
+   let fractional = self.value % (1 << 32);
    let boundary = (1 << 32) / 2;
    let one = 1 << 32;
    if (fractional < boundary) {
-       (val.value - fractional) >> 32
+       (self.value - fractional) >> 32
    } else {
-       (val.value - fractional + one) >> 32
+       (self.value - fractional + one) >> 32
    }
 }
 
diff --git a/aptos-move/framework/move-stdlib/doc/option.md b/aptos-move/framework/move-stdlib/doc/option.md index 914a948f5c978..5da326c846261 100644 --- a/aptos-move/framework/move-stdlib/doc/option.md +++ b/aptos-move/framework/move-stdlib/doc/option.md @@ -208,10 +208,10 @@ Return an Option containi ## Function `is_none` -Return true if t does not hold a value +Return true if self does not hold a value -
public fun is_none<Element>(t: &option::Option<Element>): bool
+
public fun is_none<Element>(self: &option::Option<Element>): bool
 
@@ -220,8 +220,8 @@ Return true if t does not hold a value Implementation -
public fun is_none<Element>(t: &Option<Element>): bool {
-    vector::is_empty(&t.vec)
+
public fun is_none<Element>(self: &Option<Element>): bool {
+    vector::is_empty(&self.vec)
 }
 
@@ -233,10 +233,10 @@ Return true if t does not hold a value ## Function `is_some` -Return true if t holds a value +Return true if self holds a value -
public fun is_some<Element>(t: &option::Option<Element>): bool
+
public fun is_some<Element>(self: &option::Option<Element>): bool
 
@@ -245,8 +245,8 @@ Return true if t holds a value Implementation -
public fun is_some<Element>(t: &Option<Element>): bool {
-    !vector::is_empty(&t.vec)
+
public fun is_some<Element>(self: &Option<Element>): bool {
+    !vector::is_empty(&self.vec)
 }
 
@@ -258,11 +258,11 @@ Return true if t holds a value ## Function `contains` -Return true if the value in t is equal to e_ref -Always returns false if t does not hold a value +Return true if the value in self is equal to e_ref +Always returns false if self does not hold a value -
public fun contains<Element>(t: &option::Option<Element>, e_ref: &Element): bool
+
public fun contains<Element>(self: &option::Option<Element>, e_ref: &Element): bool
 
@@ -271,8 +271,8 @@ Always returns false if t does not hold a value Implementation -
public fun contains<Element>(t: &Option<Element>, e_ref: &Element): bool {
-    vector::contains(&t.vec, e_ref)
+
public fun contains<Element>(self: &Option<Element>, e_ref: &Element): bool {
+    vector::contains(&self.vec, e_ref)
 }
 
@@ -284,11 +284,11 @@ Always returns false if t does not hold a value ## Function `borrow` -Return an immutable reference to the value inside t -Aborts if t does not hold a value +Return an immutable reference to the value inside self +Aborts if self does not hold a value -
public fun borrow<Element>(t: &option::Option<Element>): &Element
+
public fun borrow<Element>(self: &option::Option<Element>): &Element
 
@@ -297,9 +297,9 @@ Aborts if t does not hold a value Implementation -
public fun borrow<Element>(t: &Option<Element>): &Element {
-    assert!(is_some(t), EOPTION_NOT_SET);
-    vector::borrow(&t.vec, 0)
+
public fun borrow<Element>(self: &Option<Element>): &Element {
+    assert!(is_some(self), EOPTION_NOT_SET);
+    vector::borrow(&self.vec, 0)
 }
 
@@ -311,11 +311,11 @@ Aborts if t does not hold a value ## Function `borrow_with_default` -Return a reference to the value inside t if it holds one -Return default_ref if t does not hold a value +Return a reference to the value inside self if it holds one +Return default_ref if self does not hold a value -
public fun borrow_with_default<Element>(t: &option::Option<Element>, default_ref: &Element): &Element
+
public fun borrow_with_default<Element>(self: &option::Option<Element>, default_ref: &Element): &Element
 
@@ -324,8 +324,8 @@ Return default_ref if t does not hold a value Implementation -
public fun borrow_with_default<Element>(t: &Option<Element>, default_ref: &Element): &Element {
-    let vec_ref = &t.vec;
+
public fun borrow_with_default<Element>(self: &Option<Element>, default_ref: &Element): &Element {
+    let vec_ref = &self.vec;
     if (vector::is_empty(vec_ref)) default_ref
     else vector::borrow(vec_ref, 0)
 }
@@ -339,11 +339,11 @@ Return default_ref if t does not hold a value
 
 ## Function `get_with_default`
 
-Return the value inside t if it holds one
-Return default if t does not hold a value
+Return the value inside self if it holds one
+Return default if self does not hold a value
 
 
-
public fun get_with_default<Element: copy, drop>(t: &option::Option<Element>, default: Element): Element
+
public fun get_with_default<Element: copy, drop>(self: &option::Option<Element>, default: Element): Element
 
@@ -353,10 +353,10 @@ Return default if t does not hold a value
public fun get_with_default<Element: copy + drop>(
-    t: &Option<Element>,
+    self: &Option<Element>,
     default: Element,
 ): Element {
-    let vec_ref = &t.vec;
+    let vec_ref = &self.vec;
     if (vector::is_empty(vec_ref)) default
     else *vector::borrow(vec_ref, 0)
 }
@@ -370,11 +370,11 @@ Return default if t does not hold a value
 
 ## Function `fill`
 
-Convert the none option t to a some option by adding e.
-Aborts if t already holds a value
+Convert the none option self to a some option by adding e.
+Aborts if self already holds a value
 
 
-
public fun fill<Element>(t: &mut option::Option<Element>, e: Element)
+
public fun fill<Element>(self: &mut option::Option<Element>, e: Element)
 
@@ -383,8 +383,8 @@ Aborts if t already holds a value Implementation -
public fun fill<Element>(t: &mut Option<Element>, e: Element) {
-    let vec_ref = &mut t.vec;
+
public fun fill<Element>(self: &mut Option<Element>, e: Element) {
+    let vec_ref = &mut self.vec;
     if (vector::is_empty(vec_ref)) vector::push_back(vec_ref, e)
     else abort EOPTION_IS_SET
 }
@@ -398,11 +398,11 @@ Aborts if t already holds a value
 
 ## Function `extract`
 
-Convert a some option to a none by removing and returning the value stored inside t
-Aborts if t does not hold a value
+Convert a some option to a none by removing and returning the value stored inside self
+Aborts if self does not hold a value
 
 
-
public fun extract<Element>(t: &mut option::Option<Element>): Element
+
public fun extract<Element>(self: &mut option::Option<Element>): Element
 
@@ -411,9 +411,9 @@ Aborts if t does not hold a value Implementation -
public fun extract<Element>(t: &mut Option<Element>): Element {
-    assert!(is_some(t), EOPTION_NOT_SET);
-    vector::pop_back(&mut t.vec)
+
public fun extract<Element>(self: &mut Option<Element>): Element {
+    assert!(is_some(self), EOPTION_NOT_SET);
+    vector::pop_back(&mut self.vec)
 }
 
@@ -425,11 +425,11 @@ Aborts if t does not hold a value ## Function `borrow_mut` -Return a mutable reference to the value inside t -Aborts if t does not hold a value +Return a mutable reference to the value inside self +Aborts if self does not hold a value -
public fun borrow_mut<Element>(t: &mut option::Option<Element>): &mut Element
+
public fun borrow_mut<Element>(self: &mut option::Option<Element>): &mut Element
 
@@ -438,9 +438,9 @@ Aborts if t does not hold a value Implementation -
public fun borrow_mut<Element>(t: &mut Option<Element>): &mut Element {
-    assert!(is_some(t), EOPTION_NOT_SET);
-    vector::borrow_mut(&mut t.vec, 0)
+
public fun borrow_mut<Element>(self: &mut Option<Element>): &mut Element {
+    assert!(is_some(self), EOPTION_NOT_SET);
+    vector::borrow_mut(&mut self.vec, 0)
 }
 
@@ -452,11 +452,11 @@ Aborts if t does not hold a value ## Function `swap` -Swap the old value inside t with e and return the old value -Aborts if t does not hold a value +Swap the old value inside self with e and return the old value +Aborts if self does not hold a value -
public fun swap<Element>(t: &mut option::Option<Element>, e: Element): Element
+
public fun swap<Element>(self: &mut option::Option<Element>, e: Element): Element
 
@@ -465,9 +465,9 @@ Aborts if t does not hold a value Implementation -
public fun swap<Element>(t: &mut Option<Element>, e: Element): Element {
-    assert!(is_some(t), EOPTION_NOT_SET);
-    let vec_ref = &mut t.vec;
+
public fun swap<Element>(self: &mut Option<Element>, e: Element): Element {
+    assert!(is_some(self), EOPTION_NOT_SET);
+    let vec_ref = &mut self.vec;
     let old_value = vector::pop_back(vec_ref);
     vector::push_back(vec_ref, e);
     old_value
@@ -482,12 +482,12 @@ Aborts if t does not hold a value
 
 ## Function `swap_or_fill`
 
-Swap the old value inside t with e and return the old value;
+Swap the old value inside self with e and return the old value;
 or if there is no old value, fill it with e.
-Different from swap(), swap_or_fill() allows for t not holding a value.
+Different from swap(), swap_or_fill() allows for self not holding a value.
 
 
-
public fun swap_or_fill<Element>(t: &mut option::Option<Element>, e: Element): option::Option<Element>
+
public fun swap_or_fill<Element>(self: &mut option::Option<Element>, e: Element): option::Option<Element>
 
@@ -496,8 +496,8 @@ Different from swap(), swap_or_fill() allows for t not holding a va Implementation -
public fun swap_or_fill<Element>(t: &mut Option<Element>, e: Element): Option<Element> {
-    let vec_ref = &mut t.vec;
+
public fun swap_or_fill<Element>(self: &mut Option<Element>, e: Element): Option<Element> {
+    let vec_ref = &mut self.vec;
     let old_value = if (vector::is_empty(vec_ref)) none()
         else some(vector::pop_back(vec_ref));
     vector::push_back(vec_ref, e);
@@ -513,10 +513,10 @@ Different from swap(), swap_or_fill() allows for t not holding a va
 
 ## Function `destroy_with_default`
 
-Destroys t. If t holds a value, return it. Returns default otherwise
+Destroys self. If self holds a value, return it. Returns default otherwise
 
 
-
public fun destroy_with_default<Element: drop>(t: option::Option<Element>, default: Element): Element
+
public fun destroy_with_default<Element: drop>(self: option::Option<Element>, default: Element): Element
 
@@ -525,8 +525,8 @@ Destroys t. If t holds a value, return it. Returns Implementation -
public fun destroy_with_default<Element: drop>(t: Option<Element>, default: Element): Element {
-    let Option { vec } = t;
+
public fun destroy_with_default<Element: drop>(self: Option<Element>, default: Element): Element {
+    let Option { vec } = self;
     if (vector::is_empty(&mut vec)) default
     else vector::pop_back(&mut vec)
 }
@@ -540,11 +540,11 @@ Destroys t. If t holds a value, return it. Returns t and return its contents
-Aborts if t does not hold a value
+Unpack self and return its contents
+Aborts if self does not hold a value
 
 
-
public fun destroy_some<Element>(t: option::Option<Element>): Element
+
public fun destroy_some<Element>(self: option::Option<Element>): Element
 
@@ -553,9 +553,9 @@ Aborts if t does not hold a value Implementation -
public fun destroy_some<Element>(t: Option<Element>): Element {
-    assert!(is_some(&t), EOPTION_NOT_SET);
-    let Option { vec } = t;
+
public fun destroy_some<Element>(self: Option<Element>): Element {
+    assert!(is_some(&self), EOPTION_NOT_SET);
+    let Option { vec } = self;
     let elem = vector::pop_back(&mut vec);
     vector::destroy_empty(vec);
     elem
@@ -570,11 +570,11 @@ Aborts if t does not hold a value
 
 ## Function `destroy_none`
 
-Unpack t
-Aborts if t holds a value
+Unpack self
+Aborts if self holds a value
 
 
-
public fun destroy_none<Element>(t: option::Option<Element>)
+
public fun destroy_none<Element>(self: option::Option<Element>)
 
@@ -583,9 +583,9 @@ Aborts if t holds a value Implementation -
public fun destroy_none<Element>(t: Option<Element>) {
-    assert!(is_none(&t), EOPTION_IS_SET);
-    let Option { vec } = t;
+
public fun destroy_none<Element>(self: Option<Element>) {
+    assert!(is_none(&self), EOPTION_IS_SET);
+    let Option { vec } = self;
     vector::destroy_empty(vec)
 }
 
@@ -598,11 +598,11 @@ Aborts if t holds a value ## Function `to_vec` -Convert t into a vector of length 1 if it is Some, +Convert self into a vector of length 1 if it is Some, and an empty vector otherwise -
public fun to_vec<Element>(t: option::Option<Element>): vector<Element>
+
public fun to_vec<Element>(self: option::Option<Element>): vector<Element>
 
@@ -611,8 +611,8 @@ and an empty vector otherwise Implementation -
public fun to_vec<Element>(t: Option<Element>): vector<Element> {
-    let Option { vec } = t;
+
public fun to_vec<Element>(self: Option<Element>): vector<Element> {
+    let Option { vec } = self;
     vec
 }
 
@@ -628,7 +628,7 @@ and an empty vector otherwise Apply the function to the optional element, consuming it. Does nothing if no value present. -
public fun for_each<Element>(o: option::Option<Element>, f: |Element|)
+
public fun for_each<Element>(self: option::Option<Element>, f: |Element|)
 
@@ -637,11 +637,11 @@ Apply the function to the optional element, consuming it. Does nothing if no val Implementation -
public inline fun for_each<Element>(o: Option<Element>, f: |Element|) {
-    if (is_some(&o)) {
-        f(destroy_some(o))
+
public inline fun for_each<Element>(self: Option<Element>, f: |Element|) {
+    if (is_some(&self)) {
+        f(destroy_some(self))
     } else {
-        destroy_none(o)
+        destroy_none(self)
     }
 }
 
@@ -657,7 +657,7 @@ Apply the function to the optional element, consuming it. Does nothing if no val Apply the function to the optional element reference. Does nothing if no value present. -
public fun for_each_ref<Element>(o: &option::Option<Element>, f: |&Element|)
+
public fun for_each_ref<Element>(self: &option::Option<Element>, f: |&Element|)
 
@@ -666,9 +666,9 @@ Apply the function to the optional element reference. Does nothing if no value p Implementation -
public inline fun for_each_ref<Element>(o: &Option<Element>, f: |&Element|) {
-    if (is_some(o)) {
-        f(borrow(o))
+
public inline fun for_each_ref<Element>(self: &Option<Element>, f: |&Element|) {
+    if (is_some(self)) {
+        f(borrow(self))
     }
 }
 
@@ -684,7 +684,7 @@ Apply the function to the optional element reference. Does nothing if no value p Apply the function to the optional element reference. Does nothing if no value present. -
public fun for_each_mut<Element>(o: &mut option::Option<Element>, f: |&mut Element|)
+
public fun for_each_mut<Element>(self: &mut option::Option<Element>, f: |&mut Element|)
 
@@ -693,9 +693,9 @@ Apply the function to the optional element reference. Does nothing if no value p Implementation -
public inline fun for_each_mut<Element>(o: &mut Option<Element>, f: |&mut Element|) {
-    if (is_some(o)) {
-        f(borrow_mut(o))
+
public inline fun for_each_mut<Element>(self: &mut Option<Element>, f: |&mut Element|) {
+    if (is_some(self)) {
+        f(borrow_mut(self))
     }
 }
 
@@ -711,7 +711,7 @@ Apply the function to the optional element reference. Does nothing if no value p Folds the function over the optional element. -
public fun fold<Accumulator, Element>(o: option::Option<Element>, init: Accumulator, f: |(Accumulator, Element)|Accumulator): Accumulator
+
public fun fold<Accumulator, Element>(self: option::Option<Element>, init: Accumulator, f: |(Accumulator, Element)|Accumulator): Accumulator
 
@@ -721,14 +721,14 @@ Folds the function over the optional element.
public inline fun fold<Accumulator, Element>(
-    o: Option<Element>,
+    self: Option<Element>,
     init: Accumulator,
     f: |Accumulator,Element|Accumulator
 ): Accumulator {
-    if (is_some(&o)) {
-        f(init, destroy_some(o))
+    if (is_some(&self)) {
+        f(init, destroy_some(self))
     } else {
-        destroy_none(o);
+        destroy_none(self);
         init
     }
 }
@@ -745,7 +745,7 @@ Folds the function over the optional element.
 Maps the content of an option.
 
 
-
public fun map<Element, OtherElement>(o: option::Option<Element>, f: |Element|OtherElement): option::Option<OtherElement>
+
public fun map<Element, OtherElement>(self: option::Option<Element>, f: |Element|OtherElement): option::Option<OtherElement>
 
@@ -754,11 +754,11 @@ Maps the content of an option. Implementation -
public inline fun map<Element, OtherElement>(o: Option<Element>, f: |Element|OtherElement): Option<OtherElement> {
-    if (is_some(&o)) {
-        some(f(destroy_some(o)))
+
public inline fun map<Element, OtherElement>(self: Option<Element>, f: |Element|OtherElement): Option<OtherElement> {
+    if (is_some(&self)) {
+        some(f(destroy_some(self)))
     } else {
-        destroy_none(o);
+        destroy_none(self);
         none()
     }
 }
@@ -775,7 +775,7 @@ Maps the content of an option.
 Maps the content of an option without destroying the original option.
 
 
-
public fun map_ref<Element, OtherElement>(o: &option::Option<Element>, f: |&Element|OtherElement): option::Option<OtherElement>
+
public fun map_ref<Element, OtherElement>(self: &option::Option<Element>, f: |&Element|OtherElement): option::Option<OtherElement>
 
@@ -785,9 +785,9 @@ Maps the content of an option without destroying the original option.
public inline fun map_ref<Element, OtherElement>(
-    o: &Option<Element>, f: |&Element|OtherElement): Option<OtherElement> {
-    if (is_some(o)) {
-        some(f(borrow(o)))
+    self: &Option<Element>, f: |&Element|OtherElement): Option<OtherElement> {
+    if (is_some(self)) {
+        some(f(borrow(self)))
     } else {
         none()
     }
@@ -805,7 +805,7 @@ Maps the content of an option without destroying the original option.
 Filters the content of an option
 
 
-
public fun filter<Element: drop>(o: option::Option<Element>, f: |&Element|bool): option::Option<Element>
+
public fun filter<Element: drop>(self: option::Option<Element>, f: |&Element|bool): option::Option<Element>
 
@@ -814,9 +814,9 @@ Filters the content of an option Implementation -
public inline fun filter<Element:drop>(o: Option<Element>, f: |&Element|bool): Option<Element> {
-    if (is_some(&o) && f(borrow(&o))) {
-        o
+
public inline fun filter<Element:drop>(self: Option<Element>, f: |&Element|bool): Option<Element> {
+    if (is_some(&self) && f(borrow(&self))) {
+        self
     } else {
         none()
     }
@@ -834,7 +834,7 @@ Filters the content of an option
 Returns true if the option contains an element which satisfies predicate.
 
 
-
public fun any<Element>(o: &option::Option<Element>, p: |&Element|bool): bool
+
public fun any<Element>(self: &option::Option<Element>, p: |&Element|bool): bool
 
@@ -843,8 +843,8 @@ Returns true if the option contains an element which satisfies predicate. Implementation -
public inline fun any<Element>(o: &Option<Element>, p: |&Element|bool): bool {
-    is_some(o) && p(borrow(o))
+
public inline fun any<Element>(self: &Option<Element>, p: |&Element|bool): bool {
+    is_some(self) && p(borrow(self))
 }
 
@@ -859,7 +859,7 @@ Returns true if the option contains an element which satisfies predicate. Utility function to destroy an option that is not droppable. -
public fun destroy<Element>(o: option::Option<Element>, d: |Element|)
+
public fun destroy<Element>(self: option::Option<Element>, d: |Element|)
 
@@ -868,8 +868,8 @@ Utility function to destroy an option that is not droppable. Implementation -
public inline fun destroy<Element>(o: Option<Element>, d: |Element|) {
-    let vec = to_vec(o);
+
public inline fun destroy<Element>(self: Option<Element>, d: |Element|) {
+    let vec = to_vec(self);
     vector::destroy(vec, |e| d(e));
 }
 
@@ -900,8 +900,8 @@ Utility function to destroy an option that is not droppable.
schema AbortsIfNone<Element> {
-    t: Option<Element>;
-    aborts_if spec_is_none(t) with EOPTION_NOT_SET;
+    self: Option<Element>;
+    aborts_if spec_is_none(self) with EOPTION_NOT_SET;
 }
 
@@ -1015,7 +1015,7 @@ because it's 0 for "none" or 1 for "some". ### Function `is_none` -
public fun is_none<Element>(t: &option::Option<Element>): bool
+
public fun is_none<Element>(self: &option::Option<Element>): bool
 
@@ -1023,7 +1023,7 @@ because it's 0 for "none" or 1 for "some".
pragma opaque;
 aborts_if false;
-ensures result == spec_is_none(t);
+ensures result == spec_is_none(self);
 
@@ -1032,8 +1032,8 @@ because it's 0 for "none" or 1 for "some". -
fun spec_is_none<Element>(t: Option<Element>): bool {
-   vector::is_empty(t.vec)
+
fun spec_is_none<Element>(self: Option<Element>): bool {
+   vector::is_empty(self.vec)
 }
 
@@ -1044,7 +1044,7 @@ because it's 0 for "none" or 1 for "some". ### Function `is_some` -
public fun is_some<Element>(t: &option::Option<Element>): bool
+
public fun is_some<Element>(self: &option::Option<Element>): bool
 
@@ -1052,7 +1052,7 @@ because it's 0 for "none" or 1 for "some".
pragma opaque;
 aborts_if false;
-ensures result == spec_is_some(t);
+ensures result == spec_is_some(self);
 
@@ -1061,8 +1061,8 @@ because it's 0 for "none" or 1 for "some". -
fun spec_is_some<Element>(t: Option<Element>): bool {
-   !vector::is_empty(t.vec)
+
fun spec_is_some<Element>(self: Option<Element>): bool {
+   !vector::is_empty(self.vec)
 }
 
@@ -1073,7 +1073,7 @@ because it's 0 for "none" or 1 for "some". ### Function `contains` -
public fun contains<Element>(t: &option::Option<Element>, e_ref: &Element): bool
+
public fun contains<Element>(self: &option::Option<Element>, e_ref: &Element): bool
 
@@ -1081,7 +1081,7 @@ because it's 0 for "none" or 1 for "some".
pragma opaque;
 aborts_if false;
-ensures result == spec_contains(t, e_ref);
+ensures result == spec_contains(self, e_ref);
 
@@ -1090,8 +1090,8 @@ because it's 0 for "none" or 1 for "some". -
fun spec_contains<Element>(t: Option<Element>, e: Element): bool {
-   is_some(t) && borrow(t) == e
+
fun spec_contains<Element>(self: Option<Element>, e: Element): bool {
+   is_some(self) && borrow(self) == e
 }
 
@@ -1102,7 +1102,7 @@ because it's 0 for "none" or 1 for "some". ### Function `borrow` -
public fun borrow<Element>(t: &option::Option<Element>): &Element
+
public fun borrow<Element>(self: &option::Option<Element>): &Element
 
@@ -1110,7 +1110,7 @@ because it's 0 for "none" or 1 for "some".
pragma opaque;
 include AbortsIfNone<Element>;
-ensures result == spec_borrow(t);
+ensures result == spec_borrow(self);
 
@@ -1119,8 +1119,8 @@ because it's 0 for "none" or 1 for "some". -
fun spec_borrow<Element>(t: Option<Element>): Element {
-   t.vec[0]
+
fun spec_borrow<Element>(self: Option<Element>): Element {
+   self.vec[0]
 }
 
@@ -1131,7 +1131,7 @@ because it's 0 for "none" or 1 for "some". ### Function `borrow_with_default` -
public fun borrow_with_default<Element>(t: &option::Option<Element>, default_ref: &Element): &Element
+
public fun borrow_with_default<Element>(self: &option::Option<Element>, default_ref: &Element): &Element
 
@@ -1139,7 +1139,7 @@ because it's 0 for "none" or 1 for "some".
pragma opaque;
 aborts_if false;
-ensures result == (if (spec_is_some(t)) spec_borrow(t) else default_ref);
+ensures result == (if (spec_is_some(self)) spec_borrow(self) else default_ref);
 
@@ -1149,7 +1149,7 @@ because it's 0 for "none" or 1 for "some". ### Function `get_with_default` -
public fun get_with_default<Element: copy, drop>(t: &option::Option<Element>, default: Element): Element
+
public fun get_with_default<Element: copy, drop>(self: &option::Option<Element>, default: Element): Element
 
@@ -1157,7 +1157,7 @@ because it's 0 for "none" or 1 for "some".
pragma opaque;
 aborts_if false;
-ensures result == (if (spec_is_some(t)) spec_borrow(t) else default);
+ensures result == (if (spec_is_some(self)) spec_borrow(self) else default);
 
@@ -1167,16 +1167,16 @@ because it's 0 for "none" or 1 for "some". ### Function `fill` -
public fun fill<Element>(t: &mut option::Option<Element>, e: Element)
+
public fun fill<Element>(self: &mut option::Option<Element>, e: Element)
 
pragma opaque;
-aborts_if spec_is_some(t) with EOPTION_IS_SET;
-ensures spec_is_some(t);
-ensures spec_borrow(t) == e;
+aborts_if spec_is_some(self) with EOPTION_IS_SET;
+ensures spec_is_some(self);
+ensures spec_borrow(self) == e;
 
@@ -1186,7 +1186,7 @@ because it's 0 for "none" or 1 for "some". ### Function `extract` -
public fun extract<Element>(t: &mut option::Option<Element>): Element
+
public fun extract<Element>(self: &mut option::Option<Element>): Element
 
@@ -1194,8 +1194,8 @@ because it's 0 for "none" or 1 for "some".
pragma opaque;
 include AbortsIfNone<Element>;
-ensures result == spec_borrow(old(t));
-ensures spec_is_none(t);
+ensures result == spec_borrow(old(self));
+ensures spec_is_none(self);
 
@@ -1205,15 +1205,15 @@ because it's 0 for "none" or 1 for "some". ### Function `borrow_mut` -
public fun borrow_mut<Element>(t: &mut option::Option<Element>): &mut Element
+
public fun borrow_mut<Element>(self: &mut option::Option<Element>): &mut Element
 
include AbortsIfNone<Element>;
-ensures result == spec_borrow(t);
-ensures t == old(t);
+ensures result == spec_borrow(self);
+ensures self == old(self);
 
@@ -1223,7 +1223,7 @@ because it's 0 for "none" or 1 for "some". ### Function `swap` -
public fun swap<Element>(t: &mut option::Option<Element>, e: Element): Element
+
public fun swap<Element>(self: &mut option::Option<Element>, e: Element): Element
 
@@ -1231,9 +1231,9 @@ because it's 0 for "none" or 1 for "some".
pragma opaque;
 include AbortsIfNone<Element>;
-ensures result == spec_borrow(old(t));
-ensures spec_is_some(t);
-ensures spec_borrow(t) == e;
+ensures result == spec_borrow(old(self));
+ensures spec_is_some(self);
+ensures spec_borrow(self) == e;
 
@@ -1243,7 +1243,7 @@ because it's 0 for "none" or 1 for "some". ### Function `swap_or_fill` -
public fun swap_or_fill<Element>(t: &mut option::Option<Element>, e: Element): option::Option<Element>
+
public fun swap_or_fill<Element>(self: &mut option::Option<Element>, e: Element): option::Option<Element>
 
@@ -1251,8 +1251,8 @@ because it's 0 for "none" or 1 for "some".
pragma opaque;
 aborts_if false;
-ensures result == old(t);
-ensures spec_borrow(t) == e;
+ensures result == old(self);
+ensures spec_borrow(self) == e;
 
@@ -1262,7 +1262,7 @@ because it's 0 for "none" or 1 for "some". ### Function `destroy_with_default` -
public fun destroy_with_default<Element: drop>(t: option::Option<Element>, default: Element): Element
+
public fun destroy_with_default<Element: drop>(self: option::Option<Element>, default: Element): Element
 
@@ -1270,7 +1270,7 @@ because it's 0 for "none" or 1 for "some".
pragma opaque;
 aborts_if false;
-ensures result == (if (spec_is_some(t)) spec_borrow(t) else default);
+ensures result == (if (spec_is_some(self)) spec_borrow(self) else default);
 
@@ -1280,7 +1280,7 @@ because it's 0 for "none" or 1 for "some". ### Function `destroy_some` -
public fun destroy_some<Element>(t: option::Option<Element>): Element
+
public fun destroy_some<Element>(self: option::Option<Element>): Element
 
@@ -1288,7 +1288,7 @@ because it's 0 for "none" or 1 for "some".
pragma opaque;
 include AbortsIfNone<Element>;
-ensures result == spec_borrow(t);
+ensures result == spec_borrow(self);
 
@@ -1298,14 +1298,14 @@ because it's 0 for "none" or 1 for "some". ### Function `destroy_none` -
public fun destroy_none<Element>(t: option::Option<Element>)
+
public fun destroy_none<Element>(self: option::Option<Element>)
 
pragma opaque;
-aborts_if spec_is_some(t) with EOPTION_IS_SET;
+aborts_if spec_is_some(self) with EOPTION_IS_SET;
 
@@ -1315,7 +1315,7 @@ because it's 0 for "none" or 1 for "some". ### Function `to_vec` -
public fun to_vec<Element>(t: option::Option<Element>): vector<Element>
+
public fun to_vec<Element>(self: option::Option<Element>): vector<Element>
 
@@ -1323,7 +1323,7 @@ because it's 0 for "none" or 1 for "some".
pragma opaque;
 aborts_if false;
-ensures result == t.vec;
+ensures result == self.vec;
 
diff --git a/aptos-move/framework/move-stdlib/doc/string.md b/aptos-move/framework/move-stdlib/doc/string.md index b45c55afe9902..319b862698c08 100644 --- a/aptos-move/framework/move-stdlib/doc/string.md +++ b/aptos-move/framework/move-stdlib/doc/string.md @@ -150,7 +150,7 @@ Tries to create a new string from a sequence of bytes. Returns a reference to the underlying byte vector. -
public fun bytes(s: &string::String): &vector<u8>
+
public fun bytes(self: &string::String): &vector<u8>
 
@@ -159,8 +159,8 @@ Returns a reference to the underlying byte vector. Implementation -
public fun bytes(s: &String): &vector<u8> {
-    &s.bytes
+
public fun bytes(self: &String): &vector<u8> {
+    &self.bytes
 }
 
@@ -175,7 +175,7 @@ Returns a reference to the underlying byte vector. Checks whether this string is empty. -
public fun is_empty(s: &string::String): bool
+
public fun is_empty(self: &string::String): bool
 
@@ -184,8 +184,8 @@ Checks whether this string is empty. Implementation -
public fun is_empty(s: &String): bool {
-    vector::is_empty(&s.bytes)
+
public fun is_empty(self: &String): bool {
+    vector::is_empty(&self.bytes)
 }
 
@@ -200,7 +200,7 @@ Checks whether this string is empty. Returns the length of this string, in bytes. -
public fun length(s: &string::String): u64
+
public fun length(self: &string::String): u64
 
@@ -209,8 +209,8 @@ Returns the length of this string, in bytes. Implementation -
public fun length(s: &String): u64 {
-    vector::length(&s.bytes)
+
public fun length(self: &String): u64 {
+    vector::length(&self.bytes)
 }
 
@@ -225,7 +225,7 @@ Returns the length of this string, in bytes. Appends a string. -
public fun append(s: &mut string::String, r: string::String)
+
public fun append(self: &mut string::String, r: string::String)
 
@@ -234,8 +234,8 @@ Appends a string. Implementation -
public fun append(s: &mut String, r: String) {
-    vector::append(&mut s.bytes, r.bytes)
+
public fun append(self: &mut String, r: String) {
+    vector::append(&mut self.bytes, r.bytes)
 }
 
@@ -250,7 +250,7 @@ Appends a string. Appends bytes which must be in valid utf8 format. -
public fun append_utf8(s: &mut string::String, bytes: vector<u8>)
+
public fun append_utf8(self: &mut string::String, bytes: vector<u8>)
 
@@ -259,8 +259,8 @@ Appends bytes which must be in valid utf8 format. Implementation -
public fun append_utf8(s: &mut String, bytes: vector<u8>) {
-    append(s, utf8(bytes))
+
public fun append_utf8(self: &mut String, bytes: vector<u8>) {
+    append(self, utf8(bytes))
 }
 
@@ -276,7 +276,7 @@ Insert the other string at the byte index in given string. The index must be at boundary. -
public fun insert(s: &mut string::String, at: u64, o: string::String)
+
public fun insert(self: &mut string::String, at: u64, o: string::String)
 
@@ -285,15 +285,15 @@ boundary. Implementation -
public fun insert(s: &mut String, at: u64, o: String) {
-    let bytes = &s.bytes;
+
public fun insert(self: &mut String, at: u64, o: String) {
+    let bytes = &self.bytes;
     assert!(at <= vector::length(bytes) && internal_is_char_boundary(bytes, at), EINVALID_INDEX);
-    let l = length(s);
-    let front = sub_string(s, 0, at);
-    let end = sub_string(s, at, l);
+    let l = length(self);
+    let front = sub_string(self, 0, at);
+    let end = sub_string(self, at, l);
     append(&mut front, o);
     append(&mut front, end);
-    *s = front;
+    *self = front;
 }
 
@@ -310,7 +310,7 @@ of the first byte not included (or the length of the string). The indices must b guaranteeing that the result is valid utf8. -
public fun sub_string(s: &string::String, i: u64, j: u64): string::String
+
public fun sub_string(self: &string::String, i: u64, j: u64): string::String
 
@@ -319,8 +319,8 @@ guaranteeing that the result is valid utf8. Implementation -
public fun sub_string(s: &String, i: u64, j: u64): String {
-    let bytes = &s.bytes;
+
public fun sub_string(self: &String, i: u64, j: u64): String {
+    let bytes = &self.bytes;
     let l = vector::length(bytes);
     assert!(
         j <= l && i <= j && internal_is_char_boundary(bytes, i) && internal_is_char_boundary(bytes, j),
@@ -341,7 +341,7 @@ guaranteeing that the result is valid utf8.
 Computes the index of the first occurrence of a string. Returns length(s) if no occurrence found.
 
 
-
public fun index_of(s: &string::String, r: &string::String): u64
+
public fun index_of(self: &string::String, r: &string::String): u64
 
@@ -350,8 +350,8 @@ Computes the index of the first occurrence of a string. Returns index_of(s: &String, r: &String): u64 { - internal_index_of(&s.bytes, &r.bytes) +
public fun index_of(self: &String, r: &String): u64 {
+    internal_index_of(&self.bytes, &r.bytes)
 }
 
diff --git a/aptos-move/framework/move-stdlib/doc/vector.md b/aptos-move/framework/move-stdlib/doc/vector.md index eee7cac04edc8..d5e6a7bfa2ecd 100644 --- a/aptos-move/framework/move-stdlib/doc/vector.md +++ b/aptos-move/framework/move-stdlib/doc/vector.md @@ -177,7 +177,7 @@ Return the length of the vector.
#[bytecode_instruction]
-public fun length<Element>(v: &vector<Element>): u64
+public fun length<Element>(self: &vector<Element>): u64
 
@@ -186,7 +186,7 @@ Return the length of the vector. Implementation -
native public fun length<Element>(v: &vector<Element>): u64;
+
native public fun length<Element>(self: &vector<Element>): u64;
 
@@ -197,12 +197,12 @@ Return the length of the vector. ## Function `borrow` -Acquire an immutable reference to the ith element of the vector v. +Acquire an immutable reference to the ith element of the vector self. Aborts if i is out of bounds.
#[bytecode_instruction]
-public fun borrow<Element>(v: &vector<Element>, i: u64): &Element
+public fun borrow<Element>(self: &vector<Element>, i: u64): &Element
 
@@ -211,7 +211,7 @@ Aborts if i is out of bounds. Implementation -
native public fun borrow<Element>(v: &vector<Element>, i: u64): ∈
+
native public fun borrow<Element>(self: &vector<Element>, i: u64): ∈
 
@@ -222,11 +222,11 @@ Aborts if i is out of bounds. ## Function `push_back` -Add element e to the end of the vector v. +Add element e to the end of the vector self.
#[bytecode_instruction]
-public fun push_back<Element>(v: &mut vector<Element>, e: Element)
+public fun push_back<Element>(self: &mut vector<Element>, e: Element)
 
@@ -235,7 +235,7 @@ Add element e to the end of the vector v. Implementation -
native public fun push_back<Element>(v: &mut vector<Element>, e: Element);
+
native public fun push_back<Element>(self: &mut vector<Element>, e: Element);
 
@@ -246,12 +246,12 @@ Add element e to the end of the vector v. ## Function `borrow_mut` -Return a mutable reference to the ith element in the vector v. +Return a mutable reference to the ith element in the vector self. Aborts if i is out of bounds.
#[bytecode_instruction]
-public fun borrow_mut<Element>(v: &mut vector<Element>, i: u64): &mut Element
+public fun borrow_mut<Element>(self: &mut vector<Element>, i: u64): &mut Element
 
@@ -260,7 +260,7 @@ Aborts if i is out of bounds. Implementation -
native public fun borrow_mut<Element>(v: &mut vector<Element>, i: u64): &mut Element;
+
native public fun borrow_mut<Element>(self: &mut vector<Element>, i: u64): &mut Element;
 
@@ -271,12 +271,12 @@ Aborts if i is out of bounds. ## Function `pop_back` -Pop an element from the end of vector v. -Aborts if v is empty. +Pop an element from the end of vector self. +Aborts if self is empty.
#[bytecode_instruction]
-public fun pop_back<Element>(v: &mut vector<Element>): Element
+public fun pop_back<Element>(self: &mut vector<Element>): Element
 
@@ -285,7 +285,7 @@ Aborts if v is empty. Implementation -
native public fun pop_back<Element>(v: &mut vector<Element>): Element;
+
native public fun pop_back<Element>(self: &mut vector<Element>): Element;
 
@@ -296,12 +296,12 @@ Aborts if v is empty. ## Function `destroy_empty` -Destroy the vector v. -Aborts if v is not empty. +Destroy the vector self. +Aborts if self is not empty.
#[bytecode_instruction]
-public fun destroy_empty<Element>(v: vector<Element>)
+public fun destroy_empty<Element>(self: vector<Element>)
 
@@ -310,7 +310,7 @@ Aborts if v is not empty. Implementation -
native public fun destroy_empty<Element>(v: vector<Element>);
+
native public fun destroy_empty<Element>(self: vector<Element>);
 
@@ -321,12 +321,12 @@ Aborts if v is not empty. ## Function `swap` -Swaps the elements at the ith and jth indices in the vector v. +Swaps the elements at the ith and jth indices in the vector self. Aborts if i or j is out of bounds.
#[bytecode_instruction]
-public fun swap<Element>(v: &mut vector<Element>, i: u64, j: u64)
+public fun swap<Element>(self: &mut vector<Element>, i: u64, j: u64)
 
@@ -335,7 +335,7 @@ Aborts if i or j is out of bounds. Implementation -
native public fun swap<Element>(v: &mut vector<Element>, i: u64, j: u64);
+
native public fun swap<Element>(self: &mut vector<Element>, i: u64, j: u64);
 
@@ -373,10 +373,10 @@ Return an vector of size one containing element e. ## Function `reverse` -Reverses the order of the elements in the vector v in place. +Reverses the order of the elements in the vector self in place. -
public fun reverse<Element>(v: &mut vector<Element>)
+
public fun reverse<Element>(self: &mut vector<Element>)
 
@@ -385,9 +385,9 @@ Reverses the order of the elements in the vector v in place. Implementation -
public fun reverse<Element>(v: &mut vector<Element>) {
-    let len = length(v);
-    reverse_slice(v, 0, len);
+
public fun reverse<Element>(self: &mut vector<Element>) {
+    let len = length(self);
+    reverse_slice(self, 0, len);
 }
 
@@ -399,10 +399,10 @@ Reverses the order of the elements in the vector v in place. ## Function `reverse_slice` -Reverses the order of the elements [left, right) in the vector v in place. +Reverses the order of the elements [left, right) in the vector self in place. -
public fun reverse_slice<Element>(v: &mut vector<Element>, left: u64, right: u64)
+
public fun reverse_slice<Element>(self: &mut vector<Element>, left: u64, right: u64)
 
@@ -411,12 +411,12 @@ Reverses the order of the elements [left, right) in the vector v in Implementation -
public fun reverse_slice<Element>(v: &mut vector<Element>, left: u64, right: u64) {
+
public fun reverse_slice<Element>(self: &mut vector<Element>, left: u64, right: u64) {
     assert!(left <= right, EINVALID_RANGE);
     if (left == right) return;
     right = right - 1;
     while (left < right) {
-        swap(v, left, right);
+        swap(self, left, right);
         left = left + 1;
         right = right - 1;
     }
@@ -431,10 +431,10 @@ Reverses the order of the elements [left, right) in the vector v in
 
 ## Function `append`
 
-Pushes all of the elements of the other vector into the lhs vector.
+Pushes all of the elements of the other vector into the self vector.
 
 
-
public fun append<Element>(lhs: &mut vector<Element>, other: vector<Element>)
+
public fun append<Element>(self: &mut vector<Element>, other: vector<Element>)
 
@@ -443,9 +443,9 @@ Pushes all of the elements of the other vector into the lhsImplementation -
public fun append<Element>(lhs: &mut vector<Element>, other: vector<Element>) {
+
public fun append<Element>(self: &mut vector<Element>, other: vector<Element>) {
     reverse(&mut other);
-    reverse_append(lhs, other);
+    reverse_append(self, other);
 }
 
@@ -457,10 +457,10 @@ Pushes all of the elements of the other vector into the lhsother vector into the lhs vector. +Pushes all of the elements of the other vector into the self vector. -
public fun reverse_append<Element>(lhs: &mut vector<Element>, other: vector<Element>)
+
public fun reverse_append<Element>(self: &mut vector<Element>, other: vector<Element>)
 
@@ -469,10 +469,10 @@ Pushes all of the elements of the other vector into the lhsImplementation -
public fun reverse_append<Element>(lhs: &mut vector<Element>, other: vector<Element>) {
+
public fun reverse_append<Element>(self: &mut vector<Element>, other: vector<Element>) {
     let len = length(&other);
     while (len > 0) {
-        push_back(lhs, pop_back(&mut other));
+        push_back(self, pop_back(&mut other));
         len = len - 1;
     };
     destroy_empty(other);
@@ -490,7 +490,7 @@ Pushes all of the elements of the other vector into the lhspublic fun trim<Element>(v: &mut vector<Element>, new_len: u64): vector<Element>
+
public fun trim<Element>(self: &mut vector<Element>, new_len: u64): vector<Element>
 
@@ -499,8 +499,8 @@ Trim a vector to a smaller size, returning the evicted elements in order Implementation -
public fun trim<Element>(v: &mut vector<Element>, new_len: u64): vector<Element> {
-    let res = trim_reverse(v, new_len);
+
public fun trim<Element>(self: &mut vector<Element>, new_len: u64): vector<Element> {
+    let res = trim_reverse(self, new_len);
     reverse(&mut res);
     res
 }
@@ -517,7 +517,7 @@ Trim a vector to a smaller size, returning the evicted elements in order
 Trim a vector to a smaller size, returning the evicted elements in reverse order
 
 
-
public fun trim_reverse<Element>(v: &mut vector<Element>, new_len: u64): vector<Element>
+
public fun trim_reverse<Element>(self: &mut vector<Element>, new_len: u64): vector<Element>
 
@@ -526,12 +526,12 @@ Trim a vector to a smaller size, returning the evicted elements in reverse order Implementation -
public fun trim_reverse<Element>(v: &mut vector<Element>, new_len: u64): vector<Element> {
-    let len = length(v);
+
public fun trim_reverse<Element>(self: &mut vector<Element>, new_len: u64): vector<Element> {
+    let len = length(self);
     assert!(new_len <= len, EINDEX_OUT_OF_BOUNDS);
     let result = empty();
     while (new_len < len) {
-        push_back(&mut result, pop_back(v));
+        push_back(&mut result, pop_back(self));
         len = len - 1;
     };
     result
@@ -546,10 +546,10 @@ Trim a vector to a smaller size, returning the evicted elements in reverse order
 
 ## Function `is_empty`
 
-Return true if the vector v has no elements and false otherwise.
+Return true if the vector self has no elements and false otherwise.
 
 
-
public fun is_empty<Element>(v: &vector<Element>): bool
+
public fun is_empty<Element>(self: &vector<Element>): bool
 
@@ -558,8 +558,8 @@ Return true if the vector v has no elements and Implementation -
public fun is_empty<Element>(v: &vector<Element>): bool {
-    length(v) == 0
+
public fun is_empty<Element>(self: &vector<Element>): bool {
+    length(self) == 0
 }
 
@@ -571,10 +571,10 @@ Return true if the vector v has no elements and ## Function `contains` -Return true if e is in the vector v. +Return true if e is in the vector self. -
public fun contains<Element>(v: &vector<Element>, e: &Element): bool
+
public fun contains<Element>(self: &vector<Element>, e: &Element): bool
 
@@ -583,11 +583,11 @@ Return true if e is in the vector v. Implementation -
public fun contains<Element>(v: &vector<Element>, e: &Element): bool {
+
public fun contains<Element>(self: &vector<Element>, e: &Element): bool {
     let i = 0;
-    let len = length(v);
+    let len = length(self);
     while (i < len) {
-        if (borrow(v, i) == e) return true;
+        if (borrow(self, i) == e) return true;
         i = i + 1;
     };
     false
@@ -602,11 +602,11 @@ Return true if e is in the vector v.
 
 ## Function `index_of`
 
-Return (true, i) if e is in the vector v at index i.
+Return (true, i) if e is in the vector self at index i.
 Otherwise, returns (false, 0).
 
 
-
public fun index_of<Element>(v: &vector<Element>, e: &Element): (bool, u64)
+
public fun index_of<Element>(self: &vector<Element>, e: &Element): (bool, u64)
 
@@ -615,11 +615,11 @@ Otherwise, returns (false, 0). Implementation -
public fun index_of<Element>(v: &vector<Element>, e: &Element): (bool, u64) {
+
public fun index_of<Element>(self: &vector<Element>, e: &Element): (bool, u64) {
     let i = 0;
-    let len = length(v);
+    let len = length(self);
     while (i < len) {
-        if (borrow(v, i) == e) return (true, i);
+        if (borrow(self, i) == e) return (true, i);
         i = i + 1;
     };
     (false, 0)
@@ -639,7 +639,7 @@ the predicate, only the index of the first one is returned.
 Otherwise, returns (false, 0).
 
 
-
public fun find<Element>(v: &vector<Element>, f: |&Element|bool): (bool, u64)
+
public fun find<Element>(self: &vector<Element>, f: |&Element|bool): (bool, u64)
 
@@ -648,14 +648,14 @@ Otherwise, returns (false, 0). Implementation -
public inline fun find<Element>(v: &vector<Element>, f: |&Element|bool): (bool, u64) {
+
public inline fun find<Element>(self: &vector<Element>, f: |&Element|bool): (bool, u64) {
     let find = false;
     let found_index = 0;
     let i = 0;
-    let len = length(v);
+    let len = length(self);
     while (i < len) {
         // Cannot call return in an inline function so we need to resort to break here.
-        if (f(borrow(v, i))) {
+        if (f(borrow(self, i))) {
             find = true;
             found_index = i;
             break
@@ -678,7 +678,7 @@ Insert a new element at position 0 <= i <= length, using O(length - i) time.
 Aborts if out of bounds.
 
 
-
public fun insert<Element>(v: &mut vector<Element>, i: u64, e: Element)
+
public fun insert<Element>(self: &mut vector<Element>, i: u64, e: Element)
 
@@ -687,12 +687,12 @@ Aborts if out of bounds. Implementation -
public fun insert<Element>(v: &mut vector<Element>, i: u64, e: Element) {
-    let len = length(v);
+
public fun insert<Element>(self: &mut vector<Element>, i: u64, e: Element) {
+    let len = length(self);
     assert!(i <= len, EINDEX_OUT_OF_BOUNDS);
-    push_back(v, e);
+    push_back(self, e);
     while (i < len) {
-        swap(v, i, len);
+        swap(self, i, len);
         i = i + 1;
     };
 }
@@ -706,12 +706,12 @@ Aborts if out of bounds.
 
 ## Function `remove`
 
-Remove the ith element of the vector v, shifting all subsequent elements.
+Remove the ith element of the vector self, shifting all subsequent elements.
 This is O(n) and preserves ordering of elements in the vector.
 Aborts if i is out of bounds.
 
 
-
public fun remove<Element>(v: &mut vector<Element>, i: u64): Element
+
public fun remove<Element>(self: &mut vector<Element>, i: u64): Element
 
@@ -720,14 +720,14 @@ Aborts if i is out of bounds. Implementation -
public fun remove<Element>(v: &mut vector<Element>, i: u64): Element {
-    let len = length(v);
+
public fun remove<Element>(self: &mut vector<Element>, i: u64): Element {
+    let len = length(self);
     // i out of bounds; abort
     if (i >= len) abort EINDEX_OUT_OF_BOUNDS;
 
     len = len - 1;
-    while (i < len) swap(v, i, { i = i + 1; i });
-    pop_back(v)
+    while (i < len) swap(self, i, { i = i + 1; i });
+    pop_back(self)
 }
 
@@ -739,7 +739,7 @@ Aborts if i is out of bounds. ## Function `remove_value` -Remove the first occurrence of a given value in the vector v and return it in a vector, shifting all +Remove the first occurrence of a given value in the vector self and return it in a vector, shifting all subsequent elements. This is O(n) and preserves ordering of elements in the vector. This returns an empty vector if the value isn't present in the vector. @@ -747,7 +747,7 @@ Note that this cannot return an option as option uses vector and there'd be a ci and vector. -
public fun remove_value<Element>(v: &mut vector<Element>, val: &Element): vector<Element>
+
public fun remove_value<Element>(self: &mut vector<Element>, val: &Element): vector<Element>
 
@@ -756,12 +756,12 @@ and vector. Implementation -
public fun remove_value<Element>(v: &mut vector<Element>, val: &Element): vector<Element> {
+
public fun remove_value<Element>(self: &mut vector<Element>, val: &Element): vector<Element> {
     // This doesn't cost a O(2N) run time as index_of scans from left to right and stops when the element is found,
     // while remove would continue from the identified index to the end of the vector.
-    let (found, index) = index_of(v, val);
+    let (found, index) = index_of(self, val);
     if (found) {
-        vector[remove(v, index)]
+        vector[remove(self, index)]
     } else {
        vector[]
     }
@@ -776,12 +776,12 @@ and vector.
 
 ## Function `swap_remove`
 
-Swap the ith element of the vector v with the last element and then pop the vector.
+Swap the ith element of the vector self with the last element and then pop the vector.
 This is O(1), but does not preserve ordering of elements in the vector.
 Aborts if i is out of bounds.
 
 
-
public fun swap_remove<Element>(v: &mut vector<Element>, i: u64): Element
+
public fun swap_remove<Element>(self: &mut vector<Element>, i: u64): Element
 
@@ -790,11 +790,11 @@ Aborts if i is out of bounds. Implementation -
public fun swap_remove<Element>(v: &mut vector<Element>, i: u64): Element {
-    assert!(!is_empty(v), EINDEX_OUT_OF_BOUNDS);
-    let last_idx = length(v) - 1;
-    swap(v, i, last_idx);
-    pop_back(v)
+
public fun swap_remove<Element>(self: &mut vector<Element>, i: u64): Element {
+    assert!(!is_empty(self), EINDEX_OUT_OF_BOUNDS);
+    let last_idx = length(self) - 1;
+    swap(self, i, last_idx);
+    pop_back(self)
 }
 
@@ -809,7 +809,7 @@ Aborts if i is out of bounds. Apply the function to each element in the vector, consuming it. -
public fun for_each<Element>(v: vector<Element>, f: |Element|)
+
public fun for_each<Element>(self: vector<Element>, f: |Element|)
 
@@ -818,9 +818,9 @@ Apply the function to each element in the vector, consuming it. Implementation -
public inline fun for_each<Element>(v: vector<Element>, f: |Element|) {
-    reverse(&mut v); // We need to reverse the vector to consume it efficiently
-    for_each_reverse(v, |e| f(e));
+
public inline fun for_each<Element>(self: vector<Element>, f: |Element|) {
+    reverse(&mut self); // We need to reverse the vector to consume it efficiently
+    for_each_reverse(self, |e| f(e));
 }
 
@@ -835,7 +835,7 @@ Apply the function to each element in the vector, consuming it. Apply the function to each element in the vector, consuming it. -
public fun for_each_reverse<Element>(v: vector<Element>, f: |Element|)
+
public fun for_each_reverse<Element>(self: vector<Element>, f: |Element|)
 
@@ -844,13 +844,13 @@ Apply the function to each element in the vector, consuming it. Implementation -
public inline fun for_each_reverse<Element>(v: vector<Element>, f: |Element|) {
-    let len = length(&v);
+
public inline fun for_each_reverse<Element>(self: vector<Element>, f: |Element|) {
+    let len = length(&self);
     while (len > 0) {
-        f(pop_back(&mut v));
+        f(pop_back(&mut self));
         len = len - 1;
     };
-    destroy_empty(v)
+    destroy_empty(self)
 }
 
@@ -865,7 +865,7 @@ Apply the function to each element in the vector, consuming it. Apply the function to a reference of each element in the vector. -
public fun for_each_ref<Element>(v: &vector<Element>, f: |&Element|)
+
public fun for_each_ref<Element>(self: &vector<Element>, f: |&Element|)
 
@@ -874,11 +874,11 @@ Apply the function to a reference of each element in the vector. Implementation -
public inline fun for_each_ref<Element>(v: &vector<Element>, f: |&Element|) {
+
public inline fun for_each_ref<Element>(self: &vector<Element>, f: |&Element|) {
     let i = 0;
-    let len = length(v);
+    let len = length(self);
     while (i < len) {
-        f(borrow(v, i));
+        f(borrow(self, i));
         i = i + 1
     }
 }
@@ -895,7 +895,7 @@ Apply the function to a reference of each element in the vector.
 Apply the function to each pair of elements in the two given vectors, consuming them.
 
 
-
public fun zip<Element1, Element2>(v1: vector<Element1>, v2: vector<Element2>, f: |(Element1, Element2)|)
+
public fun zip<Element1, Element2>(self: vector<Element1>, v2: vector<Element2>, f: |(Element1, Element2)|)
 
@@ -904,11 +904,11 @@ Apply the function to each pair of elements in the two given vectors, consuming Implementation -
public inline fun zip<Element1, Element2>(v1: vector<Element1>, v2: vector<Element2>, f: |Element1, Element2|) {
+
public inline fun zip<Element1, Element2>(self: vector<Element1>, v2: vector<Element2>, f: |Element1, Element2|) {
     // We need to reverse the vectors to consume it efficiently
-    reverse(&mut v1);
+    reverse(&mut self);
     reverse(&mut v2);
-    zip_reverse(v1, v2, |e1, e2| f(e1, e2));
+    zip_reverse(self, v2, |e1, e2| f(e1, e2));
 }
 
@@ -924,7 +924,7 @@ Apply the function to each pair of elements in the two given vectors in the reve This errors out if the vectors are not of the same length. -
public fun zip_reverse<Element1, Element2>(v1: vector<Element1>, v2: vector<Element2>, f: |(Element1, Element2)|)
+
public fun zip_reverse<Element1, Element2>(self: vector<Element1>, v2: vector<Element2>, f: |(Element1, Element2)|)
 
@@ -934,19 +934,19 @@ This errors out if the vectors are not of the same length.
public inline fun zip_reverse<Element1, Element2>(
-    v1: vector<Element1>,
+    self: vector<Element1>,
     v2: vector<Element2>,
     f: |Element1, Element2|,
 ) {
-    let len = length(&v1);
+    let len = length(&self);
     // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
     // due to how inline functions work.
     assert!(len == length(&v2), 0x20002);
     while (len > 0) {
-        f(pop_back(&mut v1), pop_back(&mut v2));
+        f(pop_back(&mut self), pop_back(&mut v2));
         len = len - 1;
     };
-    destroy_empty(v1);
+    destroy_empty(self);
     destroy_empty(v2);
 }
 
@@ -963,7 +963,7 @@ Apply the function to the references of each pair of elements in the two given v This errors out if the vectors are not of the same length. -
public fun zip_ref<Element1, Element2>(v1: &vector<Element1>, v2: &vector<Element2>, f: |(&Element1, &Element2)|)
+
public fun zip_ref<Element1, Element2>(self: &vector<Element1>, v2: &vector<Element2>, f: |(&Element1, &Element2)|)
 
@@ -973,17 +973,17 @@ This errors out if the vectors are not of the same length.
public inline fun zip_ref<Element1, Element2>(
-    v1: &vector<Element1>,
+    self: &vector<Element1>,
     v2: &vector<Element2>,
     f: |&Element1, &Element2|,
 ) {
-    let len = length(v1);
+    let len = length(self);
     // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
     // due to how inline functions work.
     assert!(len == length(v2), 0x20002);
     let i = 0;
     while (i < len) {
-        f(borrow(v1, i), borrow(v2, i));
+        f(borrow(self, i), borrow(v2, i));
         i = i + 1
     }
 }
@@ -1000,7 +1000,7 @@ This errors out if the vectors are not of the same length.
 Apply the function to a reference of each element in the vector with its index.
 
 
-
public fun enumerate_ref<Element>(v: &vector<Element>, f: |(u64, &Element)|)
+
public fun enumerate_ref<Element>(self: &vector<Element>, f: |(u64, &Element)|)
 
@@ -1009,11 +1009,11 @@ Apply the function to a reference of each element in the vector with its index. Implementation -
public inline fun enumerate_ref<Element>(v: &vector<Element>, f: |u64, &Element|) {
+
public inline fun enumerate_ref<Element>(self: &vector<Element>, f: |u64, &Element|) {
     let i = 0;
-    let len = length(v);
+    let len = length(self);
     while (i < len) {
-        f(i, borrow(v, i));
+        f(i, borrow(self, i));
         i = i + 1;
     };
 }
@@ -1030,7 +1030,7 @@ Apply the function to a reference of each element in the vector with its index.
 Apply the function to a mutable reference to each element in the vector.
 
 
-
public fun for_each_mut<Element>(v: &mut vector<Element>, f: |&mut Element|)
+
public fun for_each_mut<Element>(self: &mut vector<Element>, f: |&mut Element|)
 
@@ -1039,11 +1039,11 @@ Apply the function to a mutable reference to each element in the vector. Implementation -
public inline fun for_each_mut<Element>(v: &mut vector<Element>, f: |&mut Element|) {
+
public inline fun for_each_mut<Element>(self: &mut vector<Element>, f: |&mut Element|) {
     let i = 0;
-    let len = length(v);
+    let len = length(self);
     while (i < len) {
-        f(borrow_mut(v, i));
+        f(borrow_mut(self, i));
         i = i + 1
     }
 }
@@ -1061,7 +1061,7 @@ Apply the function to mutable references to each pair of elements in the two giv
 This errors out if the vectors are not of the same length.
 
 
-
public fun zip_mut<Element1, Element2>(v1: &mut vector<Element1>, v2: &mut vector<Element2>, f: |(&mut Element1, &mut Element2)|)
+
public fun zip_mut<Element1, Element2>(self: &mut vector<Element1>, v2: &mut vector<Element2>, f: |(&mut Element1, &mut Element2)|)
 
@@ -1071,17 +1071,17 @@ This errors out if the vectors are not of the same length.
public inline fun zip_mut<Element1, Element2>(
-    v1: &mut vector<Element1>,
+    self: &mut vector<Element1>,
     v2: &mut vector<Element2>,
     f: |&mut Element1, &mut Element2|,
 ) {
     let i = 0;
-    let len = length(v1);
+    let len = length(self);
     // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
     // due to how inline functions work.
     assert!(len == length(v2), 0x20002);
     while (i < len) {
-        f(borrow_mut(v1, i), borrow_mut(v2, i));
+        f(borrow_mut(self, i), borrow_mut(v2, i));
         i = i + 1
     }
 }
@@ -1098,7 +1098,7 @@ This errors out if the vectors are not of the same length.
 Apply the function to a mutable reference of each element in the vector with its index.
 
 
-
public fun enumerate_mut<Element>(v: &mut vector<Element>, f: |(u64, &mut Element)|)
+
public fun enumerate_mut<Element>(self: &mut vector<Element>, f: |(u64, &mut Element)|)
 
@@ -1107,11 +1107,11 @@ Apply the function to a mutable reference of each element in the vector with its Implementation -
public inline fun enumerate_mut<Element>(v: &mut vector<Element>, f: |u64, &mut Element|) {
+
public inline fun enumerate_mut<Element>(self: &mut vector<Element>, f: |u64, &mut Element|) {
     let i = 0;
-    let len = length(v);
+    let len = length(self);
     while (i < len) {
-        f(i, borrow_mut(v, i));
+        f(i, borrow_mut(self, i));
         i = i + 1;
     };
 }
@@ -1129,7 +1129,7 @@ Fold the function over the elements. For example, fold<Accumulator, Element>(v: vector<Element>, init: Accumulator, f: |(Accumulator, Element)|Accumulator): Accumulator
+
public fun fold<Accumulator, Element>(self: vector<Element>, init: Accumulator, f: |(Accumulator, Element)|Accumulator): Accumulator
 
@@ -1139,12 +1139,12 @@ Fold the function over the elements. For example, fold<Accumulator, Element>( - v: vector<Element>, + self: vector<Element>, init: Accumulator, f: |Accumulator,Element|Accumulator ): Accumulator { let accu = init; - for_each(v, |elem| accu = f(accu, elem)); + for_each(self, |elem| accu = f(accu, elem)); accu }
@@ -1161,7 +1161,7 @@ Fold right like fold above but working right to left. For example, f(1, f(2, f(3, 0))) -
public fun foldr<Accumulator, Element>(v: vector<Element>, init: Accumulator, f: |(Element, Accumulator)|Accumulator): Accumulator
+
public fun foldr<Accumulator, Element>(self: vector<Element>, init: Accumulator, f: |(Element, Accumulator)|Accumulator): Accumulator
 
@@ -1171,12 +1171,12 @@ Fold right like fold above but working right to left. For example, public inline fun foldr<Accumulator, Element>( - v: vector<Element>, + self: vector<Element>, init: Accumulator, f: |Element, Accumulator|Accumulator ): Accumulator { let accu = init; - for_each_reverse(v, |elem| accu = f(elem, accu)); + for_each_reverse(self, |elem| accu = f(elem, accu)); accu }
@@ -1193,7 +1193,7 @@ Map the function over the references of the elements of the vector, producing a original vector. -
public fun map_ref<Element, NewElement>(v: &vector<Element>, f: |&Element|NewElement): vector<NewElement>
+
public fun map_ref<Element, NewElement>(self: &vector<Element>, f: |&Element|NewElement): vector<NewElement>
 
@@ -1203,11 +1203,11 @@ original vector.
public inline fun map_ref<Element, NewElement>(
-    v: &vector<Element>,
+    self: &vector<Element>,
     f: |&Element|NewElement
 ): vector<NewElement> {
     let result = vector<NewElement>[];
-    for_each_ref(v, |elem| push_back(&mut result, f(elem)));
+    for_each_ref(self, |elem| push_back(&mut result, f(elem)));
     result
 }
 
@@ -1224,7 +1224,7 @@ Map the function over the references of the element pairs of two vectors, produc values without modifying the original vectors. -
public fun zip_map_ref<Element1, Element2, NewElement>(v1: &vector<Element1>, v2: &vector<Element2>, f: |(&Element1, &Element2)|NewElement): vector<NewElement>
+
public fun zip_map_ref<Element1, Element2, NewElement>(self: &vector<Element1>, v2: &vector<Element2>, f: |(&Element1, &Element2)|NewElement): vector<NewElement>
 
@@ -1234,16 +1234,16 @@ values without modifying the original vectors.
public inline fun zip_map_ref<Element1, Element2, NewElement>(
-    v1: &vector<Element1>,
+    self: &vector<Element1>,
     v2: &vector<Element2>,
     f: |&Element1, &Element2|NewElement
 ): vector<NewElement> {
     // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
     // due to how inline functions work.
-    assert!(length(v1) == length(v2), 0x20002);
+    assert!(length(self) == length(v2), 0x20002);
 
     let result = vector<NewElement>[];
-    zip_ref(v1, v2, |e1, e2| push_back(&mut result, f(e1, e2)));
+    zip_ref(self, v2, |e1, e2| push_back(&mut result, f(e1, e2)));
     result
 }
 
@@ -1259,7 +1259,7 @@ values without modifying the original vectors. Map the function over the elements of the vector, producing a new vector. -
public fun map<Element, NewElement>(v: vector<Element>, f: |Element|NewElement): vector<NewElement>
+
public fun map<Element, NewElement>(self: vector<Element>, f: |Element|NewElement): vector<NewElement>
 
@@ -1269,11 +1269,11 @@ Map the function over the elements of the vector, producing a new vector.
public inline fun map<Element, NewElement>(
-    v: vector<Element>,
+    self: vector<Element>,
     f: |Element|NewElement
 ): vector<NewElement> {
     let result = vector<NewElement>[];
-    for_each(v, |elem| push_back(&mut result, f(elem)));
+    for_each(self, |elem| push_back(&mut result, f(elem)));
     result
 }
 
@@ -1289,7 +1289,7 @@ Map the function over the elements of the vector, producing a new vector. Map the function over the element pairs of the two vectors, producing a new vector. -
public fun zip_map<Element1, Element2, NewElement>(v1: vector<Element1>, v2: vector<Element2>, f: |(Element1, Element2)|NewElement): vector<NewElement>
+
public fun zip_map<Element1, Element2, NewElement>(self: vector<Element1>, v2: vector<Element2>, f: |(Element1, Element2)|NewElement): vector<NewElement>
 
@@ -1299,16 +1299,16 @@ Map the function over the element pairs of the two vectors, producing a new vect
public inline fun zip_map<Element1, Element2, NewElement>(
-    v1: vector<Element1>,
+    self: vector<Element1>,
     v2: vector<Element2>,
     f: |Element1, Element2|NewElement
 ): vector<NewElement> {
     // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
     // due to how inline functions work.
-    assert!(length(&v1) == length(&v2), 0x20002);
+    assert!(length(&self) == length(&v2), 0x20002);
 
     let result = vector<NewElement>[];
-    zip(v1, v2, |e1, e2| push_back(&mut result, f(e1, e2)));
+    zip(self, v2, |e1, e2| push_back(&mut result, f(e1, e2)));
     result
 }
 
@@ -1324,7 +1324,7 @@ Map the function over the element pairs of the two vectors, producing a new vect Filter the vector using the boolean function, removing all elements for which p(e) is not true. -
public fun filter<Element: drop>(v: vector<Element>, p: |&Element|bool): vector<Element>
+
public fun filter<Element: drop>(self: vector<Element>, p: |&Element|bool): vector<Element>
 
@@ -1334,11 +1334,11 @@ Filter the vector using the boolean function, removing all elements for which public inline fun filter<Element:drop>( - v: vector<Element>, + self: vector<Element>, p: |&Element|bool ): vector<Element> { let result = vector<Element>[]; - for_each(v, |elem| { + for_each(self, |elem| { if (p(&elem)) push_back(&mut result, elem); }); result @@ -1358,7 +1358,7 @@ Preserves the relative order of the elements for which pred is true, BUT NOT for the elements for which pred is false. -
public fun partition<Element>(v: &mut vector<Element>, pred: |&Element|bool): u64
+
public fun partition<Element>(self: &mut vector<Element>, pred: |&Element|bool): u64
 
@@ -1368,20 +1368,20 @@ BUT NOT for the elements for which pred is false.
public inline fun partition<Element>(
-    v: &mut vector<Element>,
+    self: &mut vector<Element>,
     pred: |&Element|bool
 ): u64 {
     let i = 0;
-    let len = length(v);
+    let len = length(self);
     while (i < len) {
-        if (!pred(borrow(v, i))) break;
+        if (!pred(borrow(self, i))) break;
         i = i + 1;
     };
     let p = i;
     i = i + 1;
     while (i < len) {
-        if (pred(borrow(v, i))) {
-            swap(v, p, i);
+        if (pred(borrow(self, i))) {
+            swap(self, p, i);
             p = p + 1;
         };
         i = i + 1;
@@ -1402,7 +1402,7 @@ rotate(&mut [1, 2, 3, 4, 5], 2) -> [3, 4, 5, 1, 2] in place, returns the split p
 ie. 3 in the example above
 
 
-
public fun rotate<Element>(v: &mut vector<Element>, rot: u64): u64
+
public fun rotate<Element>(self: &mut vector<Element>, rot: u64): u64
 
@@ -1412,11 +1412,11 @@ ie. 3 in the example above
public fun rotate<Element>(
-    v: &mut vector<Element>,
+    self: &mut vector<Element>,
     rot: u64
 ): u64 {
-    let len = length(v);
-    rotate_slice(v, 0, rot, len)
+    let len = length(self);
+    rotate_slice(self, 0, rot, len)
 }
 
@@ -1432,7 +1432,7 @@ Same as above but on a sub-slice of an array [left, right) with left <= rot <= r returns the -
public fun rotate_slice<Element>(v: &mut vector<Element>, left: u64, rot: u64, right: u64): u64
+
public fun rotate_slice<Element>(self: &mut vector<Element>, left: u64, rot: u64, right: u64): u64
 
@@ -1442,14 +1442,14 @@ returns the
public fun rotate_slice<Element>(
-    v: &mut vector<Element>,
+    self: &mut vector<Element>,
     left: u64,
     rot: u64,
     right: u64
 ): u64 {
-    reverse_slice(v, left, rot);
-    reverse_slice(v, rot, right);
-    reverse_slice(v, left, right);
+    reverse_slice(self, left, rot);
+    reverse_slice(self, rot, right);
+    reverse_slice(self, left, right);
     left + (right - rot)
 }
 
@@ -1466,7 +1466,7 @@ Partition the array based on a predicate p, this routine is stable and thus preserves the relative order of the elements in the two partitions. -
public fun stable_partition<Element>(v: &mut vector<Element>, p: |&Element|bool): u64
+
public fun stable_partition<Element>(self: &mut vector<Element>, p: |&Element|bool): u64
 
@@ -1476,14 +1476,14 @@ preserves the relative order of the elements in the two partitions.
public inline fun stable_partition<Element>(
-    v: &mut vector<Element>,
+    self: &mut vector<Element>,
     p: |&Element|bool
 ): u64 {
-    let len = length(v);
+    let len = length(self);
     let t = empty();
     let f = empty();
     while (len > 0) {
-        let e = pop_back(v);
+        let e = pop_back(self);
         if (p(&e)) {
             push_back(&mut t, e);
         } else {
@@ -1492,8 +1492,8 @@ preserves the relative order of the elements in the two partitions.
         len = len - 1;
     };
     let pos = length(&t);
-    reverse_append(v, t);
-    reverse_append(v, f);
+    reverse_append(self, t);
+    reverse_append(self, f);
     pos
 }
 
@@ -1509,7 +1509,7 @@ preserves the relative order of the elements in the two partitions. Return true if any element in the vector satisfies the predicate. -
public fun any<Element>(v: &vector<Element>, p: |&Element|bool): bool
+
public fun any<Element>(self: &vector<Element>, p: |&Element|bool): bool
 
@@ -1519,13 +1519,13 @@ Return true if any element in the vector satisfies the predicate.
public inline fun any<Element>(
-    v: &vector<Element>,
+    self: &vector<Element>,
     p: |&Element|bool
 ): bool {
     let result = false;
     let i = 0;
-    while (i < length(v)) {
-        result = p(borrow(v, i));
+    while (i < length(self)) {
+        result = p(borrow(self, i));
         if (result) {
             break
         };
@@ -1546,7 +1546,7 @@ Return true if any element in the vector satisfies the predicate.
 Return true if all elements in the vector satisfy the predicate.
 
 
-
public fun all<Element>(v: &vector<Element>, p: |&Element|bool): bool
+
public fun all<Element>(self: &vector<Element>, p: |&Element|bool): bool
 
@@ -1556,13 +1556,13 @@ Return true if all elements in the vector satisfy the predicate.
public inline fun all<Element>(
-    v: &vector<Element>,
+    self: &vector<Element>,
     p: |&Element|bool
 ): bool {
     let result = true;
     let i = 0;
-    while (i < length(v)) {
-        result = p(borrow(v, i));
+    while (i < length(self)) {
+        result = p(borrow(self, i));
         if (!result) {
             break
         };
@@ -1584,7 +1584,7 @@ Destroy a vector, just a wrapper around for_each_reverse with a descriptive name
 when used in the context of destroying a vector.
 
 
-
public fun destroy<Element>(v: vector<Element>, d: |Element|)
+
public fun destroy<Element>(self: vector<Element>, d: |Element|)
 
@@ -1594,10 +1594,10 @@ when used in the context of destroying a vector.
public inline fun destroy<Element>(
-    v: vector<Element>,
+    self: vector<Element>,
     d: |Element|
 ) {
-    for_each_reverse(v, |e| d(e))
+    for_each_reverse(self, |e| d(e))
 }
 
@@ -1666,7 +1666,7 @@ when used in the context of destroying a vector. -
public fun slice<Element: copy>(v: &vector<Element>, start: u64, end: u64): vector<Element>
+
public fun slice<Element: copy>(self: &vector<Element>, start: u64, end: u64): vector<Element>
 
@@ -1676,15 +1676,15 @@ when used in the context of destroying a vector.
public fun slice<Element: copy>(
-    v: &vector<Element>,
+    self: &vector<Element>,
     start: u64,
     end: u64
 ): vector<Element> {
-    assert!(start <= end && end <= length(v), EINVALID_SLICE_RANGE);
+    assert!(start <= end && end <= length(self), EINVALID_SLICE_RANGE);
 
     let vec = vector[];
     while (start < end) {
-        push_back(&mut vec, *borrow(v, start));
+        push_back(&mut vec, *borrow(self, start));
         start = start + 1;
     };
     vec
@@ -1706,43 +1706,43 @@ when used in the context of destroying a vector.
 ### Helper Functions
 
 
-Check if v1 is equal to the result of adding e at the end of v2
+Check if self is equal to the result of adding e at the end of v2
 
 
 
 
 
-
fun eq_push_back<Element>(v1: vector<Element>, v2: vector<Element>, e: Element): bool {
-    len(v1) == len(v2) + 1 &&
-    v1[len(v1)-1] == e &&
-    v1[0..len(v1)-1] == v2[0..len(v2)]
+
fun eq_push_back<Element>(self: vector<Element>, v2: vector<Element>, e: Element): bool {
+    len(self) == len(v2) + 1 &&
+    self[len(self)-1] == e &&
+    self[0..len(self)-1] == v2[0..len(v2)]
 }
 
-Check if v is equal to the result of concatenating v1 and v2 +Check if self is equal to the result of concatenating v1 and v2 -
fun eq_append<Element>(v: vector<Element>, v1: vector<Element>, v2: vector<Element>): bool {
-    len(v) == len(v1) + len(v2) &&
-    v[0..len(v1)] == v1 &&
-    v[len(v1)..len(v)] == v2
+
fun eq_append<Element>(self: vector<Element>, v1: vector<Element>, v2: vector<Element>): bool {
+    len(self) == len(v1) + len(v2) &&
+    self[0..len(v1)] == v1 &&
+    self[len(v1)..len(self)] == v2
 }
 
-Check v1 is equal to the result of removing the first element of v2 +Check self is equal to the result of removing the first element of v2 -
fun eq_pop_front<Element>(v1: vector<Element>, v2: vector<Element>): bool {
-    len(v1) + 1 == len(v2) &&
-    v1 == v2[1..len(v2)]
+
fun eq_pop_front<Element>(self: vector<Element>, v2: vector<Element>): bool {
+    len(self) + 1 == len(v2) &&
+    self == v2[1..len(v2)]
 }
 
@@ -1761,14 +1761,14 @@ Check that v1 is equal to the result of removing the element at ind
-Check if v contains e. +Check if self contains e. -
fun spec_contains<Element>(v: vector<Element>, e: Element): bool {
-    exists x in v: x == e
+
fun spec_contains<Element>(self: vector<Element>, e: Element): bool {
+    exists x in self: x == e
 }
 
@@ -1796,7 +1796,7 @@ Check if v contains e. ### Function `reverse` -
public fun reverse<Element>(v: &mut vector<Element>)
+
public fun reverse<Element>(self: &mut vector<Element>)
 
@@ -1812,7 +1812,7 @@ Check if v contains e. ### Function `reverse_slice` -
public fun reverse_slice<Element>(v: &mut vector<Element>, left: u64, right: u64)
+
public fun reverse_slice<Element>(self: &mut vector<Element>, left: u64, right: u64)
 
@@ -1828,7 +1828,7 @@ Check if v contains e. ### Function `append` -
public fun append<Element>(lhs: &mut vector<Element>, other: vector<Element>)
+
public fun append<Element>(self: &mut vector<Element>, other: vector<Element>)
 
@@ -1844,7 +1844,7 @@ Check if v contains e. ### Function `reverse_append` -
public fun reverse_append<Element>(lhs: &mut vector<Element>, other: vector<Element>)
+
public fun reverse_append<Element>(self: &mut vector<Element>, other: vector<Element>)
 
@@ -1860,7 +1860,7 @@ Check if v contains e. ### Function `trim` -
public fun trim<Element>(v: &mut vector<Element>, new_len: u64): vector<Element>
+
public fun trim<Element>(self: &mut vector<Element>, new_len: u64): vector<Element>
 
@@ -1876,7 +1876,7 @@ Check if v contains e. ### Function `trim_reverse` -
public fun trim_reverse<Element>(v: &mut vector<Element>, new_len: u64): vector<Element>
+
public fun trim_reverse<Element>(self: &mut vector<Element>, new_len: u64): vector<Element>
 
@@ -1892,7 +1892,7 @@ Check if v contains e. ### Function `is_empty` -
public fun is_empty<Element>(v: &vector<Element>): bool
+
public fun is_empty<Element>(self: &vector<Element>): bool
 
@@ -1908,7 +1908,7 @@ Check if v contains e. ### Function `contains` -
public fun contains<Element>(v: &vector<Element>, e: &Element): bool
+
public fun contains<Element>(self: &vector<Element>, e: &Element): bool
 
@@ -1924,7 +1924,7 @@ Check if v contains e. ### Function `index_of` -
public fun index_of<Element>(v: &vector<Element>, e: &Element): (bool, u64)
+
public fun index_of<Element>(self: &vector<Element>, e: &Element): (bool, u64)
 
@@ -1940,7 +1940,7 @@ Check if v contains e. ### Function `insert` -
public fun insert<Element>(v: &mut vector<Element>, i: u64, e: Element)
+
public fun insert<Element>(self: &mut vector<Element>, i: u64, e: Element)
 
@@ -1956,7 +1956,7 @@ Check if v contains e. ### Function `remove` -
public fun remove<Element>(v: &mut vector<Element>, i: u64): Element
+
public fun remove<Element>(self: &mut vector<Element>, i: u64): Element
 
@@ -1972,7 +1972,7 @@ Check if v contains e. ### Function `remove_value` -
public fun remove_value<Element>(v: &mut vector<Element>, val: &Element): vector<Element>
+
public fun remove_value<Element>(self: &mut vector<Element>, val: &Element): vector<Element>
 
@@ -1988,7 +1988,7 @@ Check if v contains e. ### Function `swap_remove` -
public fun swap_remove<Element>(v: &mut vector<Element>, i: u64): Element
+
public fun swap_remove<Element>(self: &mut vector<Element>, i: u64): Element
 
@@ -2004,7 +2004,7 @@ Check if v contains e. ### Function `rotate` -
public fun rotate<Element>(v: &mut vector<Element>, rot: u64): u64
+
public fun rotate<Element>(self: &mut vector<Element>, rot: u64): u64
 
@@ -2020,7 +2020,7 @@ Check if v contains e. ### Function `rotate_slice` -
public fun rotate_slice<Element>(v: &mut vector<Element>, left: u64, rot: u64, right: u64): u64
+
public fun rotate_slice<Element>(self: &mut vector<Element>, left: u64, rot: u64, right: u64): u64
 
diff --git a/aptos-move/framework/move-stdlib/sources/acl.move b/aptos-move/framework/move-stdlib/sources/acl.move index 5cf71e635e182..6adb0e2a9f89e 100644 --- a/aptos-move/framework/move-stdlib/sources/acl.move +++ b/aptos-move/framework/move-stdlib/sources/acl.move @@ -22,25 +22,25 @@ module std::acl { } /// Add the address to the ACL. - public fun add(acl: &mut ACL, addr: address) { - assert!(!vector::contains(&mut acl.list, &addr), error::invalid_argument(ECONTAIN)); - vector::push_back(&mut acl.list, addr); + public fun add(self: &mut ACL, addr: address) { + assert!(!vector::contains(&mut self.list, &addr), error::invalid_argument(ECONTAIN)); + vector::push_back(&mut self.list, addr); } /// Remove the address from the ACL. - public fun remove(acl: &mut ACL, addr: address) { - let (found, index) = vector::index_of(&mut acl.list, &addr); + public fun remove(self: &mut ACL, addr: address) { + let (found, index) = vector::index_of(&mut self.list, &addr); assert!(found, error::invalid_argument(ENOT_CONTAIN)); - vector::remove(&mut acl.list, index); + vector::remove(&mut self.list, index); } /// Return true iff the ACL contains the address. - public fun contains(acl: &ACL, addr: address): bool { - vector::contains(&acl.list, &addr) + public fun contains(self: &ACL, addr: address): bool { + vector::contains(&self.list, &addr) } /// assert! that the ACL has the address. 
- public fun assert_contains(acl: &ACL, addr: address) { - assert!(contains(acl, addr), error::invalid_argument(ENOT_CONTAIN)); + public fun assert_contains(self: &ACL, addr: address) { + assert!(contains(self, addr), error::invalid_argument(ENOT_CONTAIN)); } } diff --git a/aptos-move/framework/move-stdlib/sources/acl.spec.move b/aptos-move/framework/move-stdlib/sources/acl.spec.move index 843496f4e8848..dcbb93e0b1f7f 100644 --- a/aptos-move/framework/move-stdlib/sources/acl.spec.move +++ b/aptos-move/framework/move-stdlib/sources/acl.spec.move @@ -3,25 +3,25 @@ spec std::acl { invariant forall i in 0..len(list), j in 0..len(list): list[i] == list[j] ==> i == j; } - spec fun spec_contains(acl: ACL, addr: address): bool { - exists a in acl.list: a == addr + spec fun spec_contains(self: ACL, addr: address): bool { + exists a in self.list: a == addr } - spec contains(acl: &ACL, addr: address): bool { - ensures result == spec_contains(acl, addr); + spec contains(self: &ACL, addr: address): bool { + ensures result == spec_contains(self, addr); } - spec add(acl: &mut ACL, addr: address) { - aborts_if spec_contains(acl, addr) with error::INVALID_ARGUMENT; - ensures spec_contains(acl, addr); + spec add(self: &mut ACL, addr: address) { + aborts_if spec_contains(self, addr) with error::INVALID_ARGUMENT; + ensures spec_contains(self, addr); } - spec remove(acl: &mut ACL, addr: address) { - aborts_if !spec_contains(acl, addr) with error::INVALID_ARGUMENT; - ensures !spec_contains(acl, addr); + spec remove(self: &mut ACL, addr: address) { + aborts_if !spec_contains(self, addr) with error::INVALID_ARGUMENT; + ensures !spec_contains(self, addr); } - spec assert_contains(acl: &ACL, addr: address) { - aborts_if !spec_contains(acl, addr) with error::INVALID_ARGUMENT; + spec assert_contains(self: &ACL, addr: address) { + aborts_if !spec_contains(self, addr) with error::INVALID_ARGUMENT; } } diff --git a/aptos-move/framework/move-stdlib/sources/bcs.move 
b/aptos-move/framework/move-stdlib/sources/bcs.move index 79b4c988906f2..478cbed627f24 100644 --- a/aptos-move/framework/move-stdlib/sources/bcs.move +++ b/aptos-move/framework/move-stdlib/sources/bcs.move @@ -3,9 +3,14 @@ /// published on-chain. See https://github.com/aptos-labs/bcs#binary-canonical-serialization-bcs for more /// details on BCS. module std::bcs { - /// Return the binary representation of `v` in BCS (Binary Canonical Serialization) format + /// Returns the binary representation of `v` in BCS (Binary Canonical Serialization) format. + /// Aborts with `0x1c5` error code if serialization fails. native public fun to_bytes(v: &MoveValue): vector; + /// Returns the size of the binary representation of `v` in BCS (Binary Canonical Serialization) format. + /// Aborts with `0x1c5` error code if there is a failure when calculating serialized size. + native public fun serialized_size(v: &MoveValue): u64; + // ============================== // Module Specification spec module {} // switch to module documentation context @@ -14,4 +19,9 @@ module std::bcs { /// Native function which is defined in the prover's prelude. native fun serialize(v: &MoveValue): vector; } + + spec serialized_size(v: &MoveValue): u64 { + pragma opaque; + ensures result == len(serialize(v)); + } } diff --git a/aptos-move/framework/move-stdlib/sources/bit_vector.move b/aptos-move/framework/move-stdlib/sources/bit_vector.move index 7bf3e22694444..b006061964de8 100644 --- a/aptos-move/framework/move-stdlib/sources/bit_vector.move +++ b/aptos-move/framework/move-stdlib/sources/bit_vector.move @@ -53,58 +53,58 @@ module std::bit_vector { aborts_if length >= MAX_SIZE with ELENGTH; } - /// Set the bit at `bit_index` in the `bitvector` regardless of its previous state. 
- public fun set(bitvector: &mut BitVector, bit_index: u64) { - assert!(bit_index < vector::length(&bitvector.bit_field), EINDEX); - let x = vector::borrow_mut(&mut bitvector.bit_field, bit_index); + /// Set the bit at `bit_index` in the `self` regardless of its previous state. + public fun set(self: &mut BitVector, bit_index: u64) { + assert!(bit_index < vector::length(&self.bit_field), EINDEX); + let x = vector::borrow_mut(&mut self.bit_field, bit_index); *x = true; } spec set { include SetAbortsIf; - ensures bitvector.bit_field[bit_index]; + ensures self.bit_field[bit_index]; } spec schema SetAbortsIf { - bitvector: BitVector; + self: BitVector; bit_index: u64; - aborts_if bit_index >= length(bitvector) with EINDEX; + aborts_if bit_index >= length(self) with EINDEX; } - /// Unset the bit at `bit_index` in the `bitvector` regardless of its previous state. - public fun unset(bitvector: &mut BitVector, bit_index: u64) { - assert!(bit_index < vector::length(&bitvector.bit_field), EINDEX); - let x = vector::borrow_mut(&mut bitvector.bit_field, bit_index); + /// Unset the bit at `bit_index` in the `self` regardless of its previous state. + public fun unset(self: &mut BitVector, bit_index: u64) { + assert!(bit_index < vector::length(&self.bit_field), EINDEX); + let x = vector::borrow_mut(&mut self.bit_field, bit_index); *x = false; } spec unset { include UnsetAbortsIf; - ensures !bitvector.bit_field[bit_index]; + ensures !self.bit_field[bit_index]; } spec schema UnsetAbortsIf { - bitvector: BitVector; + self: BitVector; bit_index: u64; - aborts_if bit_index >= length(bitvector) with EINDEX; + aborts_if bit_index >= length(self) with EINDEX; } - /// Shift the `bitvector` left by `amount`. If `amount` is greater than the + /// Shift the `self` left by `amount`. If `amount` is greater than the /// bitvector's length the bitvector will be zeroed out. 
- public fun shift_left(bitvector: &mut BitVector, amount: u64) { - if (amount >= bitvector.length) { - vector::for_each_mut(&mut bitvector.bit_field, |elem| { + public fun shift_left(self: &mut BitVector, amount: u64) { + if (amount >= self.length) { + vector::for_each_mut(&mut self.bit_field, |elem| { *elem = false; }); } else { let i = amount; - while (i < bitvector.length) { - if (is_index_set(bitvector, i)) set(bitvector, i - amount) - else unset(bitvector, i - amount); + while (i < self.length) { + if (is_index_set(self, i)) set(self, i - amount) + else unset(self, i - amount); i = i + 1; }; - i = bitvector.length - amount; + i = self.length - amount; - while (i < bitvector.length) { - unset(bitvector, i); + while (i < self.length) { + unset(self, i); i = i + 1; }; } @@ -114,62 +114,62 @@ module std::bit_vector { pragma verify = false; } - /// Return the value of the bit at `bit_index` in the `bitvector`. `true` + /// Return the value of the bit at `bit_index` in the `self`. `true` /// represents "1" and `false` represents a 0 - public fun is_index_set(bitvector: &BitVector, bit_index: u64): bool { - assert!(bit_index < vector::length(&bitvector.bit_field), EINDEX); - *vector::borrow(&bitvector.bit_field, bit_index) + public fun is_index_set(self: &BitVector, bit_index: u64): bool { + assert!(bit_index < vector::length(&self.bit_field), EINDEX); + *vector::borrow(&self.bit_field, bit_index) } spec is_index_set { include IsIndexSetAbortsIf; - ensures result == bitvector.bit_field[bit_index]; + ensures result == self.bit_field[bit_index]; } spec schema IsIndexSetAbortsIf { - bitvector: BitVector; + self: BitVector; bit_index: u64; - aborts_if bit_index >= length(bitvector) with EINDEX; + aborts_if bit_index >= length(self) with EINDEX; } - spec fun spec_is_index_set(bitvector: BitVector, bit_index: u64): bool { - if (bit_index >= length(bitvector)) { + spec fun spec_is_index_set(self: BitVector, bit_index: u64): bool { + if (bit_index >= length(self)) { false } 
else { - bitvector.bit_field[bit_index] + self.bit_field[bit_index] } } /// Return the length (number of usable bits) of this bitvector - public fun length(bitvector: &BitVector): u64 { - vector::length(&bitvector.bit_field) + public fun length(self: &BitVector): u64 { + vector::length(&self.bit_field) } /// Returns the length of the longest sequence of set bits starting at (and /// including) `start_index` in the `bitvector`. If there is no such /// sequence, then `0` is returned. - public fun longest_set_sequence_starting_at(bitvector: &BitVector, start_index: u64): u64 { - assert!(start_index < bitvector.length, EINDEX); + public fun longest_set_sequence_starting_at(self: &BitVector, start_index: u64): u64 { + assert!(start_index < self.length, EINDEX); let index = start_index; // Find the greatest index in the vector such that all indices less than it are set. while ({ spec { invariant index >= start_index; - invariant index == start_index || is_index_set(bitvector, index - 1); - invariant index == start_index || index - 1 < vector::length(bitvector.bit_field); - invariant forall j in start_index..index: is_index_set(bitvector, j); - invariant forall j in start_index..index: j < vector::length(bitvector.bit_field); + invariant index == start_index || is_index_set(self, index - 1); + invariant index == start_index || index - 1 < vector::length(self.bit_field); + invariant forall j in start_index..index: is_index_set(self, j); + invariant forall j in start_index..index: j < vector::length(self.bit_field); }; - index < bitvector.length + index < self.length }) { - if (!is_index_set(bitvector, index)) break; + if (!is_index_set(self, index)) break; index = index + 1; }; index - start_index } - spec longest_set_sequence_starting_at(bitvector: &BitVector, start_index: u64): u64 { - aborts_if start_index >= bitvector.length; - ensures forall i in start_index..result: is_index_set(bitvector, i); + spec longest_set_sequence_starting_at(self: &BitVector, start_index: 
u64): u64 { + aborts_if start_index >= self.length; + ensures forall i in start_index..result: is_index_set(self, i); } #[test_only] @@ -178,19 +178,19 @@ module std::bit_vector { } #[verify_only] - public fun shift_left_for_verification_only(bitvector: &mut BitVector, amount: u64) { - if (amount >= bitvector.length) { - let len = vector::length(&bitvector.bit_field); + public fun shift_left_for_verification_only(self: &mut BitVector, amount: u64) { + if (amount >= self.length) { + let len = vector::length(&self.bit_field); let i = 0; while ({ spec { - invariant len == bitvector.length; - invariant forall k in 0..i: !bitvector.bit_field[k]; - invariant forall k in i..bitvector.length: bitvector.bit_field[k] == old(bitvector).bit_field[k]; + invariant len == self.length; + invariant forall k in 0..i: !self.bit_field[k]; + invariant forall k in i..self.length: self.bit_field[k] == old(self).bit_field[k]; }; i < len }) { - let elem = vector::borrow_mut(&mut bitvector.bit_field, i); + let elem = vector::borrow_mut(&mut self.bit_field, i); *elem = false; i = i + 1; }; @@ -200,40 +200,40 @@ module std::bit_vector { while ({ spec { invariant i >= amount; - invariant bitvector.length == old(bitvector).length; - invariant forall j in amount..i: old(bitvector).bit_field[j] == bitvector.bit_field[j - amount]; - invariant forall j in (i-amount)..bitvector.length : old(bitvector).bit_field[j] == bitvector.bit_field[j]; - invariant forall k in 0..i-amount: bitvector.bit_field[k] == old(bitvector).bit_field[k + amount]; + invariant self.length == old(self).length; + invariant forall j in amount..i: old(self).bit_field[j] == self.bit_field[j - amount]; + invariant forall j in (i-amount)..self.length : old(self).bit_field[j] == self.bit_field[j]; + invariant forall k in 0..i-amount: self.bit_field[k] == old(self).bit_field[k + amount]; }; - i < bitvector.length + i < self.length }) { - if (is_index_set(bitvector, i)) set(bitvector, i - amount) - else unset(bitvector, i - amount); + 
if (is_index_set(self, i)) set(self, i - amount) + else unset(self, i - amount); i = i + 1; }; - i = bitvector.length - amount; + i = self.length - amount; while ({ spec { - invariant forall j in bitvector.length - amount..i: !bitvector.bit_field[j]; - invariant forall k in 0..bitvector.length - amount: bitvector.bit_field[k] == old(bitvector).bit_field[k + amount]; - invariant i >= bitvector.length - amount; + invariant forall j in self.length - amount..i: !self.bit_field[j]; + invariant forall k in 0..self.length - amount: self.bit_field[k] == old(self).bit_field[k + amount]; + invariant i >= self.length - amount; }; - i < bitvector.length + i < self.length }) { - unset(bitvector, i); + unset(self, i); i = i + 1; } } } spec shift_left_for_verification_only { aborts_if false; - ensures amount >= bitvector.length ==> (forall k in 0..bitvector.length: !bitvector.bit_field[k]); - ensures amount < bitvector.length ==> - (forall i in bitvector.length - amount..bitvector.length: !bitvector.bit_field[i]); - ensures amount < bitvector.length ==> - (forall i in 0..bitvector.length - amount: bitvector.bit_field[i] == old(bitvector).bit_field[i + amount]); + ensures amount >= self.length ==> (forall k in 0..self.length: !self.bit_field[k]); + ensures amount < self.length ==> + (forall i in self.length - amount..self.length: !self.bit_field[i]); + ensures amount < self.length ==> + (forall i in 0..self.length - amount: self.bit_field[i] == old(self).bit_field[i + amount]); } } diff --git a/aptos-move/framework/move-stdlib/sources/configs/features.move b/aptos-move/framework/move-stdlib/sources/configs/features.move index 8fbe0984859ae..e3aa750255dad 100644 --- a/aptos-move/framework/move-stdlib/sources/configs/features.move +++ b/aptos-move/framework/move-stdlib/sources/configs/features.move @@ -582,6 +582,27 @@ module std::features { is_enabled(ABORT_IF_MULTISIG_PAYLOAD_MISMATCH) } + /// Whether the simulation enhancement is enabled. 
This enables the simulation without an authentication check, + /// the sponsored transaction simulation when the fee payer is set to 0x0, and the multisig transaction + /// simulation consistent with the execution. + /// + /// Lifetime: transient + const TRANSACTION_SIMULATION_ENHANCEMENT: u64 = 78; + + public fun get_transaction_simulation_enhancement_feature(): u64 { TRANSACTION_SIMULATION_ENHANCEMENT } + + public fun transaction_simulation_enhancement_enabled(): bool acquires Features { + is_enabled(TRANSACTION_SIMULATION_ENHANCEMENT) + } + + const COLLECTION_OWNER: u64 = 79; + + public fun get_collection_owner_feature(): u64 { COLLECTION_OWNER } + + public fun is_collection_owner_enabled(): bool acquires Features { + is_enabled(COLLECTION_OWNER) + } + + // ============================================================================================ // Feature Flag Implementation diff --git a/aptos-move/framework/move-stdlib/sources/configs/features.spec.move b/aptos-move/framework/move-stdlib/sources/configs/features.spec.move index 2823108154016..e8daa8950e396 100644 --- a/aptos-move/framework/move-stdlib/sources/configs/features.spec.move +++ b/aptos-move/framework/move-stdlib/sources/configs/features.spec.move @@ -100,6 +100,10 @@ spec std::features { spec_is_enabled(ABORT_IF_MULTISIG_PAYLOAD_MISMATCH) } + spec fun spec_simulation_enhancement_enabled(): bool { + spec_is_enabled(TRANSACTION_SIMULATION_ENHANCEMENT) + } + spec abort_if_multisig_payload_mismatch_enabled { pragma opaque; aborts_if [abstract] false; diff --git a/aptos-move/framework/move-stdlib/sources/fixed_point32.move b/aptos-move/framework/move-stdlib/sources/fixed_point32.move index 96409a9ac4dfd..82611cb4b39b0 100644 --- a/aptos-move/framework/move-stdlib/sources/fixed_point32.move +++ b/aptos-move/framework/move-stdlib/sources/fixed_point32.move @@ -144,13 +144,13 @@ module std::fixed_point32 { /// Accessor for the raw u64 value.
Other less common operations, such as /// adding or subtracting FixedPoint32 values, can be done using the raw /// values directly. - public fun get_raw_value(num: FixedPoint32): u64 { - num.value + public fun get_raw_value(self: FixedPoint32): u64 { + self.value } /// Returns true if the ratio is zero. - public fun is_zero(num: FixedPoint32): bool { - num.value == 0 + public fun is_zero(self: FixedPoint32): bool { + self.value == 0 } /// Returns the smaller of the two FixedPoint32 numbers. @@ -216,27 +216,27 @@ module std::fixed_point32 { } /// Returns the largest integer less than or equal to a given number. - public fun floor(num: FixedPoint32): u64 { - num.value >> 32 + public fun floor(self: FixedPoint32): u64 { + self.value >> 32 } spec floor { pragma opaque; aborts_if false; - ensures result == spec_floor(num); + ensures result == spec_floor(self); } - spec fun spec_floor(val: FixedPoint32): u64 { - let fractional = val.value % (1 << 32); + spec fun spec_floor(self: FixedPoint32): u64 { + let fractional = self.value % (1 << 32); if (fractional == 0) { - val.value >> 32 + self.value >> 32 } else { - (val.value - fractional) >> 32 + (self.value - fractional) >> 32 } } /// Rounds up the given FixedPoint32 to the next largest integer. 
- public fun ceil(num: FixedPoint32): u64 { - let floored_num = floor(num) << 32; - if (num.value == floored_num) { + public fun ceil(self: FixedPoint32): u64 { + let floored_num = floor(self) << 32; + if (self.value == floored_num) { return floored_num >> 32 }; let val = ((floored_num as u128) + (1 << 32)); @@ -246,42 +246,42 @@ module std::fixed_point32 { pragma verify_duration_estimate = 120; pragma opaque; aborts_if false; - ensures result == spec_ceil(num); + ensures result == spec_ceil(self); } - spec fun spec_ceil(val: FixedPoint32): u64 { - let fractional = val.value % (1 << 32); + spec fun spec_ceil(self: FixedPoint32): u64 { + let fractional = self.value % (1 << 32); let one = 1 << 32; if (fractional == 0) { - val.value >> 32 + self.value >> 32 } else { - (val.value - fractional + one) >> 32 + (self.value - fractional + one) >> 32 } } /// Returns the value of a FixedPoint32 to the nearest integer. - public fun round(num: FixedPoint32): u64 { - let floored_num = floor(num) << 32; + public fun round(self: FixedPoint32): u64 { + let floored_num = floor(self) << 32; let boundary = floored_num + ((1 << 32) / 2); - if (num.value < boundary) { + if (self.value < boundary) { floored_num >> 32 } else { - ceil(num) + ceil(self) } } spec round { pragma verify_duration_estimate = 120; pragma opaque; aborts_if false; - ensures result == spec_round(num); + ensures result == spec_round(self); } - spec fun spec_round(val: FixedPoint32): u64 { - let fractional = val.value % (1 << 32); + spec fun spec_round(self: FixedPoint32): u64 { + let fractional = self.value % (1 << 32); let boundary = (1 << 32) / 2; let one = 1 << 32; if (fractional < boundary) { - (val.value - fractional) >> 32 + (self.value - fractional) >> 32 } else { - (val.value - fractional + one) >> 32 + (self.value - fractional + one) >> 32 } } diff --git a/aptos-move/framework/move-stdlib/sources/option.move b/aptos-move/framework/move-stdlib/sources/option.move index 1793abfe9bc20..50aefbe12d008 100644 --- 
a/aptos-move/framework/move-stdlib/sources/option.move +++ b/aptos-move/framework/move-stdlib/sources/option.move @@ -57,134 +57,134 @@ module std::option { aborts_if vector::length(vec) > 1; } - /// Return true if `t` does not hold a value - public fun is_none(t: &Option): bool { - vector::is_empty(&t.vec) + /// Return true if `self` does not hold a value + public fun is_none(self: &Option): bool { + vector::is_empty(&self.vec) } spec is_none { pragma opaque; aborts_if false; - ensures result == spec_is_none(t); + ensures result == spec_is_none(self); } - spec fun spec_is_none(t: Option): bool { - vector::is_empty(t.vec) + spec fun spec_is_none(self: Option): bool { + vector::is_empty(self.vec) } - /// Return true if `t` holds a value - public fun is_some(t: &Option): bool { - !vector::is_empty(&t.vec) + /// Return true if `self` holds a value + public fun is_some(self: &Option): bool { + !vector::is_empty(&self.vec) } spec is_some { pragma opaque; aborts_if false; - ensures result == spec_is_some(t); + ensures result == spec_is_some(self); } - spec fun spec_is_some(t: Option): bool { - !vector::is_empty(t.vec) + spec fun spec_is_some(self: Option): bool { + !vector::is_empty(self.vec) } - /// Return true if the value in `t` is equal to `e_ref` - /// Always returns `false` if `t` does not hold a value - public fun contains(t: &Option, e_ref: &Element): bool { - vector::contains(&t.vec, e_ref) + /// Return true if the value in `self` is equal to `e_ref` + /// Always returns `false` if `self` does not hold a value + public fun contains(self: &Option, e_ref: &Element): bool { + vector::contains(&self.vec, e_ref) } spec contains { pragma opaque; aborts_if false; - ensures result == spec_contains(t, e_ref); + ensures result == spec_contains(self, e_ref); } - spec fun spec_contains(t: Option, e: Element): bool { - is_some(t) && borrow(t) == e + spec fun spec_contains(self: Option, e: Element): bool { + is_some(self) && borrow(self) == e } - /// Return an immutable 
reference to the value inside `t` - /// Aborts if `t` does not hold a value - public fun borrow(t: &Option): &Element { - assert!(is_some(t), EOPTION_NOT_SET); - vector::borrow(&t.vec, 0) + /// Return an immutable reference to the value inside `self` + /// Aborts if `self` does not hold a value + public fun borrow(self: &Option): &Element { + assert!(is_some(self), EOPTION_NOT_SET); + vector::borrow(&self.vec, 0) } spec borrow { pragma opaque; include AbortsIfNone; - ensures result == spec_borrow(t); + ensures result == spec_borrow(self); } - spec fun spec_borrow(t: Option): Element { - t.vec[0] + spec fun spec_borrow(self: Option): Element { + self.vec[0] } - /// Return a reference to the value inside `t` if it holds one - /// Return `default_ref` if `t` does not hold a value - public fun borrow_with_default(t: &Option, default_ref: &Element): &Element { - let vec_ref = &t.vec; + /// Return a reference to the value inside `self` if it holds one + /// Return `default_ref` if `self` does not hold a value + public fun borrow_with_default(self: &Option, default_ref: &Element): &Element { + let vec_ref = &self.vec; if (vector::is_empty(vec_ref)) default_ref else vector::borrow(vec_ref, 0) } spec borrow_with_default { pragma opaque; aborts_if false; - ensures result == (if (spec_is_some(t)) spec_borrow(t) else default_ref); + ensures result == (if (spec_is_some(self)) spec_borrow(self) else default_ref); } - /// Return the value inside `t` if it holds one - /// Return `default` if `t` does not hold a value + /// Return the value inside `self` if it holds one + /// Return `default` if `self` does not hold a value public fun get_with_default( - t: &Option, + self: &Option, default: Element, ): Element { - let vec_ref = &t.vec; + let vec_ref = &self.vec; if (vector::is_empty(vec_ref)) default else *vector::borrow(vec_ref, 0) } spec get_with_default { pragma opaque; aborts_if false; - ensures result == (if (spec_is_some(t)) spec_borrow(t) else default); + ensures result == 
(if (spec_is_some(self)) spec_borrow(self) else default); } - /// Convert the none option `t` to a some option by adding `e`. - /// Aborts if `t` already holds a value - public fun fill(t: &mut Option, e: Element) { - let vec_ref = &mut t.vec; + /// Convert the none option `self` to a some option by adding `e`. + /// Aborts if `self` already holds a value + public fun fill(self: &mut Option, e: Element) { + let vec_ref = &mut self.vec; if (vector::is_empty(vec_ref)) vector::push_back(vec_ref, e) else abort EOPTION_IS_SET } spec fill { pragma opaque; - aborts_if spec_is_some(t) with EOPTION_IS_SET; - ensures spec_is_some(t); - ensures spec_borrow(t) == e; + aborts_if spec_is_some(self) with EOPTION_IS_SET; + ensures spec_is_some(self); + ensures spec_borrow(self) == e; } - /// Convert a `some` option to a `none` by removing and returning the value stored inside `t` - /// Aborts if `t` does not hold a value - public fun extract(t: &mut Option): Element { - assert!(is_some(t), EOPTION_NOT_SET); - vector::pop_back(&mut t.vec) + /// Convert a `some` option to a `none` by removing and returning the value stored inside `self` + /// Aborts if `self` does not hold a value + public fun extract(self: &mut Option): Element { + assert!(is_some(self), EOPTION_NOT_SET); + vector::pop_back(&mut self.vec) } spec extract { pragma opaque; include AbortsIfNone; - ensures result == spec_borrow(old(t)); - ensures spec_is_none(t); + ensures result == spec_borrow(old(self)); + ensures spec_is_none(self); } - /// Return a mutable reference to the value inside `t` - /// Aborts if `t` does not hold a value - public fun borrow_mut(t: &mut Option): &mut Element { - assert!(is_some(t), EOPTION_NOT_SET); - vector::borrow_mut(&mut t.vec, 0) + /// Return a mutable reference to the value inside `self` + /// Aborts if `self` does not hold a value + public fun borrow_mut(self: &mut Option): &mut Element { + assert!(is_some(self), EOPTION_NOT_SET); + vector::borrow_mut(&mut self.vec, 0) } spec 
borrow_mut { include AbortsIfNone; - ensures result == spec_borrow(t); - ensures t == old(t); + ensures result == spec_borrow(self); + ensures self == old(self); } - /// Swap the old value inside `t` with `e` and return the old value - /// Aborts if `t` does not hold a value - public fun swap(t: &mut Option, e: Element): Element { - assert!(is_some(t), EOPTION_NOT_SET); - let vec_ref = &mut t.vec; + /// Swap the old value inside `self` with `e` and return the old value + /// Aborts if `self` does not hold a value + public fun swap(self: &mut Option, e: Element): Element { + assert!(is_some(self), EOPTION_NOT_SET); + let vec_ref = &mut self.vec; let old_value = vector::pop_back(vec_ref); vector::push_back(vec_ref, e); old_value @@ -192,16 +192,16 @@ module std::option { spec swap { pragma opaque; include AbortsIfNone; - ensures result == spec_borrow(old(t)); - ensures spec_is_some(t); - ensures spec_borrow(t) == e; + ensures result == spec_borrow(old(self)); + ensures spec_is_some(self); + ensures spec_borrow(self) == e; } - /// Swap the old value inside `t` with `e` and return the old value; + /// Swap the old value inside `self` with `e` and return the old value; /// or if there is no old value, fill it with `e`. - /// Different from swap(), swap_or_fill() allows for `t` not holding a value. - public fun swap_or_fill(t: &mut Option, e: Element): Option { - let vec_ref = &mut t.vec; + /// Different from swap(), swap_or_fill() allows for `self` not holding a value. + public fun swap_or_fill(self: &mut Option, e: Element): Option { + let vec_ref = &mut self.vec; let old_value = if (vector::is_empty(vec_ref)) none() else some(vector::pop_back(vec_ref)); vector::push_back(vec_ref, e); @@ -210,27 +210,27 @@ module std::option { spec swap_or_fill { pragma opaque; aborts_if false; - ensures result == old(t); - ensures spec_borrow(t) == e; + ensures result == old(self); + ensures spec_borrow(self) == e; } - /// Destroys `t.` If `t` holds a value, return it. 
Returns `default` otherwise - public fun destroy_with_default(t: Option, default: Element): Element { - let Option { vec } = t; + /// Destroys `self.` If `self` holds a value, return it. Returns `default` otherwise + public fun destroy_with_default(self: Option, default: Element): Element { + let Option { vec } = self; if (vector::is_empty(&mut vec)) default else vector::pop_back(&mut vec) } spec destroy_with_default { pragma opaque; aborts_if false; - ensures result == (if (spec_is_some(t)) spec_borrow(t) else default); + ensures result == (if (spec_is_some(self)) spec_borrow(self) else default); } - /// Unpack `t` and return its contents - /// Aborts if `t` does not hold a value - public fun destroy_some(t: Option): Element { - assert!(is_some(&t), EOPTION_NOT_SET); - let Option { vec } = t; + /// Unpack `self` and return its contents + /// Aborts if `self` does not hold a value + public fun destroy_some(self: Option): Element { + assert!(is_some(&self), EOPTION_NOT_SET); + let Option { vec } = self; let elem = vector::pop_back(&mut vec); vector::destroy_empty(vec); elem @@ -238,106 +238,106 @@ module std::option { spec destroy_some { pragma opaque; include AbortsIfNone; - ensures result == spec_borrow(t); + ensures result == spec_borrow(self); } - /// Unpack `t` - /// Aborts if `t` holds a value - public fun destroy_none(t: Option) { - assert!(is_none(&t), EOPTION_IS_SET); - let Option { vec } = t; + /// Unpack `self` + /// Aborts if `self` holds a value + public fun destroy_none(self: Option) { + assert!(is_none(&self), EOPTION_IS_SET); + let Option { vec } = self; vector::destroy_empty(vec) } spec destroy_none { pragma opaque; - aborts_if spec_is_some(t) with EOPTION_IS_SET; + aborts_if spec_is_some(self) with EOPTION_IS_SET; } - /// Convert `t` into a vector of length 1 if it is `Some`, + /// Convert `self` into a vector of length 1 if it is `Some`, /// and an empty vector otherwise - public fun to_vec(t: Option): vector { - let Option { vec } = t; + public 
fun to_vec(self: Option): vector { + let Option { vec } = self; vec } spec to_vec { pragma opaque; aborts_if false; - ensures result == t.vec; + ensures result == self.vec; } /// Apply the function to the optional element, consuming it. Does nothing if no value present. - public inline fun for_each(o: Option, f: |Element|) { - if (is_some(&o)) { - f(destroy_some(o)) + public inline fun for_each(self: Option, f: |Element|) { + if (is_some(&self)) { + f(destroy_some(self)) } else { - destroy_none(o) + destroy_none(self) } } /// Apply the function to the optional element reference. Does nothing if no value present. - public inline fun for_each_ref(o: &Option, f: |&Element|) { - if (is_some(o)) { - f(borrow(o)) + public inline fun for_each_ref(self: &Option, f: |&Element|) { + if (is_some(self)) { + f(borrow(self)) } } /// Apply the function to the optional element reference. Does nothing if no value present. - public inline fun for_each_mut(o: &mut Option, f: |&mut Element|) { - if (is_some(o)) { - f(borrow_mut(o)) + public inline fun for_each_mut(self: &mut Option, f: |&mut Element|) { + if (is_some(self)) { + f(borrow_mut(self)) } } /// Folds the function over the optional element. public inline fun fold( - o: Option, + self: Option, init: Accumulator, f: |Accumulator,Element|Accumulator ): Accumulator { - if (is_some(&o)) { - f(init, destroy_some(o)) + if (is_some(&self)) { + f(init, destroy_some(self)) } else { - destroy_none(o); + destroy_none(self); init } } /// Maps the content of an option. - public inline fun map(o: Option, f: |Element|OtherElement): Option { - if (is_some(&o)) { - some(f(destroy_some(o))) + public inline fun map(self: Option, f: |Element|OtherElement): Option { + if (is_some(&self)) { + some(f(destroy_some(self))) } else { - destroy_none(o); + destroy_none(self); none() } } /// Maps the content of an option without destroying the original option. 
public inline fun map_ref( - o: &Option, f: |&Element|OtherElement): Option { - if (is_some(o)) { - some(f(borrow(o))) + self: &Option, f: |&Element|OtherElement): Option { + if (is_some(self)) { + some(f(borrow(self))) } else { none() } } /// Filters the content of an option - public inline fun filter(o: Option, f: |&Element|bool): Option { - if (is_some(&o) && f(borrow(&o))) { - o + public inline fun filter(self: Option, f: |&Element|bool): Option { + if (is_some(&self) && f(borrow(&self))) { + self } else { none() } } /// Returns true if the option contains an element which satisfies predicate. - public inline fun any(o: &Option, p: |&Element|bool): bool { - is_some(o) && p(borrow(o)) + public inline fun any(self: &Option, p: |&Element|bool): bool { + is_some(self) && p(borrow(self)) } /// Utility function to destroy an option that is not droppable. - public inline fun destroy(o: Option, d: |Element|) { - let vec = to_vec(o); + public inline fun destroy(self: Option, d: |Element|) { + let vec = to_vec(self); vector::destroy(vec, |e| d(e)); } @@ -350,7 +350,7 @@ module std::option { /// # Helper Schema spec schema AbortsIfNone { - t: Option; - aborts_if spec_is_none(t) with EOPTION_NOT_SET; + self: Option; + aborts_if spec_is_none(self) with EOPTION_NOT_SET; } } diff --git a/aptos-move/framework/move-stdlib/sources/string.move b/aptos-move/framework/move-stdlib/sources/string.move index 6a2ca69d00ec7..549aa2380e36a 100644 --- a/aptos-move/framework/move-stdlib/sources/string.move +++ b/aptos-move/framework/move-stdlib/sources/string.move @@ -30,48 +30,48 @@ module std::string { } /// Returns a reference to the underlying byte vector. - public fun bytes(s: &String): &vector { - &s.bytes + public fun bytes(self: &String): &vector { + &self.bytes } /// Checks whether this string is empty. 
- public fun is_empty(s: &String): bool { - vector::is_empty(&s.bytes) + public fun is_empty(self: &String): bool { + vector::is_empty(&self.bytes) } /// Returns the length of this string, in bytes. - public fun length(s: &String): u64 { - vector::length(&s.bytes) + public fun length(self: &String): u64 { + vector::length(&self.bytes) } /// Appends a string. - public fun append(s: &mut String, r: String) { - vector::append(&mut s.bytes, r.bytes) + public fun append(self: &mut String, r: String) { + vector::append(&mut self.bytes, r.bytes) } /// Appends bytes which must be in valid utf8 format. - public fun append_utf8(s: &mut String, bytes: vector) { - append(s, utf8(bytes)) + public fun append_utf8(self: &mut String, bytes: vector) { + append(self, utf8(bytes)) } /// Insert the other string at the byte index in given string. The index must be at a valid utf8 char /// boundary. - public fun insert(s: &mut String, at: u64, o: String) { - let bytes = &s.bytes; + public fun insert(self: &mut String, at: u64, o: String) { + let bytes = &self.bytes; assert!(at <= vector::length(bytes) && internal_is_char_boundary(bytes, at), EINVALID_INDEX); - let l = length(s); - let front = sub_string(s, 0, at); - let end = sub_string(s, at, l); + let l = length(self); + let front = sub_string(self, 0, at); + let end = sub_string(self, at, l); append(&mut front, o); append(&mut front, end); - *s = front; + *self = front; } /// Returns a sub-string using the given byte indices, where `i` is the first byte position and `j` is the start /// of the first byte not included (or the length of the string). The indices must be at valid utf8 char boundaries, /// guaranteeing that the result is valid utf8. 
- public fun sub_string(s: &String, i: u64, j: u64): String { - let bytes = &s.bytes; + public fun sub_string(self: &String, i: u64, j: u64): String { + let bytes = &self.bytes; let l = vector::length(bytes); assert!( j <= l && i <= j && internal_is_char_boundary(bytes, i) && internal_is_char_boundary(bytes, j), @@ -81,8 +81,8 @@ module std::string { } /// Computes the index of the first occurrence of a string. Returns `length(s)` if no occurrence found. - public fun index_of(s: &String, r: &String): u64 { - internal_index_of(&s.bytes, &r.bytes) + public fun index_of(self: &String, r: &String): u64 { + internal_index_of(&self.bytes, &r.bytes) } // Native API diff --git a/aptos-move/framework/move-stdlib/sources/vector.move b/aptos-move/framework/move-stdlib/sources/vector.move index 05368acf4edbf..1f7754a2a4cb8 100644 --- a/aptos-move/framework/move-stdlib/sources/vector.move +++ b/aptos-move/framework/move-stdlib/sources/vector.move @@ -30,36 +30,36 @@ module std::vector { #[bytecode_instruction] /// Return the length of the vector. - native public fun length(v: &vector): u64; + native public fun length(self: &vector): u64; #[bytecode_instruction] - /// Acquire an immutable reference to the `i`th element of the vector `v`. + /// Acquire an immutable reference to the `i`th element of the vector `self`. /// Aborts if `i` is out of bounds. - native public fun borrow(v: &vector, i: u64): ∈ + native public fun borrow(self: &vector, i: u64): ∈ #[bytecode_instruction] - /// Add element `e` to the end of the vector `v`. - native public fun push_back(v: &mut vector, e: Element); + /// Add element `e` to the end of the vector `self`. + native public fun push_back(self: &mut vector, e: Element); #[bytecode_instruction] - /// Return a mutable reference to the `i`th element in the vector `v`. + /// Return a mutable reference to the `i`th element in the vector `self`. /// Aborts if `i` is out of bounds. 
- native public fun borrow_mut(v: &mut vector, i: u64): &mut Element; + native public fun borrow_mut(self: &mut vector, i: u64): &mut Element; #[bytecode_instruction] - /// Pop an element from the end of vector `v`. - /// Aborts if `v` is empty. - native public fun pop_back(v: &mut vector): Element; + /// Pop an element from the end of vector `self`. + /// Aborts if `self` is empty. + native public fun pop_back(self: &mut vector): Element; #[bytecode_instruction] - /// Destroy the vector `v`. - /// Aborts if `v` is not empty. - native public fun destroy_empty(v: vector); + /// Destroy the vector `self`. + /// Aborts if `self` is not empty. + native public fun destroy_empty(self: vector); #[bytecode_instruction] - /// Swaps the elements at the `i`th and `j`th indices in the vector `v`. + /// Swaps the elements at the `i`th and `j`th indices in the vector `self`. /// Aborts if `i` or `j` is out of bounds. - native public fun swap(v: &mut vector, i: u64, j: u64); + native public fun swap(self: &mut vector, i: u64, j: u64); /// Return an vector of size one containing element `e`. public fun singleton(e: Element): vector { @@ -72,23 +72,23 @@ module std::vector { ensures result == vec(e); } - /// Reverses the order of the elements in the vector `v` in place. - public fun reverse(v: &mut vector) { - let len = length(v); - reverse_slice(v, 0, len); + /// Reverses the order of the elements in the vector `self` in place. + public fun reverse(self: &mut vector) { + let len = length(self); + reverse_slice(self, 0, len); } spec reverse { pragma intrinsic = true; } - /// Reverses the order of the elements [left, right) in the vector `v` in place. - public fun reverse_slice(v: &mut vector, left: u64, right: u64) { + /// Reverses the order of the elements [left, right) in the vector `self` in place. 
+ public fun reverse_slice(self: &mut vector, left: u64, right: u64) { assert!(left <= right, EINVALID_RANGE); if (left == right) return; right = right - 1; while (left < right) { - swap(v, left, right); + swap(self, left, right); left = left + 1; right = right - 1; } @@ -97,10 +97,10 @@ module std::vector { pragma intrinsic = true; } - /// Pushes all of the elements of the `other` vector into the `lhs` vector. - public fun append(lhs: &mut vector, other: vector) { + /// Pushes all of the elements of the `other` vector into the `self` vector. + public fun append(self: &mut vector, other: vector) { reverse(&mut other); - reverse_append(lhs, other); + reverse_append(self, other); } spec append { pragma intrinsic = true; @@ -109,11 +109,11 @@ module std::vector { pragma intrinsic = true; } - /// Pushes all of the elements of the `other` vector into the `lhs` vector. - public fun reverse_append(lhs: &mut vector, other: vector) { + /// Pushes all of the elements of the `other` vector into the `self` vector. 
+ public fun reverse_append(self: &mut vector, other: vector) { let len = length(&other); while (len > 0) { - push_back(lhs, pop_back(&mut other)); + push_back(self, pop_back(&mut other)); len = len - 1; }; destroy_empty(other); @@ -123,8 +123,8 @@ module std::vector { } /// Trim a vector to a smaller size, returning the evicted elements in order - public fun trim(v: &mut vector, new_len: u64): vector { - let res = trim_reverse(v, new_len); + public fun trim(self: &mut vector, new_len: u64): vector { + let res = trim_reverse(self, new_len); reverse(&mut res); res } @@ -133,12 +133,12 @@ module std::vector { } /// Trim a vector to a smaller size, returning the evicted elements in reverse order - public fun trim_reverse(v: &mut vector, new_len: u64): vector { - let len = length(v); + public fun trim_reverse(self: &mut vector, new_len: u64): vector { + let len = length(self); assert!(new_len <= len, EINDEX_OUT_OF_BOUNDS); let result = empty(); while (new_len < len) { - push_back(&mut result, pop_back(v)); + push_back(&mut result, pop_back(self)); len = len - 1; }; result @@ -148,17 +148,17 @@ module std::vector { } - /// Return `true` if the vector `v` has no elements and `false` otherwise. - public fun is_empty(v: &vector): bool { - length(v) == 0 + /// Return `true` if the vector `self` has no elements and `false` otherwise. + public fun is_empty(self: &vector): bool { + length(self) == 0 } - /// Return true if `e` is in the vector `v`. - public fun contains(v: &vector, e: &Element): bool { + /// Return true if `e` is in the vector `self`. + public fun contains(self: &vector, e: &Element): bool { let i = 0; - let len = length(v); + let len = length(self); while (i < len) { - if (borrow(v, i) == e) return true; + if (borrow(self, i) == e) return true; i = i + 1; }; false @@ -167,13 +167,13 @@ module std::vector { pragma intrinsic = true; } - /// Return `(true, i)` if `e` is in the vector `v` at index `i`. 
+ /// Return `(true, i)` if `e` is in the vector `self` at index `i`. /// Otherwise, returns `(false, 0)`. - public fun index_of(v: &vector, e: &Element): (bool, u64) { + public fun index_of(self: &vector, e: &Element): (bool, u64) { let i = 0; - let len = length(v); + let len = length(self); while (i < len) { - if (borrow(v, i) == e) return (true, i); + if (borrow(self, i) == e) return (true, i); i = i + 1; }; (false, 0) @@ -185,14 +185,14 @@ module std::vector { /// Return `(true, i)` if there's an element that matches the predicate. If there are multiple elements that match /// the predicate, only the index of the first one is returned. /// Otherwise, returns `(false, 0)`. - public inline fun find(v: &vector, f: |&Element|bool): (bool, u64) { + public inline fun find(self: &vector, f: |&Element|bool): (bool, u64) { let find = false; let found_index = 0; let i = 0; - let len = length(v); + let len = length(self); while (i < len) { // Cannot call return in an inline function so we need to resort to break here. - if (f(borrow(v, i))) { + if (f(borrow(self, i))) { find = true; found_index = i; break @@ -204,12 +204,12 @@ module std::vector { /// Insert a new element at position 0 <= i <= length, using O(length - i) time. /// Aborts if out of bounds. - public fun insert(v: &mut vector, i: u64, e: Element) { - let len = length(v); + public fun insert(self: &mut vector, i: u64, e: Element) { + let len = length(self); assert!(i <= len, EINDEX_OUT_OF_BOUNDS); - push_back(v, e); + push_back(self, e); while (i < len) { - swap(v, i, len); + swap(self, i, len); i = i + 1; }; } @@ -217,34 +217,34 @@ module std::vector { pragma intrinsic = true; } - /// Remove the `i`th element of the vector `v`, shifting all subsequent elements. + /// Remove the `i`th element of the vector `self`, shifting all subsequent elements. /// This is O(n) and preserves ordering of elements in the vector. /// Aborts if `i` is out of bounds. 
- public fun remove(v: &mut vector, i: u64): Element { - let len = length(v); + public fun remove(self: &mut vector, i: u64): Element { + let len = length(self); // i out of bounds; abort if (i >= len) abort EINDEX_OUT_OF_BOUNDS; len = len - 1; - while (i < len) swap(v, i, { i = i + 1; i }); - pop_back(v) + while (i < len) swap(self, i, { i = i + 1; i }); + pop_back(self) } spec remove { pragma intrinsic = true; } - /// Remove the first occurrence of a given value in the vector `v` and return it in a vector, shifting all + /// Remove the first occurrence of a given value in the vector `self` and return it in a vector, shifting all /// subsequent elements. /// This is O(n) and preserves ordering of elements in the vector. /// This returns an empty vector if the value isn't present in the vector. /// Note that this cannot return an option as option uses vector and there'd be a circular dependency between option /// and vector. - public fun remove_value(v: &mut vector, val: &Element): vector { + public fun remove_value(self: &mut vector, val: &Element): vector { // This doesn't cost a O(2N) run time as index_of scans from left to right and stops when the element is found, // while remove would continue from the identified index to the end of the vector. - let (found, index) = index_of(v, val); + let (found, index) = index_of(self, val); if (found) { - vector[remove(v, index)] + vector[remove(self, index)] } else { vector[] } @@ -253,106 +253,106 @@ module std::vector { pragma intrinsic = true; } - /// Swap the `i`th element of the vector `v` with the last element and then pop the vector. + /// Swap the `i`th element of the vector `self` with the last element and then pop the vector. /// This is O(1), but does not preserve ordering of elements in the vector. /// Aborts if `i` is out of bounds. 
- public fun swap_remove(v: &mut vector, i: u64): Element { - assert!(!is_empty(v), EINDEX_OUT_OF_BOUNDS); - let last_idx = length(v) - 1; - swap(v, i, last_idx); - pop_back(v) + public fun swap_remove(self: &mut vector, i: u64): Element { + assert!(!is_empty(self), EINDEX_OUT_OF_BOUNDS); + let last_idx = length(self) - 1; + swap(self, i, last_idx); + pop_back(self) } spec swap_remove { pragma intrinsic = true; } /// Apply the function to each element in the vector, consuming it. - public inline fun for_each(v: vector, f: |Element|) { - reverse(&mut v); // We need to reverse the vector to consume it efficiently - for_each_reverse(v, |e| f(e)); + public inline fun for_each(self: vector, f: |Element|) { + reverse(&mut self); // We need to reverse the vector to consume it efficiently + for_each_reverse(self, |e| f(e)); } /// Apply the function to each element in the vector, consuming it. - public inline fun for_each_reverse(v: vector, f: |Element|) { - let len = length(&v); + public inline fun for_each_reverse(self: vector, f: |Element|) { + let len = length(&self); while (len > 0) { - f(pop_back(&mut v)); + f(pop_back(&mut self)); len = len - 1; }; - destroy_empty(v) + destroy_empty(self) } /// Apply the function to a reference of each element in the vector. - public inline fun for_each_ref(v: &vector, f: |&Element|) { + public inline fun for_each_ref(self: &vector, f: |&Element|) { let i = 0; - let len = length(v); + let len = length(self); while (i < len) { - f(borrow(v, i)); + f(borrow(self, i)); i = i + 1 } } /// Apply the function to each pair of elements in the two given vectors, consuming them. 
- public inline fun zip(v1: vector, v2: vector, f: |Element1, Element2|) { + public inline fun zip(self: vector, v2: vector, f: |Element1, Element2|) { // We need to reverse the vectors to consume it efficiently - reverse(&mut v1); + reverse(&mut self); reverse(&mut v2); - zip_reverse(v1, v2, |e1, e2| f(e1, e2)); + zip_reverse(self, v2, |e1, e2| f(e1, e2)); } /// Apply the function to each pair of elements in the two given vectors in the reverse order, consuming them. /// This errors out if the vectors are not of the same length. public inline fun zip_reverse( - v1: vector, + self: vector, v2: vector, f: |Element1, Element2|, ) { - let len = length(&v1); + let len = length(&self); // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it // due to how inline functions work. assert!(len == length(&v2), 0x20002); while (len > 0) { - f(pop_back(&mut v1), pop_back(&mut v2)); + f(pop_back(&mut self), pop_back(&mut v2)); len = len - 1; }; - destroy_empty(v1); + destroy_empty(self); destroy_empty(v2); } /// Apply the function to the references of each pair of elements in the two given vectors. /// This errors out if the vectors are not of the same length. public inline fun zip_ref( - v1: &vector, + self: &vector, v2: &vector, f: |&Element1, &Element2|, ) { - let len = length(v1); + let len = length(self); // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it // due to how inline functions work. assert!(len == length(v2), 0x20002); let i = 0; while (i < len) { - f(borrow(v1, i), borrow(v2, i)); + f(borrow(self, i), borrow(v2, i)); i = i + 1 } } /// Apply the function to a reference of each element in the vector with its index. 
- public inline fun enumerate_ref(v: &vector, f: |u64, &Element|) { + public inline fun enumerate_ref(self: &vector, f: |u64, &Element|) { let i = 0; - let len = length(v); + let len = length(self); while (i < len) { - f(i, borrow(v, i)); + f(i, borrow(self, i)); i = i + 1; }; } /// Apply the function to a mutable reference to each element in the vector. - public inline fun for_each_mut(v: &mut vector, f: |&mut Element|) { + public inline fun for_each_mut(self: &mut vector, f: |&mut Element|) { let i = 0; - let len = length(v); + let len = length(self); while (i < len) { - f(borrow_mut(v, i)); + f(borrow_mut(self, i)); i = i + 1 } } @@ -360,27 +360,27 @@ module std::vector { /// Apply the function to mutable references to each pair of elements in the two given vectors. /// This errors out if the vectors are not of the same length. public inline fun zip_mut( - v1: &mut vector, + self: &mut vector, v2: &mut vector, f: |&mut Element1, &mut Element2|, ) { let i = 0; - let len = length(v1); + let len = length(self); // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it // due to how inline functions work. assert!(len == length(v2), 0x20002); while (i < len) { - f(borrow_mut(v1, i), borrow_mut(v2, i)); + f(borrow_mut(self, i), borrow_mut(v2, i)); i = i + 1 } } /// Apply the function to a mutable reference of each element in the vector with its index. - public inline fun enumerate_mut(v: &mut vector, f: |u64, &mut Element|) { + public inline fun enumerate_mut(self: &mut vector, f: |u64, &mut Element|) { let i = 0; - let len = length(v); + let len = length(self); while (i < len) { - f(i, borrow_mut(v, i)); + f(i, borrow_mut(self, i)); i = i + 1; }; } @@ -388,86 +388,86 @@ module std::vector { /// Fold the function over the elements. 
For example, `fold(vector[1,2,3], 0, f)` will execute /// `f(f(f(0, 1), 2), 3)` public inline fun fold( - v: vector, + self: vector, init: Accumulator, f: |Accumulator,Element|Accumulator ): Accumulator { let accu = init; - for_each(v, |elem| accu = f(accu, elem)); + for_each(self, |elem| accu = f(accu, elem)); accu } /// Fold right like fold above but working right to left. For example, `fold(vector[1,2,3], 0, f)` will execute /// `f(1, f(2, f(3, 0)))` public inline fun foldr( - v: vector, + self: vector, init: Accumulator, f: |Element, Accumulator|Accumulator ): Accumulator { let accu = init; - for_each_reverse(v, |elem| accu = f(elem, accu)); + for_each_reverse(self, |elem| accu = f(elem, accu)); accu } /// Map the function over the references of the elements of the vector, producing a new vector without modifying the /// original vector. public inline fun map_ref( - v: &vector, + self: &vector, f: |&Element|NewElement ): vector { let result = vector[]; - for_each_ref(v, |elem| push_back(&mut result, f(elem))); + for_each_ref(self, |elem| push_back(&mut result, f(elem))); result } /// Map the function over the references of the element pairs of two vectors, producing a new vector from the return /// values without modifying the original vectors. public inline fun zip_map_ref( - v1: &vector, + self: &vector, v2: &vector, f: |&Element1, &Element2|NewElement ): vector { // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it // due to how inline functions work. - assert!(length(v1) == length(v2), 0x20002); + assert!(length(self) == length(v2), 0x20002); let result = vector[]; - zip_ref(v1, v2, |e1, e2| push_back(&mut result, f(e1, e2))); + zip_ref(self, v2, |e1, e2| push_back(&mut result, f(e1, e2))); result } /// Map the function over the elements of the vector, producing a new vector. 
public inline fun map( - v: vector, + self: vector, f: |Element|NewElement ): vector { let result = vector[]; - for_each(v, |elem| push_back(&mut result, f(elem))); + for_each(self, |elem| push_back(&mut result, f(elem))); result } /// Map the function over the element pairs of the two vectors, producing a new vector. public inline fun zip_map( - v1: vector, + self: vector, v2: vector, f: |Element1, Element2|NewElement ): vector { // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it // due to how inline functions work. - assert!(length(&v1) == length(&v2), 0x20002); + assert!(length(&self) == length(&v2), 0x20002); let result = vector[]; - zip(v1, v2, |e1, e2| push_back(&mut result, f(e1, e2))); + zip(self, v2, |e1, e2| push_back(&mut result, f(e1, e2))); result } /// Filter the vector using the boolean function, removing all elements for which `p(e)` is not true. public inline fun filter( - v: vector, + self: vector, p: |&Element|bool ): vector { let result = vector[]; - for_each(v, |elem| { + for_each(self, |elem| { if (p(&elem)) push_back(&mut result, elem); }); result @@ -477,20 +477,20 @@ module std::vector { /// Preserves the relative order of the elements for which pred is true, /// BUT NOT for the elements for which pred is false. public inline fun partition( - v: &mut vector, + self: &mut vector, pred: |&Element|bool ): u64 { let i = 0; - let len = length(v); + let len = length(self); while (i < len) { - if (!pred(borrow(v, i))) break; + if (!pred(borrow(self, i))) break; i = i + 1; }; let p = i; i = i + 1; while (i < len) { - if (pred(borrow(v, i))) { - swap(v, p, i); + if (pred(borrow(self, i))) { + swap(self, p, i); p = p + 1; }; i = i + 1; @@ -501,11 +501,11 @@ module std::vector { /// rotate(&mut [1, 2, 3, 4, 5], 2) -> [3, 4, 5, 1, 2] in place, returns the split point /// ie. 
3 in the example above public fun rotate( - v: &mut vector, + self: &mut vector, rot: u64 ): u64 { - let len = length(v); - rotate_slice(v, 0, rot, len) + let len = length(self); + rotate_slice(self, 0, rot, len) } spec rotate { pragma intrinsic = true; @@ -514,14 +514,14 @@ module std::vector { /// Same as above but on a sub-slice of an array [left, right) with left <= rot <= right /// returns the public fun rotate_slice( - v: &mut vector, + self: &mut vector, left: u64, rot: u64, right: u64 ): u64 { - reverse_slice(v, left, rot); - reverse_slice(v, rot, right); - reverse_slice(v, left, right); + reverse_slice(self, left, rot); + reverse_slice(self, rot, right); + reverse_slice(self, left, right); left + (right - rot) } spec rotate_slice { @@ -531,14 +531,14 @@ module std::vector { /// Partition the array based on a predicate p, this routine is stable and thus /// preserves the relative order of the elements in the two partitions. public inline fun stable_partition( - v: &mut vector, + self: &mut vector, p: |&Element|bool ): u64 { - let len = length(v); + let len = length(self); let t = empty(); let f = empty(); while (len > 0) { - let e = pop_back(v); + let e = pop_back(self); if (p(&e)) { push_back(&mut t, e); } else { @@ -547,20 +547,20 @@ module std::vector { len = len - 1; }; let pos = length(&t); - reverse_append(v, t); - reverse_append(v, f); + reverse_append(self, t); + reverse_append(self, f); pos } /// Return true if any element in the vector satisfies the predicate. public inline fun any( - v: &vector, + self: &vector, p: |&Element|bool ): bool { let result = false; let i = 0; - while (i < length(v)) { - result = p(borrow(v, i)); + while (i < length(self)) { + result = p(borrow(self, i)); if (result) { break }; @@ -571,13 +571,13 @@ module std::vector { /// Return true if all elements in the vector satisfy the predicate. 
public inline fun all( - v: &vector, + self: &vector, p: |&Element|bool ): bool { let result = true; let i = 0; - while (i < length(v)) { - result = p(borrow(v, i)); + while (i < length(self)) { + result = p(borrow(self, i)); if (!result) { break }; @@ -589,10 +589,10 @@ module std::vector { /// Destroy a vector, just a wrapper around for_each_reverse with a descriptive name /// when used in the context of destroying a vector. public inline fun destroy( - v: vector, + self: vector, d: |Element| ) { - for_each_reverse(v, |e| d(e)) + for_each_reverse(self, |e| d(e)) } public fun range(start: u64, end: u64): vector { @@ -611,15 +611,15 @@ module std::vector { } public fun slice( - v: &vector, + self: &vector, start: u64, end: u64 ): vector { - assert!(start <= end && end <= length(v), EINVALID_SLICE_RANGE); + assert!(start <= end && end <= length(self), EINVALID_SLICE_RANGE); let vec = vector[]; while (start < end) { - push_back(&mut vec, *borrow(v, start)); + push_back(&mut vec, *borrow(self, start)); start = start + 1; }; vec @@ -633,24 +633,24 @@ module std::vector { /// # Helper Functions spec module { - /// Check if `v1` is equal to the result of adding `e` at the end of `v2` - fun eq_push_back(v1: vector, v2: vector, e: Element): bool { - len(v1) == len(v2) + 1 && - v1[len(v1)-1] == e && - v1[0..len(v1)-1] == v2[0..len(v2)] + /// Check if `self` is equal to the result of adding `e` at the end of `v2` + fun eq_push_back(self: vector, v2: vector, e: Element): bool { + len(self) == len(v2) + 1 && + self[len(self)-1] == e && + self[0..len(self)-1] == v2[0..len(v2)] } - /// Check if `v` is equal to the result of concatenating `v1` and `v2` - fun eq_append(v: vector, v1: vector, v2: vector): bool { - len(v) == len(v1) + len(v2) && - v[0..len(v1)] == v1 && - v[len(v1)..len(v)] == v2 + /// Check if `self` is equal to the result of concatenating `v1` and `v2` + fun eq_append(self: vector, v1: vector, v2: vector): bool { + len(self) == len(v1) + len(v2) && + 
self[0..len(v1)] == v1 && + self[len(v1)..len(self)] == v2 } - /// Check `v1` is equal to the result of removing the first element of `v2` - fun eq_pop_front(v1: vector, v2: vector): bool { - len(v1) + 1 == len(v2) && - v1 == v2[1..len(v2)] + /// Check `self` is equal to the result of removing the first element of `v2` + fun eq_pop_front(self: vector, v2: vector): bool { + len(self) + 1 == len(v2) && + self == v2[1..len(v2)] } /// Check that `v1` is equal to the result of removing the element at index `i` from `v2`. @@ -660,9 +660,9 @@ module std::vector { v1[i..len(v1)] == v2[i + 1..len(v2)] } - /// Check if `v` contains `e`. - fun spec_contains(v: vector, e: Element): bool { - exists x in v: x == e + /// Check if `self` contains `e`. + fun spec_contains(self: vector, e: Element): bool { + exists x in self: x == e } } diff --git a/aptos-move/framework/move-stdlib/src/natives/bcs.rs b/aptos-move/framework/move-stdlib/src/natives/bcs.rs index c5941a401e06c..283e890c1cdf7 100644 --- a/aptos-move/framework/move-stdlib/src/natives/bcs.rs +++ b/aptos-move/framework/move-stdlib/src/natives/bcs.rs @@ -16,6 +16,8 @@ use move_core_types::{ use move_vm_runtime::native_functions::NativeFunction; use move_vm_types::{ loaded_data::runtime_types::Type, + natives::function::PartialVMResult, + value_serde::serialized_size_allowing_delayed_values, values::{values_impl::Reference, Value}, }; use smallvec::{smallvec, SmallVec}; @@ -42,11 +44,9 @@ fn native_to_bytes( debug_assert!(ty_args.len() == 1); debug_assert!(args.len() == 1); - // pop type and value let ref_to_val = safely_pop_arg!(args, Reference); let arg_type = ty_args.pop().unwrap(); - // get type layout let layout = match context.type_to_type_layout(&arg_type) { Ok(layout) => layout, Err(_) => { @@ -57,8 +57,10 @@ fn native_to_bytes( }, }; - // serialize value + // TODO(#14175): Reading the reference performs a deep copy, and we can + // implement it in a more efficient way. 
let val = ref_to_val.read_ref()?; + let serialized_value = match val.simple_serialize(&layout) { Some(serialized_value) => serialized_value, None => { @@ -74,13 +76,66 @@ fn native_to_bytes( Ok(smallvec![Value::vector_u8(serialized_value)]) } +/*************************************************************************************************** + * native fun serialized_size + * + * gas cost: size_of(output) + * + * If the getting the type layout or serialization results in error, a special failure + * cost is charged. + * + **************************************************************************************************/ +fn native_serialized_size( + context: &mut SafeNativeContext, + mut ty_args: Vec, + mut args: VecDeque, +) -> SafeNativeResult> { + debug_assert!(ty_args.len() == 1); + debug_assert!(args.len() == 1); + + context.charge(BCS_SERIALIZED_SIZE_BASE)?; + + let reference = safely_pop_arg!(args, Reference); + let ty = ty_args.pop().unwrap(); + + let serialized_size = match serialized_size_impl(context, reference, &ty) { + Ok(serialized_size) => serialized_size as u64, + Err(_) => { + context.charge(BCS_SERIALIZED_SIZE_FAILURE)?; + + // Re-use the same abort code as bcs::to_bytes. + return Err(SafeNativeError::Abort { + abort_code: NFE_BCS_SERIALIZATION_FAILURE, + }); + }, + }; + context.charge(BCS_SERIALIZED_SIZE_PER_BYTE_SERIALIZED * NumBytes::new(serialized_size))?; + + Ok(smallvec![Value::u64(serialized_size)]) +} + +fn serialized_size_impl( + context: &mut SafeNativeContext, + reference: Reference, + ty: &Type, +) -> PartialVMResult { + // TODO(#14175): Reading the reference performs a deep copy, and we can + // implement it in a more efficient way. 
+ let value = reference.read_ref()?; + let ty_layout = context.type_to_type_layout(ty)?; + serialized_size_allowing_delayed_values(&value, &ty_layout) +} + /*************************************************************************************************** * module **************************************************************************************************/ pub fn make_all( builder: &SafeNativeBuilder, ) -> impl Iterator + '_ { - let funcs = [("to_bytes", native_to_bytes as RawSafeNative)]; + let funcs = [ + ("to_bytes", native_to_bytes as RawSafeNative), + ("serialized_size", native_serialized_size), + ]; builder.make_named_natives(funcs) } diff --git a/aptos-move/framework/move-stdlib/tests/bcs_tests.move b/aptos-move/framework/move-stdlib/tests/bcs_tests.move index 72437ebb00fd6..367f504044694 100644 --- a/aptos-move/framework/move-stdlib/tests/bcs_tests.move +++ b/aptos-move/framework/move-stdlib/tests/bcs_tests.move @@ -1,6 +1,7 @@ #[test_only] module std::bcs_tests { use std::bcs; + use std::vector; struct Box has copy, drop, store { x: T } struct Box3 has copy, drop, store { x: Box> } @@ -10,44 +11,61 @@ module std::bcs_tests { struct Box63 has copy, drop, store { x: Box31> } struct Box127 has copy, drop, store { x: Box63> } - /* Deactivated because of address size dependency - #[test] - fun bcs_address() { - let addr = @0x89b9f9d1fadc027cf9532d6f99041522; - let expected_output = x"89b9f9d1fadc027cf9532d6f99041522"; - assert!(bcs::to_bytes(&addr) == expected_output, 0); - } - */ - #[test] fun bcs_bool() { - let expected_output = x"01"; - assert!(bcs::to_bytes(&true) == expected_output, 0); + let expected_bytes = x"01"; + let actual_bytes = bcs::to_bytes(&true); + assert!(actual_bytes == expected_bytes, 0); + + let expected_size = vector::length(&actual_bytes); + let actual_size = bcs::serialized_size(&true); + assert!(actual_size == expected_size, 1); } #[test] fun bcs_u8() { - let expected_output = x"01"; - assert!(bcs::to_bytes(&1u8) == 
expected_output, 0); + let expected_bytes = x"01"; + let actual_bytes = bcs::to_bytes(&1u8); + assert!(actual_bytes == expected_bytes, 0); + + let expected_size = vector::length(&actual_bytes); + let actual_size = bcs::serialized_size(&1u8); + assert!(actual_size == expected_size, 1); } #[test] fun bcs_u64() { - let expected_output = x"0100000000000000"; - assert!(bcs::to_bytes(&1) == expected_output, 0); + let expected_bytes = x"0100000000000000"; + let actual_bytes = bcs::to_bytes(&1); + assert!(actual_bytes == expected_bytes, 0); + + let expected_size = vector::length(&actual_bytes); + let actual_size = bcs::serialized_size(&1); + assert!(actual_size == expected_size, 1); } #[test] fun bcs_u128() { - let expected_output = x"01000000000000000000000000000000"; - assert!(bcs::to_bytes(&1u128) == expected_output, 0); + let expected_bytes = x"01000000000000000000000000000000"; + let actual_bytes = bcs::to_bytes(&1u128); + assert!(actual_bytes == expected_bytes, 0); + + let expected_size = vector::length(&actual_bytes); + let actual_size = bcs::serialized_size(&1u128); + assert!(actual_size == expected_size, 1); } #[test] fun bcs_vec_u8() { let v = x"0f"; - let expected_output = x"010f"; - assert!(bcs::to_bytes(&v) == expected_output, 0); + + let expected_bytes = x"010f"; + let actual_bytes = bcs::to_bytes(&v); + assert!(actual_bytes == expected_bytes, 0); + + let expected_size = vector::length(&actual_bytes); + let actual_size = bcs::serialized_size(&v); + assert!(actual_size == expected_size, 1); } fun box3(x: T): Box3 { @@ -76,14 +94,12 @@ module std::bcs_tests { #[test] fun encode_128() { - bcs::to_bytes(&box127(true)); - } + let box = box127(true); - /* Deactivated because we now limit the depth of values you could create inside the VM - #[test] - #[expected_failure(abort_code = 453, location = std::bcs)] - fun encode_129() { - bcs::to_bytes(&Box { x: box127(true) }); + let bytes = bcs::to_bytes(&box); + let expected_size = vector::length(&bytes); + + let 
actual_size = bcs::serialized_size(&box); + assert!(actual_size == expected_size, 0); } - */ } diff --git a/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/acl.md b/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/acl.md new file mode 100644 index 0000000000000..66bf91f9ac9ea --- /dev/null +++ b/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/acl.md @@ -0,0 +1,320 @@ + + + +# Module `0x1::acl` + +Access control list (acl) module. An acl is a list of account addresses who +have the access permission to a certain object. +This module uses a vector to represent the list, but can be refactored to +use a "set" instead when it's available in the language in the future. + + +- [Struct `ACL`](#0x1_acl_ACL) +- [Constants](#@Constants_0) +- [Function `empty`](#0x1_acl_empty) +- [Function `add`](#0x1_acl_add) +- [Function `remove`](#0x1_acl_remove) +- [Function `contains`](#0x1_acl_contains) +- [Function `assert_contains`](#0x1_acl_assert_contains) +- [Specification](#@Specification_1) + - [Struct `ACL`](#@Specification_1_ACL) + - [Function `add`](#@Specification_1_add) + - [Function `remove`](#@Specification_1_remove) + - [Function `contains`](#@Specification_1_contains) + - [Function `assert_contains`](#@Specification_1_assert_contains) + + +
use 0x1::error;
+use 0x1::vector;
+
+ + + + + +## Struct `ACL` + + + +
struct ACL has copy, drop, store
+
+ + + +
+Fields + + +
+
+list: vector<address> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +The ACL already contains the address. + + +
const ECONTAIN: u64 = 0;
+
+ + + + + +The ACL does not contain the address. + + +
const ENOT_CONTAIN: u64 = 1;
+
+ + + + + +## Function `empty` + +Return an empty ACL. + + +
public fun empty(): acl::ACL
+
+ + + +
+Implementation + + +
public fun empty(): ACL {
+    ACL{ list: vector::empty<address>() }
+}
+
+ + + +
+ + + +## Function `add` + +Add the address to the ACL. + + +
public fun add(self: &mut acl::ACL, addr: address)
+
+ + + +
+Implementation + + +
public fun add(self: &mut ACL, addr: address) {
+    assert!(!vector::contains(&mut self.list, &addr), error::invalid_argument(ECONTAIN));
+    vector::push_back(&mut self.list, addr);
+}
+
+ + + +
+ + + +## Function `remove` + +Remove the address from the ACL. + + +
public fun remove(self: &mut acl::ACL, addr: address)
+
+ + + +
+Implementation + + +
public fun remove(self: &mut ACL, addr: address) {
+    let (found, index) = vector::index_of(&mut self.list, &addr);
+    assert!(found, error::invalid_argument(ENOT_CONTAIN));
+    vector::remove(&mut self.list, index);
+}
+
+ + + +
+ + + +## Function `contains` + +Return true iff the ACL contains the address. + + +
public fun contains(self: &acl::ACL, addr: address): bool
+
+ + + +
+Implementation + + +
public fun contains(self: &ACL, addr: address): bool {
+    vector::contains(&self.list, &addr)
+}
+
+ + + +
+ + + +## Function `assert_contains` + +assert! that the ACL has the address. + + +
public fun assert_contains(self: &acl::ACL, addr: address)
+
+ + + +
+Implementation + + +
public fun assert_contains(self: &ACL, addr: address) {
+    assert!(contains(self, addr), error::invalid_argument(ENOT_CONTAIN));
+}
+
+ + + +
+ + + +## Specification + + + + +### Struct `ACL` + + +
struct ACL has copy, drop, store
+
+ + + +
+
+list: vector<address> +
+
+ +
+
+ + + +
invariant forall i in 0..len(list), j in 0..len(list): list[i] == list[j] ==> i == j;
+
+ + + + + + + +
fun spec_contains(self: ACL, addr: address): bool {
+   exists a in self.list: a == addr
+}
+
+ + + + + +### Function `add` + + +
public fun add(self: &mut acl::ACL, addr: address)
+
+ + + + +
aborts_if spec_contains(self, addr) with error::INVALID_ARGUMENT;
+ensures spec_contains(self, addr);
+
+ + + + + +### Function `remove` + + +
public fun remove(self: &mut acl::ACL, addr: address)
+
+ + + + +
aborts_if !spec_contains(self, addr) with error::INVALID_ARGUMENT;
+ensures !spec_contains(self, addr);
+
+ + + + + +### Function `contains` + + +
public fun contains(self: &acl::ACL, addr: address): bool
+
+ + + + +
ensures result == spec_contains(self, addr);
+
+ + + + + +### Function `assert_contains` + + +
public fun assert_contains(self: &acl::ACL, addr: address)
+
+ + + + +
aborts_if !spec_contains(self, addr) with error::INVALID_ARGUMENT;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/bcs.md b/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/bcs.md new file mode 100644 index 0000000000000..9bdcf7f45f7d3 --- /dev/null +++ b/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/bcs.md @@ -0,0 +1,103 @@ + + + +# Module `0x1::bcs` + +Utility for converting a Move value to its binary representation in BCS (Binary Canonical +Serialization). BCS is the binary encoding for Move resources and other non-module values +published on-chain. See https://github.com/aptos-labs/bcs#binary-canonical-serialization-bcs for more +details on BCS. + + +- [Function `to_bytes`](#0x1_bcs_to_bytes) +- [Function `serialized_size`](#0x1_bcs_serialized_size) +- [Specification](#@Specification_0) + - [Function `serialized_size`](#@Specification_0_serialized_size) + + +
+ + + + + +## Function `to_bytes` + +Returns the binary representation of v in BCS (Binary Canonical Serialization) format. +Aborts with 0x1c5 error code if serialization fails. + + +
public fun to_bytes<MoveValue>(v: &MoveValue): vector<u8>
+
+ + + +
+Implementation + + +
native public fun to_bytes<MoveValue>(v: &MoveValue): vector<u8>;
+
+ + + +
+ + + +## Function `serialized_size` + +Returns the size of the binary representation of v in BCS (Binary Canonical Serialization) format. +Aborts with 0x1c5 error code if there is a failure when calculating serialized size. + + +
public fun serialized_size<MoveValue>(v: &MoveValue): u64
+
+ + + +
+Implementation + + +
native public fun serialized_size<MoveValue>(v: &MoveValue): u64;
+
+ + + +
+ + + +## Specification + + + +Native function which is defined in the prover's prelude. + + + + + +
native fun serialize<MoveValue>(v: &MoveValue): vector<u8>;
+
+ + + + + +### Function `serialized_size` + + +
public fun serialized_size<MoveValue>(v: &MoveValue): u64
+
+ + + + +
pragma opaque;
+ensures result == len(serialize(v));
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/bit_vector.md b/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/bit_vector.md new file mode 100644 index 0000000000000..78fef563ffebc --- /dev/null +++ b/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/bit_vector.md @@ -0,0 +1,553 @@ + + + +# Module `0x1::bit_vector` + + + +- [Struct `BitVector`](#0x1_bit_vector_BitVector) +- [Constants](#@Constants_0) +- [Function `new`](#0x1_bit_vector_new) +- [Function `set`](#0x1_bit_vector_set) +- [Function `unset`](#0x1_bit_vector_unset) +- [Function `shift_left`](#0x1_bit_vector_shift_left) +- [Function `is_index_set`](#0x1_bit_vector_is_index_set) +- [Function `length`](#0x1_bit_vector_length) +- [Function `longest_set_sequence_starting_at`](#0x1_bit_vector_longest_set_sequence_starting_at) +- [Specification](#@Specification_1) + - [Struct `BitVector`](#@Specification_1_BitVector) + - [Function `new`](#@Specification_1_new) + - [Function `set`](#@Specification_1_set) + - [Function `unset`](#@Specification_1_unset) + - [Function `shift_left`](#@Specification_1_shift_left) + - [Function `is_index_set`](#@Specification_1_is_index_set) + - [Function `longest_set_sequence_starting_at`](#@Specification_1_longest_set_sequence_starting_at) + + +
+ + + + + +## Struct `BitVector` + + + +
struct BitVector has copy, drop, store
+
+ + + +
+Fields + + +
+
+length: u64 +
+
+ +
+
+bit_field: vector<bool> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +The provided index is out of bounds + + +
const EINDEX: u64 = 131072;
+
+ + + + + +An invalid length of bitvector was given + + +
const ELENGTH: u64 = 131073;
+
+ + + + + +The maximum allowed bitvector size + + +
const MAX_SIZE: u64 = 1024;
+
+ + + + + + + +
const WORD_SIZE: u64 = 1;
+
+ + + + + +## Function `new` + + + +
public fun new(length: u64): bit_vector::BitVector
+
+ + + +
+Implementation + + +
public fun new(length: u64): BitVector {
+    assert!(length > 0, ELENGTH);
+    assert!(length < MAX_SIZE, ELENGTH);
+    let counter = 0;
+    let bit_field = vector::empty();
+    while ({spec {
+        invariant counter <= length;
+        invariant len(bit_field) == counter;
+    };
+        (counter < length)}) {
+        vector::push_back(&mut bit_field, false);
+        counter = counter + 1;
+    };
+    spec {
+        assert counter == length;
+        assert len(bit_field) == length;
+    };
+
+    BitVector {
+        length,
+        bit_field,
+    }
+}
+
+ + + +
+ + + +## Function `set` + +Set the bit at bit_index in the self regardless of its previous state. + + +
public fun set(self: &mut bit_vector::BitVector, bit_index: u64)
+
+ + + +
+Implementation + + +
public fun set(self: &mut BitVector, bit_index: u64) {
+    assert!(bit_index < vector::length(&self.bit_field), EINDEX);
+    let x = vector::borrow_mut(&mut self.bit_field, bit_index);
+    *x = true;
+}
+
+ + + +
+ + + +## Function `unset` + +Unset the bit at bit_index in the self regardless of its previous state. + + +
public fun unset(self: &mut bit_vector::BitVector, bit_index: u64)
+
+ + + +
+Implementation + + +
public fun unset(self: &mut BitVector, bit_index: u64) {
+    assert!(bit_index < vector::length(&self.bit_field), EINDEX);
+    let x = vector::borrow_mut(&mut self.bit_field, bit_index);
+    *x = false;
+}
+
+ + + +
+ + + +## Function `shift_left` + +Shift the self left by amount. If amount is greater than the +bitvector's length the bitvector will be zeroed out. + + +
public fun shift_left(self: &mut bit_vector::BitVector, amount: u64)
+
+ + + +
+Implementation + + +
public fun shift_left(self: &mut BitVector, amount: u64) {
+    if (amount >= self.length) {
+        vector::for_each_mut(&mut self.bit_field, |elem| {
+            *elem = false;
+        });
+    } else {
+        let i = amount;
+
+        while (i < self.length) {
+            if (is_index_set(self, i)) set(self, i - amount)
+            else unset(self, i - amount);
+            i = i + 1;
+        };
+
+        i = self.length - amount;
+
+        while (i < self.length) {
+            unset(self, i);
+            i = i + 1;
+        };
+    }
+}
+
+ + + +
+ + + +## Function `is_index_set` + +Return the value of the bit at bit_index in the self. true +represents "1" and false represents a 0 + + +
public fun is_index_set(self: &bit_vector::BitVector, bit_index: u64): bool
+
+ + + +
+Implementation + + +
public fun is_index_set(self: &BitVector, bit_index: u64): bool {
+    assert!(bit_index < vector::length(&self.bit_field), EINDEX);
+    *vector::borrow(&self.bit_field, bit_index)
+}
+
+ + + +
+ + + +## Function `length` + +Return the length (number of usable bits) of this bitvector + + +
public fun length(self: &bit_vector::BitVector): u64
+
+ + + +
+Implementation + + +
public fun length(self: &BitVector): u64 {
+    vector::length(&self.bit_field)
+}
+
+ + + +
+ + + +## Function `longest_set_sequence_starting_at` + +Returns the length of the longest sequence of set bits starting at (and +including) start_index in the bitvector. If there is no such +sequence, then 0 is returned. + + +
public fun longest_set_sequence_starting_at(self: &bit_vector::BitVector, start_index: u64): u64
+
+ + + +
+Implementation + + +
public fun longest_set_sequence_starting_at(self: &BitVector, start_index: u64): u64 {
+    assert!(start_index < self.length, EINDEX);
+    let index = start_index;
+
+    // Find the greatest index in the vector such that all indices less than it are set.
+    while ({
+        spec {
+            invariant index >= start_index;
+            invariant index == start_index || is_index_set(self, index - 1);
+            invariant index == start_index || index - 1 < vector::length(self.bit_field);
+            invariant forall j in start_index..index: is_index_set(self, j);
+            invariant forall j in start_index..index: j < vector::length(self.bit_field);
+        };
+        index < self.length
+    }) {
+        if (!is_index_set(self, index)) break;
+        index = index + 1;
+    };
+
+    index - start_index
+}
+
+ + + +
+ + + +## Specification + + + + +### Struct `BitVector` + + +
struct BitVector has copy, drop, store
+
+ + + +
+
+length: u64 +
+
+ +
+
+bit_field: vector<bool> +
+
+ +
+
+ + + +
invariant length == len(bit_field);
+
+ + + + + +### Function `new` + + +
public fun new(length: u64): bit_vector::BitVector
+
+ + + + +
include NewAbortsIf;
+ensures result.length == length;
+ensures len(result.bit_field) == length;
+
+ + + + + + + +
schema NewAbortsIf {
+    length: u64;
+    aborts_if length <= 0 with ELENGTH;
+    aborts_if length >= MAX_SIZE with ELENGTH;
+}
+
+ + + + + +### Function `set` + + +
public fun set(self: &mut bit_vector::BitVector, bit_index: u64)
+
+ + + + +
include SetAbortsIf;
+ensures self.bit_field[bit_index];
+
+ + + + + + + +
schema SetAbortsIf {
+    self: BitVector;
+    bit_index: u64;
+    aborts_if bit_index >= length(self) with EINDEX;
+}
+
+ + + + + +### Function `unset` + + +
public fun unset(self: &mut bit_vector::BitVector, bit_index: u64)
+
+ + + + +
include UnsetAbortsIf;
+ensures !self.bit_field[bit_index];
+
+ + + + + + + +
schema UnsetAbortsIf {
+    self: BitVector;
+    bit_index: u64;
+    aborts_if bit_index >= length(self) with EINDEX;
+}
+
+ + + + + +### Function `shift_left` + + +
public fun shift_left(self: &mut bit_vector::BitVector, amount: u64)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `is_index_set` + + +
public fun is_index_set(self: &bit_vector::BitVector, bit_index: u64): bool
+
+ + + + +
include IsIndexSetAbortsIf;
+ensures result == self.bit_field[bit_index];
+
+ + + + + + + +
schema IsIndexSetAbortsIf {
+    self: BitVector;
+    bit_index: u64;
+    aborts_if bit_index >= length(self) with EINDEX;
+}
+
+ + + + + + + +
fun spec_is_index_set(self: BitVector, bit_index: u64): bool {
+   if (bit_index >= length(self)) {
+       false
+   } else {
+       self.bit_field[bit_index]
+   }
+}
+
+ + + + + +### Function `longest_set_sequence_starting_at` + + +
public fun longest_set_sequence_starting_at(self: &bit_vector::BitVector, start_index: u64): u64
+
+ + + + +
aborts_if start_index >= self.length;
+ensures forall i in start_index..result: is_index_set(self, i);
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/error.md b/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/error.md new file mode 100644 index 0000000000000..0aa82d86871a4 --- /dev/null +++ b/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/error.md @@ -0,0 +1,500 @@ + + + +# Module `0x1::error` + +This module defines a set of canonical error codes which are optional to use by applications for the +abort and assert! features. + +Canonical error codes use the 3 lowest bytes of the u64 abort code range (the upper 5 bytes are free for other use). +Of those, the highest byte represents the *error category* and the lower two bytes the *error reason*. +Given an error category 0x1 and a reason 0x3, a canonical abort code looks as 0x10003. + +A module can use a canonical code with a constant declaration of the following form: + +``` +/// An invalid ASCII character was encountered when creating a string. +const EINVALID_CHARACTER: u64 = 0x010003; +``` + +This code is both valid in the worlds with and without canonical errors. It can be used as a plain module local +error reason understand by the existing error map tooling, or as a canonical code. + +The actual canonical categories have been adopted from Google's canonical error codes, which in turn are derived +from Unix error codes [see here](https://cloud.google.com/apis/design/errors#handling_errors). Each code has an +associated HTTP error code which can be used in REST apis. The mapping from error code to http code is not 1:1; +error codes here are a bit richer than HTTP codes. 
+ + +- [Constants](#@Constants_0) +- [Function `canonical`](#0x1_error_canonical) +- [Function `invalid_argument`](#0x1_error_invalid_argument) +- [Function `out_of_range`](#0x1_error_out_of_range) +- [Function `invalid_state`](#0x1_error_invalid_state) +- [Function `unauthenticated`](#0x1_error_unauthenticated) +- [Function `permission_denied`](#0x1_error_permission_denied) +- [Function `not_found`](#0x1_error_not_found) +- [Function `aborted`](#0x1_error_aborted) +- [Function `already_exists`](#0x1_error_already_exists) +- [Function `resource_exhausted`](#0x1_error_resource_exhausted) +- [Function `internal`](#0x1_error_internal) +- [Function `not_implemented`](#0x1_error_not_implemented) +- [Function `unavailable`](#0x1_error_unavailable) +- [Specification](#@Specification_1) + - [Function `canonical`](#@Specification_1_canonical) + + +
+ + + + + +## Constants + + + + +Concurrency conflict, such as read-modify-write conflict (http: 409) + + +
const ABORTED: u64 = 7;
+
+ + + + + +The resource that a client tried to create already exists (http: 409) + + +
const ALREADY_EXISTS: u64 = 8;
+
+ + + + + +Request cancelled by the client (http: 499) + + +
const CANCELLED: u64 = 10;
+
+ + + + + +Internal error (http: 500) + + +
const INTERNAL: u64 = 11;
+
+ + + + + +Caller specified an invalid argument (http: 400) + + +
const INVALID_ARGUMENT: u64 = 1;
+
+ + + + + +The system is not in a state where the operation can be performed (http: 400) + + +
const INVALID_STATE: u64 = 3;
+
+ + + + + +A specified resource is not found (http: 404) + + +
const NOT_FOUND: u64 = 6;
+
+ + + + + +Feature not implemented (http: 501) + + +
const NOT_IMPLEMENTED: u64 = 12;
+
+ + + + + +An input or result of a computation is out of range (http: 400) + + +
const OUT_OF_RANGE: u64 = 2;
+
+ + + + + +Client does not have sufficient permission (http: 403) + + +
const PERMISSION_DENIED: u64 = 5;
+
+ + + + + +Out of gas or other forms of quota (http: 429) + + +
const RESOURCE_EXHAUSTED: u64 = 9;
+
+ + + + + +Request not authenticated due to missing, invalid, or expired auth token (http: 401) + + +
const UNAUTHENTICATED: u64 = 4;
+
+ + + + + +The service is currently unavailable. Indicates that a retry could solve the issue (http: 503) + + +
const UNAVAILABLE: u64 = 13;
+
+ + + + + +## Function `canonical` + +Construct a canonical error code from a category and a reason. + + +
public fun canonical(category: u64, reason: u64): u64
+
+ + + +
+Implementation + + +
public fun canonical(category: u64, reason: u64): u64 {
+  (category << 16) + reason
+}
+
+ + + +
+ + + +## Function `invalid_argument` + +Functions to construct a canonical error code of the given category. + + +
public fun invalid_argument(r: u64): u64
+
+ + + +
+Implementation + + +
public fun invalid_argument(r: u64): u64 {  canonical(INVALID_ARGUMENT, r) }
+
+ + + +
+ + + +## Function `out_of_range` + + + +
public fun out_of_range(r: u64): u64
+
+ + + +
+Implementation + + +
public fun out_of_range(r: u64): u64 {  canonical(OUT_OF_RANGE, r) }
+
+ + + +
+ + + +## Function `invalid_state` + + + +
public fun invalid_state(r: u64): u64
+
+ + + +
+Implementation + + +
public fun invalid_state(r: u64): u64 {  canonical(INVALID_STATE, r) }
+
+ + + +
+ + + +## Function `unauthenticated` + + + +
public fun unauthenticated(r: u64): u64
+
+ + + +
+Implementation + + +
public fun unauthenticated(r: u64): u64 { canonical(UNAUTHENTICATED, r) }
+
+ + + +
+ + + +## Function `permission_denied` + + + +
public fun permission_denied(r: u64): u64
+
+ + + +
+Implementation + + +
public fun permission_denied(r: u64): u64 { canonical(PERMISSION_DENIED, r) }
+
+ + + +
+ + + +## Function `not_found` + + + +
public fun not_found(r: u64): u64
+
+ + + +
+Implementation + + +
public fun not_found(r: u64): u64 { canonical(NOT_FOUND, r) }
+
+ + + +
+ + + +## Function `aborted` + + + +
public fun aborted(r: u64): u64
+
+ + + +
+Implementation + + +
public fun aborted(r: u64): u64 { canonical(ABORTED, r) }
+
+ + + +
+ + + +## Function `already_exists` + + + +
public fun already_exists(r: u64): u64
+
+ + + +
+Implementation + + +
public fun already_exists(r: u64): u64 { canonical(ALREADY_EXISTS, r) }
+
+ + + +
+ + + +## Function `resource_exhausted` + + + +
public fun resource_exhausted(r: u64): u64
+
+ + + +
+Implementation + + +
public fun resource_exhausted(r: u64): u64 {  canonical(RESOURCE_EXHAUSTED, r) }
+
+ + + +
+ + + +## Function `internal` + + + +
public fun internal(r: u64): u64
+
+ + + +
+Implementation + + +
public fun internal(r: u64): u64 {  canonical(INTERNAL, r) }
+
+ + + +
+ + + +## Function `not_implemented` + + + +
public fun not_implemented(r: u64): u64
+
+ + + +
+Implementation + + +
public fun not_implemented(r: u64): u64 {  canonical(NOT_IMPLEMENTED, r) }
+
+ + + +
+ + + +## Function `unavailable` + + + +
public fun unavailable(r: u64): u64
+
+ + + +
+Implementation + + +
public fun unavailable(r: u64): u64 { canonical(UNAVAILABLE, r) }
+
+ + + +
+ + + +## Specification + + + + +### Function `canonical` + + +
public fun canonical(category: u64, reason: u64): u64
+
+ + + + +
pragma opaque = true;
+let shl_res = category << 16;
+ensures [concrete] result == shl_res + reason;
+aborts_if [abstract] false;
+ensures [abstract] result == category;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/features.md b/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/features.md new file mode 100644 index 0000000000000..ea0b4f9f17740 --- /dev/null +++ b/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/features.md @@ -0,0 +1,3933 @@ + + + +# Module `0x1::features` + +Defines feature flags for Aptos. Those are used in Aptos specific implementations of features in +the Move stdlib, the Aptos stdlib, and the Aptos framework. + +============================================================================================ +Feature Flag Definitions + +Each feature flag should come with documentation which justifies the need of the flag. +Introduction of a new feature flag requires approval of framework owners. Be frugal when +introducing new feature flags, as too many can make it hard to understand the code. + +Each feature flag should come with a specification of a lifetime: + +- a *transient* feature flag is only needed until a related code rollout has happened. This +is typically associated with the introduction of new native Move functions, and is only used +from Move code. The owner of this feature is obliged to remove it once this can be done. + +- a *permanent* feature flag is required to stay around forever. Typically, those flags guard +behavior in native code, and the behavior with or without the feature need to be preserved +for playback. + +Note that removing a feature flag still requires the function which tests for the feature +(like code_dependency_check_enabled below) to stay around for compatibility reasons, as it +is a public function. However, once the feature flag is disabled, those functions can constantly +return true. 
+ + +- [Resource `Features`](#0x1_features_Features) +- [Resource `PendingFeatures`](#0x1_features_PendingFeatures) +- [Constants](#@Constants_0) +- [Function `code_dependency_check_enabled`](#0x1_features_code_dependency_check_enabled) +- [Function `treat_friend_as_private`](#0x1_features_treat_friend_as_private) +- [Function `get_sha_512_and_ripemd_160_feature`](#0x1_features_get_sha_512_and_ripemd_160_feature) +- [Function `sha_512_and_ripemd_160_enabled`](#0x1_features_sha_512_and_ripemd_160_enabled) +- [Function `get_aptos_stdlib_chain_id_feature`](#0x1_features_get_aptos_stdlib_chain_id_feature) +- [Function `aptos_stdlib_chain_id_enabled`](#0x1_features_aptos_stdlib_chain_id_enabled) +- [Function `get_vm_binary_format_v6`](#0x1_features_get_vm_binary_format_v6) +- [Function `allow_vm_binary_format_v6`](#0x1_features_allow_vm_binary_format_v6) +- [Function `get_collect_and_distribute_gas_fees_feature`](#0x1_features_get_collect_and_distribute_gas_fees_feature) +- [Function `collect_and_distribute_gas_fees`](#0x1_features_collect_and_distribute_gas_fees) +- [Function `multi_ed25519_pk_validate_v2_feature`](#0x1_features_multi_ed25519_pk_validate_v2_feature) +- [Function `multi_ed25519_pk_validate_v2_enabled`](#0x1_features_multi_ed25519_pk_validate_v2_enabled) +- [Function `get_blake2b_256_feature`](#0x1_features_get_blake2b_256_feature) +- [Function `blake2b_256_enabled`](#0x1_features_blake2b_256_enabled) +- [Function `get_resource_groups_feature`](#0x1_features_get_resource_groups_feature) +- [Function `resource_groups_enabled`](#0x1_features_resource_groups_enabled) +- [Function `get_multisig_accounts_feature`](#0x1_features_get_multisig_accounts_feature) +- [Function `multisig_accounts_enabled`](#0x1_features_multisig_accounts_enabled) +- [Function `get_delegation_pools_feature`](#0x1_features_get_delegation_pools_feature) +- [Function `delegation_pools_enabled`](#0x1_features_delegation_pools_enabled) +- [Function 
`get_cryptography_algebra_natives_feature`](#0x1_features_get_cryptography_algebra_natives_feature) +- [Function `cryptography_algebra_enabled`](#0x1_features_cryptography_algebra_enabled) +- [Function `get_bls12_381_strutures_feature`](#0x1_features_get_bls12_381_strutures_feature) +- [Function `bls12_381_structures_enabled`](#0x1_features_bls12_381_structures_enabled) +- [Function `get_periodical_reward_rate_decrease_feature`](#0x1_features_get_periodical_reward_rate_decrease_feature) +- [Function `periodical_reward_rate_decrease_enabled`](#0x1_features_periodical_reward_rate_decrease_enabled) +- [Function `get_partial_governance_voting`](#0x1_features_get_partial_governance_voting) +- [Function `partial_governance_voting_enabled`](#0x1_features_partial_governance_voting_enabled) +- [Function `get_delegation_pool_partial_governance_voting`](#0x1_features_get_delegation_pool_partial_governance_voting) +- [Function `delegation_pool_partial_governance_voting_enabled`](#0x1_features_delegation_pool_partial_governance_voting_enabled) +- [Function `fee_payer_enabled`](#0x1_features_fee_payer_enabled) +- [Function `get_auids`](#0x1_features_get_auids) +- [Function `auids_enabled`](#0x1_features_auids_enabled) +- [Function `get_bulletproofs_feature`](#0x1_features_get_bulletproofs_feature) +- [Function `bulletproofs_enabled`](#0x1_features_bulletproofs_enabled) +- [Function `get_signer_native_format_fix_feature`](#0x1_features_get_signer_native_format_fix_feature) +- [Function `signer_native_format_fix_enabled`](#0x1_features_signer_native_format_fix_enabled) +- [Function `get_module_event_feature`](#0x1_features_get_module_event_feature) +- [Function `module_event_enabled`](#0x1_features_module_event_enabled) +- [Function `get_aggregator_v2_api_feature`](#0x1_features_get_aggregator_v2_api_feature) +- [Function `aggregator_v2_api_enabled`](#0x1_features_aggregator_v2_api_enabled) +- [Function 
`get_aggregator_snapshots_feature`](#0x1_features_get_aggregator_snapshots_feature) +- [Function `aggregator_snapshots_enabled`](#0x1_features_aggregator_snapshots_enabled) +- [Function `get_sponsored_automatic_account_creation`](#0x1_features_get_sponsored_automatic_account_creation) +- [Function `sponsored_automatic_account_creation_enabled`](#0x1_features_sponsored_automatic_account_creation_enabled) +- [Function `get_concurrent_token_v2_feature`](#0x1_features_get_concurrent_token_v2_feature) +- [Function `concurrent_token_v2_enabled`](#0x1_features_concurrent_token_v2_enabled) +- [Function `get_concurrent_assets_feature`](#0x1_features_get_concurrent_assets_feature) +- [Function `concurrent_assets_enabled`](#0x1_features_concurrent_assets_enabled) +- [Function `get_operator_beneficiary_change_feature`](#0x1_features_get_operator_beneficiary_change_feature) +- [Function `operator_beneficiary_change_enabled`](#0x1_features_operator_beneficiary_change_enabled) +- [Function `get_commission_change_delegation_pool_feature`](#0x1_features_get_commission_change_delegation_pool_feature) +- [Function `commission_change_delegation_pool_enabled`](#0x1_features_commission_change_delegation_pool_enabled) +- [Function `get_bn254_strutures_feature`](#0x1_features_get_bn254_strutures_feature) +- [Function `bn254_structures_enabled`](#0x1_features_bn254_structures_enabled) +- [Function `get_reconfigure_with_dkg_feature`](#0x1_features_get_reconfigure_with_dkg_feature) +- [Function `reconfigure_with_dkg_enabled`](#0x1_features_reconfigure_with_dkg_enabled) +- [Function `get_keyless_accounts_feature`](#0x1_features_get_keyless_accounts_feature) +- [Function `keyless_accounts_enabled`](#0x1_features_keyless_accounts_enabled) +- [Function `get_keyless_but_zkless_accounts_feature`](#0x1_features_get_keyless_but_zkless_accounts_feature) +- [Function `keyless_but_zkless_accounts_feature_enabled`](#0x1_features_keyless_but_zkless_accounts_feature_enabled) +- [Function 
`get_jwk_consensus_feature`](#0x1_features_get_jwk_consensus_feature) +- [Function `jwk_consensus_enabled`](#0x1_features_jwk_consensus_enabled) +- [Function `get_concurrent_fungible_assets_feature`](#0x1_features_get_concurrent_fungible_assets_feature) +- [Function `concurrent_fungible_assets_enabled`](#0x1_features_concurrent_fungible_assets_enabled) +- [Function `is_object_code_deployment_enabled`](#0x1_features_is_object_code_deployment_enabled) +- [Function `get_max_object_nesting_check_feature`](#0x1_features_get_max_object_nesting_check_feature) +- [Function `max_object_nesting_check_enabled`](#0x1_features_max_object_nesting_check_enabled) +- [Function `get_keyless_accounts_with_passkeys_feature`](#0x1_features_get_keyless_accounts_with_passkeys_feature) +- [Function `keyless_accounts_with_passkeys_feature_enabled`](#0x1_features_keyless_accounts_with_passkeys_feature_enabled) +- [Function `get_multisig_v2_enhancement_feature`](#0x1_features_get_multisig_v2_enhancement_feature) +- [Function `multisig_v2_enhancement_feature_enabled`](#0x1_features_multisig_v2_enhancement_feature_enabled) +- [Function `get_delegation_pool_allowlisting_feature`](#0x1_features_get_delegation_pool_allowlisting_feature) +- [Function `delegation_pool_allowlisting_enabled`](#0x1_features_delegation_pool_allowlisting_enabled) +- [Function `get_module_event_migration_feature`](#0x1_features_get_module_event_migration_feature) +- [Function `module_event_migration_enabled`](#0x1_features_module_event_migration_enabled) +- [Function `get_transaction_context_extension_feature`](#0x1_features_get_transaction_context_extension_feature) +- [Function `transaction_context_extension_enabled`](#0x1_features_transaction_context_extension_enabled) +- [Function `get_coin_to_fungible_asset_migration_feature`](#0x1_features_get_coin_to_fungible_asset_migration_feature) +- [Function `coin_to_fungible_asset_migration_feature_enabled`](#0x1_features_coin_to_fungible_asset_migration_feature_enabled) +- 
[Function `get_primary_apt_fungible_store_at_user_address_feature`](#0x1_features_get_primary_apt_fungible_store_at_user_address_feature) +- [Function `primary_apt_fungible_store_at_user_address_enabled`](#0x1_features_primary_apt_fungible_store_at_user_address_enabled) +- [Function `aggregator_v2_is_at_least_api_enabled`](#0x1_features_aggregator_v2_is_at_least_api_enabled) +- [Function `get_object_native_derived_address_feature`](#0x1_features_get_object_native_derived_address_feature) +- [Function `object_native_derived_address_enabled`](#0x1_features_object_native_derived_address_enabled) +- [Function `get_dispatchable_fungible_asset_feature`](#0x1_features_get_dispatchable_fungible_asset_feature) +- [Function `dispatchable_fungible_asset_enabled`](#0x1_features_dispatchable_fungible_asset_enabled) +- [Function `get_new_accounts_default_to_fa_apt_store_feature`](#0x1_features_get_new_accounts_default_to_fa_apt_store_feature) +- [Function `new_accounts_default_to_fa_apt_store_enabled`](#0x1_features_new_accounts_default_to_fa_apt_store_enabled) +- [Function `get_operations_default_to_fa_apt_store_feature`](#0x1_features_get_operations_default_to_fa_apt_store_feature) +- [Function `operations_default_to_fa_apt_store_enabled`](#0x1_features_operations_default_to_fa_apt_store_enabled) +- [Function `get_concurrent_fungible_balance_feature`](#0x1_features_get_concurrent_fungible_balance_feature) +- [Function `concurrent_fungible_balance_enabled`](#0x1_features_concurrent_fungible_balance_enabled) +- [Function `get_default_to_concurrent_fungible_balance_feature`](#0x1_features_get_default_to_concurrent_fungible_balance_feature) +- [Function `default_to_concurrent_fungible_balance_enabled`](#0x1_features_default_to_concurrent_fungible_balance_enabled) +- [Function `get_abort_if_multisig_payload_mismatch_feature`](#0x1_features_get_abort_if_multisig_payload_mismatch_feature) +- [Function 
`abort_if_multisig_payload_mismatch_enabled`](#0x1_features_abort_if_multisig_payload_mismatch_enabled) +- [Function `get_transaction_simulation_enhancement_feature`](#0x1_features_get_transaction_simulation_enhancement_feature) +- [Function `transaction_simulation_enhancement_enabled`](#0x1_features_transaction_simulation_enhancement_enabled) +- [Function `get_collection_owner_feature`](#0x1_features_get_collection_owner_feature) +- [Function `is_collection_owner_enabled`](#0x1_features_is_collection_owner_enabled) +- [Function `change_feature_flags`](#0x1_features_change_feature_flags) +- [Function `change_feature_flags_internal`](#0x1_features_change_feature_flags_internal) +- [Function `change_feature_flags_for_next_epoch`](#0x1_features_change_feature_flags_for_next_epoch) +- [Function `on_new_epoch`](#0x1_features_on_new_epoch) +- [Function `is_enabled`](#0x1_features_is_enabled) +- [Function `set`](#0x1_features_set) +- [Function `contains`](#0x1_features_contains) +- [Function `apply_diff`](#0x1_features_apply_diff) +- [Function `ensure_framework_signer`](#0x1_features_ensure_framework_signer) +- [Specification](#@Specification_1) + - [Resource `Features`](#@Specification_1_Features) + - [Resource `PendingFeatures`](#@Specification_1_PendingFeatures) + - [Function `periodical_reward_rate_decrease_enabled`](#@Specification_1_periodical_reward_rate_decrease_enabled) + - [Function `partial_governance_voting_enabled`](#@Specification_1_partial_governance_voting_enabled) + - [Function `module_event_enabled`](#@Specification_1_module_event_enabled) + - [Function `abort_if_multisig_payload_mismatch_enabled`](#@Specification_1_abort_if_multisig_payload_mismatch_enabled) + - [Function `change_feature_flags_internal`](#@Specification_1_change_feature_flags_internal) + - [Function `change_feature_flags_for_next_epoch`](#@Specification_1_change_feature_flags_for_next_epoch) + - [Function `on_new_epoch`](#@Specification_1_on_new_epoch) + - [Function 
`is_enabled`](#@Specification_1_is_enabled) + - [Function `set`](#@Specification_1_set) + - [Function `contains`](#@Specification_1_contains) + - [Function `apply_diff`](#@Specification_1_apply_diff) + + +
use 0x1::error;
+use 0x1::signer;
+use 0x1::vector;
+
+ + + + + +## Resource `Features` + +The enabled features, represented by a bitset stored on chain. + + +
struct Features has key
+
+ + + +
+Fields + + +
+
+features: vector<u8> +
+
+ +
+
+ + +
+ + + +## Resource `PendingFeatures` + +This resource holds the feature vec updates received in the current epoch. +On epoch change, the updates take effect and this buffer is cleared. + + +
struct PendingFeatures has key
+
+ + + +
+Fields + + +
+
+features: vector<u8> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Whether the multisig v2 fix is enabled. Once enabled, the multisig transaction execution will explicitly +abort if the provided payload does not match the payload stored on-chain. + +Lifetime: transient + + +
const ABORT_IF_MULTISIG_PAYLOAD_MISMATCH: u64 = 70;
+
+ + + + + + + +
const AGGREGATOR_V2_IS_AT_LEAST_API: u64 = 66;
+
+ + + + + +Whether the new aptos_stdlib::type_info::chain_id() native for fetching the chain ID is enabled. +This is needed because of the introduction of a new native function. +Lifetime: transient + + +
const APTOS_STD_CHAIN_ID_NATIVES: u64 = 4;
+
+ + + + + +Whether enable MOVE functions to call create_auid method to create AUIDs. +Lifetime: transient + + +
const APTOS_UNIQUE_IDENTIFIERS: u64 = 23;
+
+ + + + + +Whether the new BLAKE2B-256 hash function native is enabled. +This is needed because of the introduction of new native function(s). +Lifetime: transient + + +
const BLAKE2B_256_NATIVE: u64 = 8;
+
+ + + + + +Whether the generic algebra implementation for BLS12381 operations are enabled. + +Lifetime: transient + + +
const BLS12_381_STRUCTURES: u64 = 13;
+
+ + + + + +Whether the generic algebra implementation for BN254 operations are enabled. + +Lifetime: transient + + +
const BN254_STRUCTURES: u64 = 43;
+
+ + + + + +Whether the Bulletproofs zero-knowledge range proof module is enabled, and the related native function is +available. This is needed because of the introduction of a new native function. +Lifetime: transient + + +
const BULLETPROOFS_NATIVES: u64 = 24;
+
+ + + + + +Charge invariant violation error. +Lifetime: transient + + +
const CHARGE_INVARIANT_VIOLATION: u64 = 20;
+
+ + + + + +Whether validation of package dependencies is enabled, and the related native function is +available. This is needed because of introduction of a new native function. +Lifetime: transient + + +
const CODE_DEPENDENCY_CHECK: u64 = 1;
+
+ + + + + +Whether migration from coin to fungible asset feature is enabled. + +Lifetime: transient + + +
const COIN_TO_FUNGIBLE_ASSET_MIGRATION: u64 = 60;
+
+ + + + + + + +
const COLLECTION_OWNER: u64 = 79;
+
+ + + + + +Whether gas fees are collected and distributed to the block proposers. +Lifetime: transient + + +
const COLLECT_AND_DISTRIBUTE_GAS_FEES: u64 = 6;
+
+ + + + + +Whether the operator commission rate change in delegation pool is enabled. +Lifetime: transient + + +
const COMMISSION_CHANGE_DELEGATION_POOL: u64 = 42;
+
+ + + + + +Whether to enable Fungible Asset creation +to create higher throughput concurrent variants. +Lifetime: transient + + +
const CONCURRENT_FUNGIBLE_ASSETS: u64 = 50;
+
+ + + + + +Whether to enable concurrent Fungible Balance +to create higher throughput concurrent variants. +Lifetime: transient + + +
const CONCURRENT_FUNGIBLE_BALANCE: u64 = 67;
+
+ + + + + +Whether generic algebra basic operation support in crypto_algebra.move are enabled. + +Lifetime: transient + + +
const CRYPTOGRAPHY_ALGEBRA_NATIVES: u64 = 12;
+
+ + + + + +Whether to default new Fungible Store to the concurrent variant. +Lifetime: transient + + +
const DEFAULT_TO_CONCURRENT_FUNGIBLE_BALANCE: u64 = 68;
+
+ + + + + +Whether delegation pools are enabled. +Lifetime: transient + + +
const DELEGATION_POOLS: u64 = 11;
+
+ + + + + +Whether delegators allowlisting for delegation pools is supported. +Lifetime: transient + + +
const DELEGATION_POOL_ALLOWLISTING: u64 = 56;
+
+ + + + + +Whether to enable partial governance voting on delegation_pool. +Lifetime: transient + + +
const DELEGATION_POOL_PARTIAL_GOVERNANCE_VOTING: u64 = 21;
+
+ + + + + +Whether the dispatchable fungible asset standard feature is enabled. + +Lifetime: transient + + +
const DISPATCHABLE_FUNGIBLE_ASSET: u64 = 63;
+
+ + + + + + + +
const EAPI_DISABLED: u64 = 2;
+
+ + + + + +Whether native_public_key_validate aborts when a public key of the wrong length is given +Lifetime: ephemeral + + +
const ED25519_PUBKEY_VALIDATE_RETURN_FALSE_WRONG_LENGTH: u64 = 14;
+
+ + + + + +Deployed to production, and disabling is deprecated. + + +
const EFEATURE_CANNOT_BE_DISABLED: u64 = 3;
+
+ + + + + +The provided signer does not have a framework address. + + +
const EFRAMEWORK_SIGNER_NEEDED: u64 = 1;
+
+ + + + + + + +
const EINVALID_FEATURE: u64 = 1;
+
+ + + + + + + +
const FEE_PAYER_ACCOUNT_OPTIONAL: u64 = 35;
+
+ + + + + +Whether alternate gas payer is supported +Lifetime: transient + + +
const FEE_PAYER_ENABLED: u64 = 22;
+
+ + + + + +Deprecated by aptos_framework::jwk_consensus_config::JWKConsensusConfig. + + +
const JWK_CONSENSUS: u64 = 49;
+
+ + + + + +Whether the OIDB feature is enabled, possibly with the ZK-less verification mode. + +Lifetime: transient + + +
const KEYLESS_ACCOUNTS: u64 = 46;
+
+ + + + + +Whether keyless accounts support passkey-based ephemeral signatures. + +Lifetime: transient + + +
const KEYLESS_ACCOUNTS_WITH_PASSKEYS: u64 = 54;
+
+ + + + + +Whether the ZK-less mode of the keyless accounts feature is enabled. + +Lifetime: transient + + +
const KEYLESS_BUT_ZKLESS_ACCOUNTS: u64 = 47;
+
+ + + + + + + +
const LIMIT_MAX_IDENTIFIER_LENGTH: u64 = 38;
+
+ + + + + +Whether checking the maximum object nesting is enabled. + + +
const MAX_OBJECT_NESTING_CHECK: u64 = 53;
+
+ + + + + +Whether emit function in event.move are enabled for module events. + +Lifetime: transient + + +
const MODULE_EVENT: u64 = 26;
+
+ + + + + +Whether aptos_framework enables the behavior of module event migration. + +Lifetime: transient + + +
const MODULE_EVENT_MIGRATION: u64 = 57;
+
+ + + + + +Whether multisig accounts (different from accounts with multi-ed25519 auth keys) are enabled. + + +
const MULTISIG_ACCOUNTS: u64 = 10;
+
+ + + + + +Whether the Multisig V2 enhancement feature is enabled. + +Lifetime: transient + + +
const MULTISIG_V2_ENHANCEMENT: u64 = 55;
+
+ + + + + +Whether the new aptos_stdlib::multi_ed25519::public_key_validate_internal_v2() native is enabled. +This is needed because of the introduction of a new native function. +Lifetime: transient + + +
const MULTI_ED25519_PK_VALIDATE_V2_NATIVES: u64 = 7;
+
+ + + + + +Lifetime: transient + + +
const NEW_ACCOUNTS_DEFAULT_TO_FA_APT_STORE: u64 = 64;
+
+ + + + + +Whether deploying to objects is enabled. + + +
const OBJECT_CODE_DEPLOYMENT: u64 = 52;
+
+ + + + + +Whether we use more efficient native implementation of computing object derived address + + +
const OBJECT_NATIVE_DERIVED_ADDRESS: u64 = 62;
+
+ + + + + +Lifetime: transient + + +
const OPERATIONS_DEFAULT_TO_FA_APT_STORE: u64 = 65;
+
+ + + + + +Whether allow changing beneficiaries for operators. +Lifetime: transient + + +
const OPERATOR_BENEFICIARY_CHANGE: u64 = 39;
+
+ + + + + +Whether to enable partial governance voting on aptos_governance. +Lifetime: transient + + +
const PARTIAL_GOVERNANCE_VOTING: u64 = 17;
+
+ + + + + +Whether reward rate decreases periodically. +Lifetime: transient + + +
const PERIODICAL_REWARD_RATE_DECREASE: u64 = 16;
+
+ + + + + + + +
const PRIMARY_APT_FUNGIBLE_STORE_AT_USER_ADDRESS: u64 = 61;
+
+ + + + + +Deprecated by aptos_framework::randomness_config::RandomnessConfig. + + +
const RECONFIGURE_WITH_DKG: u64 = 45;
+
+ + + + + +Whether resource groups are enabled. +This is needed because of new attributes for structs and a change in storage representation. + + +
const RESOURCE_GROUPS: u64 = 9;
+
+ + + + + + + +
const RESOURCE_GROUPS_SPLIT_IN_VM_CHANGE_SET: u64 = 41;
+
+ + + + + + + +
const SAFER_METADATA: u64 = 32;
+
+ + + + + + + +
const SAFER_RESOURCE_GROUPS: u64 = 31;
+
+ + + + + +Whether the new SHA2-512, SHA3-512 and RIPEMD-160 hash function natives are enabled. +This is needed because of the introduction of new native functions. +Lifetime: transient + + +
const SHA_512_AND_RIPEMD_160_NATIVES: u64 = 3;
+
+ + + + + +Whether the fix for a counting bug in the script path of the signature checker pass is enabled. +Lifetime: transient + + +
const SIGNATURE_CHECKER_V2_SCRIPT_FIX: u64 = 29;
+
+ + + + + +Fix the native formatter for signer. +Lifetime: transient + + +
const SIGNER_NATIVE_FORMAT_FIX: u64 = 25;
+
+ + + + + + + +
const SINGLE_SENDER_AUTHENTICATOR: u64 = 33;
+
+ + + + + +Whether the automatic creation of accounts is enabled for sponsored transactions. +Lifetime: transient + + +
const SPONSORED_AUTOMATIC_ACCOUNT_CREATION: u64 = 34;
+
+ + + + + +Whether struct constructors are enabled + +Lifetime: transient + + +
const STRUCT_CONSTRUCTORS: u64 = 15;
+
+ + + + + +Whether the transaction context extension is enabled. This feature allows the module +transaction_context to provide contextual information about the user transaction. + +Lifetime: transient + + +
const TRANSACTION_CONTEXT_EXTENSION: u64 = 59;
+
+ + + + + +Whether the simulation enhancement is enabled. This enables the simulation without an authentication check, +the sponsored transaction simulation when the fee payer is set to 0x0, and the multisig transaction +simulation consistent with the execution. + +Lifetime: transient + + +
const TRANSACTION_SIMULATION_ENHANCEMENT: u64 = 78;
+
+ + + + + +Whether during upgrade compatibility checking, friend functions should be treated similar like +private functions. +Lifetime: permanent + + +
const TREAT_FRIEND_AS_PRIVATE: u64 = 2;
+
+ + + + + +Whether to allow the use of binary format version v6. +Lifetime: transient + + +
const VM_BINARY_FORMAT_V6: u64 = 5;
+
+ + + + + + + +
const VM_BINARY_FORMAT_V7: u64 = 40;
+
+ + + + + +## Function `code_dependency_check_enabled` + + + +
public fun code_dependency_check_enabled(): bool
+
+ + + +
+Implementation + + +
public fun code_dependency_check_enabled(): bool acquires Features {
+    is_enabled(CODE_DEPENDENCY_CHECK)
+}
+
+ + + +
+ + + +## Function `treat_friend_as_private` + + + +
public fun treat_friend_as_private(): bool
+
+ + + +
+Implementation + + +
public fun treat_friend_as_private(): bool acquires Features {
+    is_enabled(TREAT_FRIEND_AS_PRIVATE)
+}
+
+ + + +
+ + + +## Function `get_sha_512_and_ripemd_160_feature` + + + +
public fun get_sha_512_and_ripemd_160_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_sha_512_and_ripemd_160_feature(): u64 { SHA_512_AND_RIPEMD_160_NATIVES }
+
+ + + +
+ + + +## Function `sha_512_and_ripemd_160_enabled` + + + +
public fun sha_512_and_ripemd_160_enabled(): bool
+
+ + + +
+Implementation + + +
public fun sha_512_and_ripemd_160_enabled(): bool acquires Features {
+    is_enabled(SHA_512_AND_RIPEMD_160_NATIVES)
+}
+
+ + + +
+ + + +## Function `get_aptos_stdlib_chain_id_feature` + + + +
public fun get_aptos_stdlib_chain_id_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_aptos_stdlib_chain_id_feature(): u64 { APTOS_STD_CHAIN_ID_NATIVES }
+
+ + + +
+ + + +## Function `aptos_stdlib_chain_id_enabled` + + + +
public fun aptos_stdlib_chain_id_enabled(): bool
+
+ + + +
+Implementation + + +
public fun aptos_stdlib_chain_id_enabled(): bool acquires Features {
+    is_enabled(APTOS_STD_CHAIN_ID_NATIVES)
+}
+
+ + + +
+ + + +## Function `get_vm_binary_format_v6` + + + +
public fun get_vm_binary_format_v6(): u64
+
+ + + +
+Implementation + + +
public fun get_vm_binary_format_v6(): u64 { VM_BINARY_FORMAT_V6 }
+
+ + + +
+ + + +## Function `allow_vm_binary_format_v6` + + + +
public fun allow_vm_binary_format_v6(): bool
+
+ + + +
+Implementation + + +
public fun allow_vm_binary_format_v6(): bool acquires Features {
+    is_enabled(VM_BINARY_FORMAT_V6)
+}
+
+ + + +
+ + + +## Function `get_collect_and_distribute_gas_fees_feature` + + + +
public fun get_collect_and_distribute_gas_fees_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_collect_and_distribute_gas_fees_feature(): u64 { COLLECT_AND_DISTRIBUTE_GAS_FEES }
+
+ + + +
+ + + +## Function `collect_and_distribute_gas_fees` + + + +
public fun collect_and_distribute_gas_fees(): bool
+
+ + + +
+Implementation + + +
public fun collect_and_distribute_gas_fees(): bool acquires Features {
+    is_enabled(COLLECT_AND_DISTRIBUTE_GAS_FEES)
+}
+
+ + + +
+ + + +## Function `multi_ed25519_pk_validate_v2_feature` + + + +
public fun multi_ed25519_pk_validate_v2_feature(): u64
+
+ + + +
+Implementation + + +
public fun multi_ed25519_pk_validate_v2_feature(): u64 { MULTI_ED25519_PK_VALIDATE_V2_NATIVES }
+
+ + + +
+ + + +## Function `multi_ed25519_pk_validate_v2_enabled` + + + +
public fun multi_ed25519_pk_validate_v2_enabled(): bool
+
+ + + +
+Implementation + + +
public fun multi_ed25519_pk_validate_v2_enabled(): bool acquires Features {
+    is_enabled(MULTI_ED25519_PK_VALIDATE_V2_NATIVES)
+}
+
+ + + +
+ + + +## Function `get_blake2b_256_feature` + + + +
public fun get_blake2b_256_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_blake2b_256_feature(): u64 { BLAKE2B_256_NATIVE }
+
+ + + +
+ + + +## Function `blake2b_256_enabled` + + + +
public fun blake2b_256_enabled(): bool
+
+ + + +
+Implementation + + +
public fun blake2b_256_enabled(): bool acquires Features {
+    is_enabled(BLAKE2B_256_NATIVE)
+}
+
+ + + +
+ + + +## Function `get_resource_groups_feature` + + + +
public fun get_resource_groups_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_resource_groups_feature(): u64 { RESOURCE_GROUPS }
+
+ + + +
+ + + +## Function `resource_groups_enabled` + + + +
public fun resource_groups_enabled(): bool
+
+ + + +
+Implementation + + +
public fun resource_groups_enabled(): bool acquires Features {
+    is_enabled(RESOURCE_GROUPS)
+}
+
+ + + +
+ + + +## Function `get_multisig_accounts_feature` + + + +
public fun get_multisig_accounts_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_multisig_accounts_feature(): u64 { MULTISIG_ACCOUNTS }
+
+ + + +
+ + + +## Function `multisig_accounts_enabled` + + + +
public fun multisig_accounts_enabled(): bool
+
+ + + +
+Implementation + + +
public fun multisig_accounts_enabled(): bool acquires Features {
+    is_enabled(MULTISIG_ACCOUNTS)
+}
+
+ + + +
+ + + +## Function `get_delegation_pools_feature` + + + +
public fun get_delegation_pools_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_delegation_pools_feature(): u64 { DELEGATION_POOLS }
+
+ + + +
+ + + +## Function `delegation_pools_enabled` + + + +
public fun delegation_pools_enabled(): bool
+
+ + + +
+Implementation + + +
public fun delegation_pools_enabled(): bool acquires Features {
+    is_enabled(DELEGATION_POOLS)
+}
+
+ + + +
+ + + +## Function `get_cryptography_algebra_natives_feature` + + + +
public fun get_cryptography_algebra_natives_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_cryptography_algebra_natives_feature(): u64 { CRYPTOGRAPHY_ALGEBRA_NATIVES }
+
+ + + +
+ + + +## Function `cryptography_algebra_enabled` + + + +
public fun cryptography_algebra_enabled(): bool
+
+ + + +
+Implementation + + +
public fun cryptography_algebra_enabled(): bool acquires Features {
+    is_enabled(CRYPTOGRAPHY_ALGEBRA_NATIVES)
+}
+
+ + + +
+ + + +## Function `get_bls12_381_strutures_feature` + + + +
public fun get_bls12_381_strutures_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_bls12_381_strutures_feature(): u64 { BLS12_381_STRUCTURES }
+
+ + + +
+ + + +## Function `bls12_381_structures_enabled` + + + +
public fun bls12_381_structures_enabled(): bool
+
+ + + +
+Implementation + + +
public fun bls12_381_structures_enabled(): bool acquires Features {
+    is_enabled(BLS12_381_STRUCTURES)
+}
+
+ + + +
+ + + +## Function `get_periodical_reward_rate_decrease_feature` + + + +
public fun get_periodical_reward_rate_decrease_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_periodical_reward_rate_decrease_feature(): u64 { PERIODICAL_REWARD_RATE_DECREASE }
+
+ + + +
+ + + +## Function `periodical_reward_rate_decrease_enabled` + + + +
public fun periodical_reward_rate_decrease_enabled(): bool
+
+ + + +
+Implementation + + +
public fun periodical_reward_rate_decrease_enabled(): bool acquires Features {
+    is_enabled(PERIODICAL_REWARD_RATE_DECREASE)
+}
+
+ + + +
+ + + +## Function `get_partial_governance_voting` + + + +
public fun get_partial_governance_voting(): u64
+
+ + + +
+Implementation + + +
public fun get_partial_governance_voting(): u64 { PARTIAL_GOVERNANCE_VOTING }
+
+ + + +
+ + + +## Function `partial_governance_voting_enabled` + + + +
public fun partial_governance_voting_enabled(): bool
+
+ + + +
+Implementation + + +
public fun partial_governance_voting_enabled(): bool acquires Features {
+    is_enabled(PARTIAL_GOVERNANCE_VOTING)
+}
+
+ + + +
+ + + +## Function `get_delegation_pool_partial_governance_voting` + + + +
public fun get_delegation_pool_partial_governance_voting(): u64
+
+ + + +
+Implementation + + +
public fun get_delegation_pool_partial_governance_voting(): u64 { DELEGATION_POOL_PARTIAL_GOVERNANCE_VOTING }
+
+ + + +
+ + + +## Function `delegation_pool_partial_governance_voting_enabled` + + + +
public fun delegation_pool_partial_governance_voting_enabled(): bool
+
+ + + +
+Implementation + + +
public fun delegation_pool_partial_governance_voting_enabled(): bool acquires Features {
+    is_enabled(DELEGATION_POOL_PARTIAL_GOVERNANCE_VOTING)
+}
+
+ + + +
+ + + +## Function `fee_payer_enabled` + + + +
public fun fee_payer_enabled(): bool
+
+ + + +
+Implementation + + +
public fun fee_payer_enabled(): bool acquires Features {
+    is_enabled(FEE_PAYER_ENABLED)
+}
+
+ + + +
+ + + +## Function `get_auids` + + + +
public fun get_auids(): u64
+
+ + + +
+Implementation + + +
public fun get_auids(): u64 {
+    error::invalid_argument(EFEATURE_CANNOT_BE_DISABLED)
+ }
+
+ + + +
+ + + +## Function `auids_enabled` + + + +
public fun auids_enabled(): bool
+
+ + + +
+Implementation + + +
public fun auids_enabled(): bool {
+    true
+}
+
+ + + +
+ + + +## Function `get_bulletproofs_feature` + + + +
public fun get_bulletproofs_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_bulletproofs_feature(): u64 { BULLETPROOFS_NATIVES }
+
+ + + +
+ + + +## Function `bulletproofs_enabled` + + + +
public fun bulletproofs_enabled(): bool
+
+ + + +
+Implementation + + +
public fun bulletproofs_enabled(): bool acquires Features {
+    is_enabled(BULLETPROOFS_NATIVES)
+}
+
+ + + +
+ + + +## Function `get_signer_native_format_fix_feature` + + + +
public fun get_signer_native_format_fix_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_signer_native_format_fix_feature(): u64 { SIGNER_NATIVE_FORMAT_FIX }
+
+ + + +
+ + + +## Function `signer_native_format_fix_enabled` + + + +
public fun signer_native_format_fix_enabled(): bool
+
+ + + +
+Implementation + + +
public fun signer_native_format_fix_enabled(): bool acquires Features {
+    is_enabled(SIGNER_NATIVE_FORMAT_FIX)
+}
+
+ + + +
+ + + +## Function `get_module_event_feature` + + + +
public fun get_module_event_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_module_event_feature(): u64 { MODULE_EVENT }
+
+ + + +
+ + + +## Function `module_event_enabled` + + + +
public fun module_event_enabled(): bool
+
+ + + +
+Implementation + + +
public fun module_event_enabled(): bool acquires Features {
+    is_enabled(MODULE_EVENT)
+}
+
+ + + +
+ + + +## Function `get_aggregator_v2_api_feature` + + + +
public fun get_aggregator_v2_api_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_aggregator_v2_api_feature(): u64 {
+    abort error::invalid_argument(EFEATURE_CANNOT_BE_DISABLED)
+}
+
+ + + +
+ + + +## Function `aggregator_v2_api_enabled` + + + +
public fun aggregator_v2_api_enabled(): bool
+
+ + + +
+Implementation + + +
public fun aggregator_v2_api_enabled(): bool {
+    true
+}
+
+ + + +
+ + + +## Function `get_aggregator_snapshots_feature` + + + +
#[deprecated]
+public fun get_aggregator_snapshots_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_aggregator_snapshots_feature(): u64 {
+    abort error::invalid_argument(EINVALID_FEATURE)
+}
+
+ + + +
+ + + +## Function `aggregator_snapshots_enabled` + + + +
#[deprecated]
+public fun aggregator_snapshots_enabled(): bool
+
+ + + +
+Implementation + + +
public fun aggregator_snapshots_enabled(): bool {
+    abort error::invalid_argument(EINVALID_FEATURE)
+}
+
+ + + +
+ + + +## Function `get_sponsored_automatic_account_creation` + + + +
public fun get_sponsored_automatic_account_creation(): u64
+
+ + + +
+Implementation + + +
public fun get_sponsored_automatic_account_creation(): u64 { SPONSORED_AUTOMATIC_ACCOUNT_CREATION }
+
+ + + +
+ + + +## Function `sponsored_automatic_account_creation_enabled` + + + +
public fun sponsored_automatic_account_creation_enabled(): bool
+
+ + + +
+Implementation + + +
public fun sponsored_automatic_account_creation_enabled(): bool acquires Features {
+    is_enabled(SPONSORED_AUTOMATIC_ACCOUNT_CREATION)
+}
+
+ + + +
+ + + +## Function `get_concurrent_token_v2_feature` + + + +
public fun get_concurrent_token_v2_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_concurrent_token_v2_feature(): u64 {
+    error::invalid_argument(EFEATURE_CANNOT_BE_DISABLED)
+}
+
+ + + +
+ + + +## Function `concurrent_token_v2_enabled` + + + +
public fun concurrent_token_v2_enabled(): bool
+
+ + + +
+Implementation + + +
public fun concurrent_token_v2_enabled(): bool {
+    true
+}
+
+ + + +
+ + + +## Function `get_concurrent_assets_feature` + + + +
#[deprecated]
+public fun get_concurrent_assets_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_concurrent_assets_feature(): u64 {
+    abort error::invalid_argument(EFEATURE_CANNOT_BE_DISABLED)
+}
+
+ + + +
+ + + +## Function `concurrent_assets_enabled` + + + +
#[deprecated]
+public fun concurrent_assets_enabled(): bool
+
+ + + +
+Implementation + + +
public fun concurrent_assets_enabled(): bool {
+    abort error::invalid_argument(EFEATURE_CANNOT_BE_DISABLED)
+}
+
+ + + +
+ + + +## Function `get_operator_beneficiary_change_feature` + + + +
public fun get_operator_beneficiary_change_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_operator_beneficiary_change_feature(): u64 { OPERATOR_BENEFICIARY_CHANGE }
+
+ + + +
+ + + +## Function `operator_beneficiary_change_enabled` + + + +
public fun operator_beneficiary_change_enabled(): bool
+
+ + + +
+Implementation + + +
public fun operator_beneficiary_change_enabled(): bool acquires Features {
+    is_enabled(OPERATOR_BENEFICIARY_CHANGE)
+}
+
+ + + +
+ + + +## Function `get_commission_change_delegation_pool_feature` + + + +
public fun get_commission_change_delegation_pool_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_commission_change_delegation_pool_feature(): u64 { COMMISSION_CHANGE_DELEGATION_POOL }
+
+ + + +
+ + + +## Function `commission_change_delegation_pool_enabled` + + + +
public fun commission_change_delegation_pool_enabled(): bool
+
+ + + +
+Implementation + + +
public fun commission_change_delegation_pool_enabled(): bool acquires Features {
+    is_enabled(COMMISSION_CHANGE_DELEGATION_POOL)
+}
+
+ + + +
+ + + +## Function `get_bn254_strutures_feature` + + + +
public fun get_bn254_strutures_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_bn254_strutures_feature(): u64 { BN254_STRUCTURES }
+
+ + + +
+ + + +## Function `bn254_structures_enabled` + + + +
public fun bn254_structures_enabled(): bool
+
+ + + +
+Implementation + + +
public fun bn254_structures_enabled(): bool acquires Features {
+    is_enabled(BN254_STRUCTURES)
+}
+
+ + + +
+ + + +## Function `get_reconfigure_with_dkg_feature` + + + +
public fun get_reconfigure_with_dkg_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_reconfigure_with_dkg_feature(): u64 { RECONFIGURE_WITH_DKG }
+
+ + + +
+ + + +## Function `reconfigure_with_dkg_enabled` + + + +
public fun reconfigure_with_dkg_enabled(): bool
+
+ + + +
+Implementation + + +
public fun reconfigure_with_dkg_enabled(): bool acquires Features {
+    is_enabled(RECONFIGURE_WITH_DKG)
+}
+
+ + + +
+ + + +## Function `get_keyless_accounts_feature` + + + +
public fun get_keyless_accounts_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_keyless_accounts_feature(): u64 { KEYLESS_ACCOUNTS }
+
+ + + +
+ + + +## Function `keyless_accounts_enabled` + + + +
public fun keyless_accounts_enabled(): bool
+
+ + + +
+Implementation + + +
public fun keyless_accounts_enabled(): bool acquires Features {
+    is_enabled(KEYLESS_ACCOUNTS)
+}
+
+ + + +
+ + + +## Function `get_keyless_but_zkless_accounts_feature` + + + +
public fun get_keyless_but_zkless_accounts_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_keyless_but_zkless_accounts_feature(): u64 { KEYLESS_BUT_ZKLESS_ACCOUNTS }
+
+ + + +
+ + + +## Function `keyless_but_zkless_accounts_feature_enabled` + + + +
public fun keyless_but_zkless_accounts_feature_enabled(): bool
+
+ + + +
+Implementation + + +
public fun keyless_but_zkless_accounts_feature_enabled(): bool acquires Features {
+    is_enabled(KEYLESS_BUT_ZKLESS_ACCOUNTS)
+}
+
+ + + +
+ + + +## Function `get_jwk_consensus_feature` + + + +
public fun get_jwk_consensus_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_jwk_consensus_feature(): u64 { JWK_CONSENSUS }
+
+ + + +
+ + + +## Function `jwk_consensus_enabled` + + + +
public fun jwk_consensus_enabled(): bool
+
+ + + +
+Implementation + + +
public fun jwk_consensus_enabled(): bool acquires Features {
+    is_enabled(JWK_CONSENSUS)
+}
+
+ + + +
+ + + +## Function `get_concurrent_fungible_assets_feature` + + + +
public fun get_concurrent_fungible_assets_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_concurrent_fungible_assets_feature(): u64 { CONCURRENT_FUNGIBLE_ASSETS }
+
+ + + +
+ + + +## Function `concurrent_fungible_assets_enabled` + + + +
public fun concurrent_fungible_assets_enabled(): bool
+
+ + + +
+Implementation + + +
public fun concurrent_fungible_assets_enabled(): bool acquires Features {
+    is_enabled(CONCURRENT_FUNGIBLE_ASSETS)
+}
+
+ + + +
+ + + +## Function `is_object_code_deployment_enabled` + + + +
public fun is_object_code_deployment_enabled(): bool
+
+ + + +
+Implementation + + +
public fun is_object_code_deployment_enabled(): bool acquires Features {
+    is_enabled(OBJECT_CODE_DEPLOYMENT)
+}
+
+ + + +
+ + + +## Function `get_max_object_nesting_check_feature` + + + +
public fun get_max_object_nesting_check_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_max_object_nesting_check_feature(): u64 { MAX_OBJECT_NESTING_CHECK }
+
+ + + +
+ + + +## Function `max_object_nesting_check_enabled` + + + +
public fun max_object_nesting_check_enabled(): bool
+
+ + + +
+Implementation + + +
public fun max_object_nesting_check_enabled(): bool acquires Features {
+    is_enabled(MAX_OBJECT_NESTING_CHECK)
+}
+
+ + + +
+ + + +## Function `get_keyless_accounts_with_passkeys_feature` + + + +
public fun get_keyless_accounts_with_passkeys_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_keyless_accounts_with_passkeys_feature(): u64 { KEYLESS_ACCOUNTS_WITH_PASSKEYS }
+
+ + + +
+ + + +## Function `keyless_accounts_with_passkeys_feature_enabled` + + + +
public fun keyless_accounts_with_passkeys_feature_enabled(): bool
+
+ + + +
+Implementation + + +
public fun keyless_accounts_with_passkeys_feature_enabled(): bool acquires Features {
+    is_enabled(KEYLESS_ACCOUNTS_WITH_PASSKEYS)
+}
+
+ + + +
+ + + +## Function `get_multisig_v2_enhancement_feature` + + + +
public fun get_multisig_v2_enhancement_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_multisig_v2_enhancement_feature(): u64 { MULTISIG_V2_ENHANCEMENT }
+
+ + + +
+ + + +## Function `multisig_v2_enhancement_feature_enabled` + + + +
public fun multisig_v2_enhancement_feature_enabled(): bool
+
+ + + +
+Implementation + + +
public fun multisig_v2_enhancement_feature_enabled(): bool acquires Features {
+    is_enabled(MULTISIG_V2_ENHANCEMENT)
+}
+
+ + + +
+ + + +## Function `get_delegation_pool_allowlisting_feature` + + + +
public fun get_delegation_pool_allowlisting_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_delegation_pool_allowlisting_feature(): u64 { DELEGATION_POOL_ALLOWLISTING }
+
+ + + +
+ + + +## Function `delegation_pool_allowlisting_enabled` + + + +
public fun delegation_pool_allowlisting_enabled(): bool
+
+ + + +
+Implementation + + +
public fun delegation_pool_allowlisting_enabled(): bool acquires Features {
+    is_enabled(DELEGATION_POOL_ALLOWLISTING)
+}
+
+ + + +
+ + + +## Function `get_module_event_migration_feature` + + + +
public fun get_module_event_migration_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_module_event_migration_feature(): u64 { MODULE_EVENT_MIGRATION }
+
+ + + +
+ + + +## Function `module_event_migration_enabled` + + + +
public fun module_event_migration_enabled(): bool
+
+ + + +
+Implementation + + +
public fun module_event_migration_enabled(): bool acquires Features {
+    is_enabled(MODULE_EVENT_MIGRATION)
+}
+
+ + + +
+ + + +## Function `get_transaction_context_extension_feature` + + + +
public fun get_transaction_context_extension_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_transaction_context_extension_feature(): u64 { TRANSACTION_CONTEXT_EXTENSION }
+
+ + + +
+ + + +## Function `transaction_context_extension_enabled` + + + +
public fun transaction_context_extension_enabled(): bool
+
+ + + +
+Implementation + + +
public fun transaction_context_extension_enabled(): bool acquires Features {
+    is_enabled(TRANSACTION_CONTEXT_EXTENSION)
+}
+
+ + + +
+ + + +## Function `get_coin_to_fungible_asset_migration_feature` + + + +
public fun get_coin_to_fungible_asset_migration_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_coin_to_fungible_asset_migration_feature(): u64 { COIN_TO_FUNGIBLE_ASSET_MIGRATION }
+
+ + + +
+ + + +## Function `coin_to_fungible_asset_migration_feature_enabled` + + + +
public fun coin_to_fungible_asset_migration_feature_enabled(): bool
+
+ + + +
+Implementation + + +
public fun coin_to_fungible_asset_migration_feature_enabled(): bool acquires Features {
+    is_enabled(COIN_TO_FUNGIBLE_ASSET_MIGRATION)
+}
+
+ + + +
+ + + +## Function `get_primary_apt_fungible_store_at_user_address_feature` + + + +
#[deprecated]
+public fun get_primary_apt_fungible_store_at_user_address_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_primary_apt_fungible_store_at_user_address_feature(
+): u64 {
+    abort error::invalid_argument(EINVALID_FEATURE)
+}
+
+ + + +
+ + + +## Function `primary_apt_fungible_store_at_user_address_enabled` + + + +
#[deprecated]
+public fun primary_apt_fungible_store_at_user_address_enabled(): bool
+
+ + + +
+Implementation + + +
public fun primary_apt_fungible_store_at_user_address_enabled(): bool acquires Features {
+    is_enabled(PRIMARY_APT_FUNGIBLE_STORE_AT_USER_ADDRESS)
+}
+
+ + + +
+ + + +## Function `aggregator_v2_is_at_least_api_enabled` + + + +
public fun aggregator_v2_is_at_least_api_enabled(): bool
+
+ + + +
+Implementation + + +
public fun aggregator_v2_is_at_least_api_enabled(): bool acquires Features {
+    is_enabled(AGGREGATOR_V2_IS_AT_LEAST_API)
+}
+
+ + + +
+ + + +## Function `get_object_native_derived_address_feature` + + + +
public fun get_object_native_derived_address_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_object_native_derived_address_feature(): u64 { OBJECT_NATIVE_DERIVED_ADDRESS }
+
+ + + +
+ + + +## Function `object_native_derived_address_enabled` + + + +
public fun object_native_derived_address_enabled(): bool
+
+ + + +
+Implementation + + +
public fun object_native_derived_address_enabled(): bool acquires Features {
+    is_enabled(OBJECT_NATIVE_DERIVED_ADDRESS)
+}
+
+ + + +
+ + + +## Function `get_dispatchable_fungible_asset_feature` + + + +
public fun get_dispatchable_fungible_asset_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_dispatchable_fungible_asset_feature(): u64 { DISPATCHABLE_FUNGIBLE_ASSET }
+
+ + + +
+ + + +## Function `dispatchable_fungible_asset_enabled` + + + +
public fun dispatchable_fungible_asset_enabled(): bool
+
+ + + +
+Implementation + + +
public fun dispatchable_fungible_asset_enabled(): bool acquires Features {
+    is_enabled(DISPATCHABLE_FUNGIBLE_ASSET)
+}
+
+ + + +
+ + + +## Function `get_new_accounts_default_to_fa_apt_store_feature` + + + +
public fun get_new_accounts_default_to_fa_apt_store_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_new_accounts_default_to_fa_apt_store_feature(): u64 { NEW_ACCOUNTS_DEFAULT_TO_FA_APT_STORE }
+
+ + + +
+ + + +## Function `new_accounts_default_to_fa_apt_store_enabled` + + + +
public fun new_accounts_default_to_fa_apt_store_enabled(): bool
+
+ + + +
+Implementation + + +
public fun new_accounts_default_to_fa_apt_store_enabled(): bool acquires Features {
+    is_enabled(NEW_ACCOUNTS_DEFAULT_TO_FA_APT_STORE)
+}
+
+ + + +
+ + + +## Function `get_operations_default_to_fa_apt_store_feature` + + + +
public fun get_operations_default_to_fa_apt_store_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_operations_default_to_fa_apt_store_feature(): u64 { OPERATIONS_DEFAULT_TO_FA_APT_STORE }
+
+ + + +
+ + + +## Function `operations_default_to_fa_apt_store_enabled` + + + +
public fun operations_default_to_fa_apt_store_enabled(): bool
+
+ + + +
+Implementation + + +
public fun operations_default_to_fa_apt_store_enabled(): bool acquires Features {
+    is_enabled(OPERATIONS_DEFAULT_TO_FA_APT_STORE)
+}
+
+ + + +
+ + + +## Function `get_concurrent_fungible_balance_feature` + + + +
public fun get_concurrent_fungible_balance_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_concurrent_fungible_balance_feature(): u64 { CONCURRENT_FUNGIBLE_BALANCE }
+
+ + + +
+ + + +## Function `concurrent_fungible_balance_enabled` + + + +
public fun concurrent_fungible_balance_enabled(): bool
+
+ + + +
+Implementation + + +
public fun concurrent_fungible_balance_enabled(): bool acquires Features {
+    is_enabled(CONCURRENT_FUNGIBLE_BALANCE)
+}
+
+ + + +
+ + + +## Function `get_default_to_concurrent_fungible_balance_feature` + + + +
public fun get_default_to_concurrent_fungible_balance_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_default_to_concurrent_fungible_balance_feature(): u64 { DEFAULT_TO_CONCURRENT_FUNGIBLE_BALANCE }
+
+ + + +
+ + + +## Function `default_to_concurrent_fungible_balance_enabled` + + + +
public fun default_to_concurrent_fungible_balance_enabled(): bool
+
+ + + +
+Implementation + + +
public fun default_to_concurrent_fungible_balance_enabled(): bool acquires Features {
+    is_enabled(DEFAULT_TO_CONCURRENT_FUNGIBLE_BALANCE)
+}
+
+ + + +
+ + + +## Function `get_abort_if_multisig_payload_mismatch_feature` + + + +
public fun get_abort_if_multisig_payload_mismatch_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_abort_if_multisig_payload_mismatch_feature(): u64 { ABORT_IF_MULTISIG_PAYLOAD_MISMATCH }
+
+ + + +
+ + + +## Function `abort_if_multisig_payload_mismatch_enabled` + + + +
public fun abort_if_multisig_payload_mismatch_enabled(): bool
+
+ + + +
+Implementation + + +
public fun abort_if_multisig_payload_mismatch_enabled(): bool acquires Features {
+    is_enabled(ABORT_IF_MULTISIG_PAYLOAD_MISMATCH)
+}
+
+ + + +
+ + + +## Function `get_transaction_simulation_enhancement_feature` + + + +
public fun get_transaction_simulation_enhancement_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_transaction_simulation_enhancement_feature(): u64 { TRANSACTION_SIMULATION_ENHANCEMENT }
+
+ + + +
+ + + +## Function `transaction_simulation_enhancement_enabled` + + + +
public fun transaction_simulation_enhancement_enabled(): bool
+
+ + + +
+Implementation + + +
public fun transaction_simulation_enhancement_enabled(): bool acquires Features {
+    is_enabled(TRANSACTION_SIMULATION_ENHANCEMENT)
+}
+
+ + + +
+ + + +## Function `get_collection_owner_feature` + + + +
public fun get_collection_owner_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_collection_owner_feature(): u64 { COLLECTION_OWNER }
+
+ + + +
+ + + +## Function `is_collection_owner_enabled` + + + +
public fun is_collection_owner_enabled(): bool
+
+ + + +
+Implementation + + +
public fun is_collection_owner_enabled(): bool acquires Features {
+    is_enabled(COLLECTION_OWNER)
+}
+
+ + + +
+ + + +## Function `change_feature_flags` + +Deprecated to prevent validator set changes during DKG. + +Genesis/tests should use change_feature_flags_internal() for feature vec initialization. + +Governance proposals should use change_feature_flags_for_next_epoch() to enable/disable features. + + +
public fun change_feature_flags(_framework: &signer, _enable: vector<u64>, _disable: vector<u64>)
+
+ + + +
+Implementation + + +
public fun change_feature_flags(_framework: &signer, _enable: vector<u64>, _disable: vector<u64>) {
+    abort (error::invalid_state(EAPI_DISABLED))
+}
+
+ + + +
+ + + +## Function `change_feature_flags_internal` + +Update feature flags directly. Only used in genesis/tests. + + +
fun change_feature_flags_internal(framework: &signer, enable: vector<u64>, disable: vector<u64>)
+
+ + + +
+Implementation + + +
fun change_feature_flags_internal(framework: &signer, enable: vector<u64>, disable: vector<u64>) acquires Features {
+    assert!(signer::address_of(framework) == @std, error::permission_denied(EFRAMEWORK_SIGNER_NEEDED));
+    if (!exists<Features>(@std)) {
+        move_to<Features>(framework, Features { features: vector[] })
+    };
+    let features = &mut borrow_global_mut<Features>(@std).features;
+    vector::for_each_ref(&enable, |feature| {
+        set(features, *feature, true);
+    });
+    vector::for_each_ref(&disable, |feature| {
+        set(features, *feature, false);
+    });
+}
+
+ + + +
+ + + +## Function `change_feature_flags_for_next_epoch` + +Enable and disable features for the next epoch. + + +
public fun change_feature_flags_for_next_epoch(framework: &signer, enable: vector<u64>, disable: vector<u64>)
+
+ + + +
+Implementation + + +
public fun change_feature_flags_for_next_epoch(
+    framework: &signer,
+    enable: vector<u64>,
+    disable: vector<u64>
+) acquires PendingFeatures, Features {
+    assert!(signer::address_of(framework) == @std, error::permission_denied(EFRAMEWORK_SIGNER_NEEDED));
+
+    // Figure out the baseline feature vec that the diff will be applied to.
+    let new_feature_vec = if (exists<PendingFeatures>(@std)) {
+        // If there is a buffered feature vec, use it as the baseline.
+        let PendingFeatures { features } = move_from<PendingFeatures>(@std);
+        features
+    } else if (exists<Features>(@std)) {
+        // Otherwise, use the currently effective feature flag vec as the baseline, if it exists.
+        borrow_global<Features>(@std).features
+    } else {
+        // Otherwise, use an empty feature vec.
+        vector[]
+    };
+
+    // Apply the diff and save it to the buffer.
+    apply_diff(&mut new_feature_vec, enable, disable);
+    move_to(framework, PendingFeatures { features: new_feature_vec });
+}
+
+ + + +
+ + + +## Function `on_new_epoch` + +Apply all the pending feature flag changes. Should only be used at the end of a reconfiguration with DKG. + +While the scope is public, it can only be used in system transactions like block_prologue and governance proposals, +which have permission to set the flag that's checked in extract(). + + +
public fun on_new_epoch(framework: &signer)
+
+ + + +
+Implementation + + +
public fun on_new_epoch(framework: &signer) acquires Features, PendingFeatures {
+    ensure_framework_signer(framework);
+    if (exists<PendingFeatures>(@std)) {
+        let PendingFeatures { features } = move_from<PendingFeatures>(@std);
+        if (exists<Features>(@std)) {
+            borrow_global_mut<Features>(@std).features = features;
+        } else {
+            move_to(framework, Features { features })
+        }
+    }
+}
+
+ + + +
+ + + +## Function `is_enabled` + +Check whether the feature is enabled. + + +
#[view]
+public fun is_enabled(feature: u64): bool
+
+ + + +
+Implementation + + +
public fun is_enabled(feature: u64): bool acquires Features {
+    exists<Features>(@std) &&
+        contains(&borrow_global<Features>(@std).features, feature)
+}
+
+ + + +
+ + + +## Function `set` + +Helper to include or exclude a feature flag. + + +
fun set(features: &mut vector<u8>, feature: u64, include: bool)
+
+ + + +
+Implementation + + +
fun set(features: &mut vector<u8>, feature: u64, include: bool) {
+    let byte_index = feature / 8;
+    let bit_mask = 1 << ((feature % 8) as u8);
+    while (vector::length(features) <= byte_index) {
+        vector::push_back(features, 0)
+    };
+    let entry = vector::borrow_mut(features, byte_index);
+    if (include)
+        *entry = *entry | bit_mask
+    else
+        *entry = *entry & (0xff ^ bit_mask)
+}
+
+ + + +
+ + + +## Function `contains` + +Helper to check whether a feature flag is enabled. + + +
fun contains(features: &vector<u8>, feature: u64): bool
+
+ + + +
+Implementation + + +
fun contains(features: &vector<u8>, feature: u64): bool {
+    let byte_index = feature / 8;
+    let bit_mask = 1 << ((feature % 8) as u8);
+    byte_index < vector::length(features) && (*vector::borrow(features, byte_index) & bit_mask) != 0
+}
+
+ + + +
+ + + +## Function `apply_diff` + + + +
fun apply_diff(features: &mut vector<u8>, enable: vector<u64>, disable: vector<u64>)
+
+ + + +
+Implementation + + +
fun apply_diff(features: &mut vector<u8>, enable: vector<u64>, disable: vector<u64>) {
+    vector::for_each(enable, |feature| {
+        set(features, feature, true);
+    });
+    vector::for_each(disable, |feature| {
+        set(features, feature, false);
+    });
+}
+
+ + + +
+ + + +## Function `ensure_framework_signer` + + + +
fun ensure_framework_signer(account: &signer)
+
+ + + +
+Implementation + + +
fun ensure_framework_signer(account: &signer) {
+    let addr = signer::address_of(account);
+    assert!(addr == @std, error::permission_denied(EFRAMEWORK_SIGNER_NEEDED));
+}
+
+ + + +
+ + + +## Specification + + + + +### Resource `Features` + + +
struct Features has key
+
+ + + +
+
+features: vector<u8> +
+
+ +
+
+ + + +
pragma bv=b"0";
+
+ + + + + +### Resource `PendingFeatures` + + +
struct PendingFeatures has key
+
+ + + +
+
+features: vector<u8> +
+
+ +
+
+ + + +
pragma bv=b"0";
+
+ + + + + +### Function `periodical_reward_rate_decrease_enabled` + + +
public fun periodical_reward_rate_decrease_enabled(): bool
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] result == spec_periodical_reward_rate_decrease_enabled();
+
+ + + + + + + +
fun spec_partial_governance_voting_enabled(): bool {
+   spec_is_enabled(PARTIAL_GOVERNANCE_VOTING)
+}
+
+ + + + + +### Function `partial_governance_voting_enabled` + + +
public fun partial_governance_voting_enabled(): bool
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] result == spec_partial_governance_voting_enabled();
+
+ + + + + +### Function `module_event_enabled` + + +
public fun module_event_enabled(): bool
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] result == spec_module_event_enabled();
+
+ + + + + + + +
fun spec_abort_if_multisig_payload_mismatch_enabled(): bool {
+   spec_is_enabled(ABORT_IF_MULTISIG_PAYLOAD_MISMATCH)
+}
+
+ + + + + + + +
fun spec_simulation_enhancement_enabled(): bool {
+   spec_is_enabled(TRANSACTION_SIMULATION_ENHANCEMENT)
+}
+
+ + + + + +### Function `abort_if_multisig_payload_mismatch_enabled` + + +
public fun abort_if_multisig_payload_mismatch_enabled(): bool
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] result == spec_abort_if_multisig_payload_mismatch_enabled();
+
+ + + + + +### Function `change_feature_flags_internal` + + +
fun change_feature_flags_internal(framework: &signer, enable: vector<u64>, disable: vector<u64>)
+
+ + + + +
pragma opaque;
+modifies global<Features>(@std);
+aborts_if signer::address_of(framework) != @std;
+
+ + + + + +### Function `change_feature_flags_for_next_epoch` + + +
public fun change_feature_flags_for_next_epoch(framework: &signer, enable: vector<u64>, disable: vector<u64>)
+
+ + + + +
aborts_if signer::address_of(framework) != @std;
+pragma opaque;
+modifies global<Features>(@std);
+modifies global<PendingFeatures>(@std);
+
+ + + + + + + +
fun spec_contains(features: vector<u8>, feature: u64): bool {
+   ((int2bv((((1 as u8) << ((feature % (8 as u64)) as u64)) as u8)) as u8) & features[feature/8] as u8) > (0 as u8)
+       && (feature / 8) < len(features)
+}
+
+ + + + + +### Function `on_new_epoch` + + +
public fun on_new_epoch(framework: &signer)
+
+ + + + +
requires @std == signer::address_of(framework);
+let features_pending = global<PendingFeatures>(@std).features;
+let post features_std = global<Features>(@std).features;
+ensures exists<PendingFeatures>(@std) ==> features_std == features_pending;
+aborts_if false;
+
+ + + + + + + +
fun spec_sha_512_and_ripemd_160_enabled(): bool {
+   spec_is_enabled(SHA_512_AND_RIPEMD_160_NATIVES)
+}
+
+ + + + + +### Function `is_enabled` + + +
#[view]
+public fun is_enabled(feature: u64): bool
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] result == spec_is_enabled(feature);
+
+ + + + + + + +
fun spec_is_enabled(feature: u64): bool;
+
+ + + + + + + +
fun spec_periodical_reward_rate_decrease_enabled(): bool {
+   spec_is_enabled(PERIODICAL_REWARD_RATE_DECREASE)
+}
+
+ + + + + + + +
fun spec_fee_payer_enabled(): bool {
+   spec_is_enabled(FEE_PAYER_ENABLED)
+}
+
+ + + + + + + +
fun spec_collect_and_distribute_gas_fees_enabled(): bool {
+   spec_is_enabled(COLLECT_AND_DISTRIBUTE_GAS_FEES)
+}
+
+ + + + + + + +
fun spec_module_event_enabled(): bool {
+   spec_is_enabled(MODULE_EVENT)
+}
+
+ + + + + +### Function `set` + + +
fun set(features: &mut vector<u8>, feature: u64, include: bool)
+
+ + + + +
pragma bv=b"0";
+aborts_if false;
+ensures feature / 8 < len(features);
+ensures include == spec_contains(features, feature);
+
+ + + + + +### Function `contains` + + +
fun contains(features: &vector<u8>, feature: u64): bool
+
+ + + + +
pragma bv=b"0";
+aborts_if false;
+ensures result == spec_contains(features, feature);
+
+ + + + + +### Function `apply_diff` + + +
fun apply_diff(features: &mut vector<u8>, enable: vector<u64>, disable: vector<u64>)
+
+ + + + +
aborts_if [abstract] false;
+ensures [abstract] forall i in disable: !spec_contains(features, i);
+ensures [abstract] forall i in enable: !vector::spec_contains(disable, i)
+    ==> spec_contains(features, i);
+pragma opaque;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/fixed_point32.md b/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/fixed_point32.md new file mode 100644 index 0000000000000..1c17e78c9dcba --- /dev/null +++ b/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/fixed_point32.md @@ -0,0 +1,884 @@ + + + +# Module `0x1::fixed_point32` + +Defines a fixed-point numeric type with a 32-bit integer part and +a 32-bit fractional part. + + +- [Struct `FixedPoint32`](#0x1_fixed_point32_FixedPoint32) +- [Constants](#@Constants_0) +- [Function `multiply_u64`](#0x1_fixed_point32_multiply_u64) +- [Function `divide_u64`](#0x1_fixed_point32_divide_u64) +- [Function `create_from_rational`](#0x1_fixed_point32_create_from_rational) +- [Function `create_from_raw_value`](#0x1_fixed_point32_create_from_raw_value) +- [Function `get_raw_value`](#0x1_fixed_point32_get_raw_value) +- [Function `is_zero`](#0x1_fixed_point32_is_zero) +- [Function `min`](#0x1_fixed_point32_min) +- [Function `max`](#0x1_fixed_point32_max) +- [Function `create_from_u64`](#0x1_fixed_point32_create_from_u64) +- [Function `floor`](#0x1_fixed_point32_floor) +- [Function `ceil`](#0x1_fixed_point32_ceil) +- [Function `round`](#0x1_fixed_point32_round) +- [Specification](#@Specification_1) + - [Function `multiply_u64`](#@Specification_1_multiply_u64) + - [Function `divide_u64`](#@Specification_1_divide_u64) + - [Function `create_from_rational`](#@Specification_1_create_from_rational) + - [Function `create_from_raw_value`](#@Specification_1_create_from_raw_value) + - [Function `min`](#@Specification_1_min) + - [Function `max`](#@Specification_1_max) + - [Function `create_from_u64`](#@Specification_1_create_from_u64) + - [Function `floor`](#@Specification_1_floor) + - [Function `ceil`](#@Specification_1_ceil) + - [Function `round`](#@Specification_1_round) + + +
+ + + + + +## Struct `FixedPoint32` + +Define a fixed-point numeric type with 32 fractional bits. +This is just a u64 integer but it is wrapped in a struct to +make a unique type. This is a binary representation, so decimal +values may not be exactly representable, but it provides more +than 9 decimal digits of precision both before and after the +decimal point (18 digits total). For comparison, double precision +floating-point has less than 16 decimal digits of precision, so +be careful about using floating-point to convert these values to +decimal. + + +
struct FixedPoint32 has copy, drop, store
+
+ + + +
+Fields + + +
+
+value: u64 +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const MAX_U64: u128 = 18446744073709551615;
+
+ + + + + +The denominator provided was zero + + +
const EDENOMINATOR: u64 = 65537;
+
+ + + + + +The quotient value would be too large to be held in a u64 + + +
const EDIVISION: u64 = 131074;
+
+ + + + + +A division by zero was encountered + + +
const EDIVISION_BY_ZERO: u64 = 65540;
+
+ + + + + +The multiplied value would be too large to be held in a u64 + + +
const EMULTIPLICATION: u64 = 131075;
+
+ + + + + +The computed ratio when converting to a FixedPoint32 would be unrepresentable + + +
const ERATIO_OUT_OF_RANGE: u64 = 131077;
+
+ + + + + +## Function `multiply_u64` + +Multiply a u64 integer by a fixed-point number, truncating any +fractional part of the product. This will abort if the product +overflows. + + +
public fun multiply_u64(val: u64, multiplier: fixed_point32::FixedPoint32): u64
+
+ + + +
+Implementation + + +
public fun multiply_u64(val: u64, multiplier: FixedPoint32): u64 {
+    // The product of two 64 bit values has 128 bits, so perform the
+    // multiplication with u128 types and keep the full 128 bit product
+    // to avoid losing accuracy.
+    let unscaled_product = (val as u128) * (multiplier.value as u128);
+    // The unscaled product has 32 fractional bits (from the multiplier)
+    // so rescale it by shifting away the low bits.
+    let product = unscaled_product >> 32;
+    // Check whether the value is too large.
+    assert!(product <= MAX_U64, EMULTIPLICATION);
+    (product as u64)
+}
+
+ + + +
+ + + +## Function `divide_u64` + +Divide a u64 integer by a fixed-point number, truncating any +fractional part of the quotient. This will abort if the divisor +is zero or if the quotient overflows. + + +
public fun divide_u64(val: u64, divisor: fixed_point32::FixedPoint32): u64
+
+ + + +
+Implementation + + +
public fun divide_u64(val: u64, divisor: FixedPoint32): u64 {
+    // Check for division by zero.
+    assert!(divisor.value != 0, EDIVISION_BY_ZERO);
+    // First convert to 128 bits and then shift left to
+    // add 32 fractional zero bits to the dividend.
+    let scaled_value = (val as u128) << 32;
+    let quotient = scaled_value / (divisor.value as u128);
+    // Check whether the value is too large.
+    assert!(quotient <= MAX_U64, EDIVISION);
+    // the value may be too large, which will cause the cast to fail
+    // with an arithmetic error.
+    (quotient as u64)
+}
+
+ + + +
+ + + +## Function `create_from_rational` + +Create a fixed-point value from a rational number specified by its +numerator and denominator. Calling this function should be preferred +for using Self::create_from_raw_value which is also available. +This will abort if the denominator is zero. It will also +abort if the numerator is nonzero and the ratio is not in the range +2^-32 .. 2^32-1. When specifying decimal fractions, be careful about +rounding errors: if you round to display N digits after the decimal +point, you can use a denominator of 10^N to avoid numbers where the +very small imprecision in the binary representation could change the +rounding, e.g., 0.0125 will round down to 0.012 instead of up to 0.013. + + +
public fun create_from_rational(numerator: u64, denominator: u64): fixed_point32::FixedPoint32
+
+ + + +
+Implementation + + +
public fun create_from_rational(numerator: u64, denominator: u64): FixedPoint32 {
+    // If the denominator is zero, this will abort.
+    // Scale the numerator to have 64 fractional bits and the denominator
+    // to have 32 fractional bits, so that the quotient will have 32
+    // fractional bits.
+    let scaled_numerator = (numerator as u128) << 64;
+    let scaled_denominator = (denominator as u128) << 32;
+    assert!(scaled_denominator != 0, EDENOMINATOR);
+    let quotient = scaled_numerator / scaled_denominator;
+    assert!(quotient != 0 || numerator == 0, ERATIO_OUT_OF_RANGE);
+    // Return the quotient as a fixed-point number. We first need to check whether the cast
+    // can succeed.
+    assert!(quotient <= MAX_U64, ERATIO_OUT_OF_RANGE);
+    FixedPoint32 { value: (quotient as u64) }
+}
+
+ + + +
+ + + +## Function `create_from_raw_value` + +Create a fixedpoint value from a raw value. + + +
public fun create_from_raw_value(value: u64): fixed_point32::FixedPoint32
+
+ + + +
+Implementation + + +
public fun create_from_raw_value(value: u64): FixedPoint32 {
+    FixedPoint32 { value }
+}
+
+ + + +
+ + + +## Function `get_raw_value` + +Accessor for the raw u64 value. Other less common operations, such as +adding or subtracting FixedPoint32 values, can be done using the raw +values directly. + + +
public fun get_raw_value(self: fixed_point32::FixedPoint32): u64
+
+ + + +
+Implementation + + +
public fun get_raw_value(self: FixedPoint32): u64 {
+    self.value
+}
+
+ + + +
+ + + +## Function `is_zero` + +Returns true if the ratio is zero. + + +
public fun is_zero(self: fixed_point32::FixedPoint32): bool
+
+ + + +
+Implementation + + +
public fun is_zero(self: FixedPoint32): bool {
+    self.value == 0
+}
+
+ + + +
+ + + +## Function `min` + +Returns the smaller of the two FixedPoint32 numbers. + + +
public fun min(num1: fixed_point32::FixedPoint32, num2: fixed_point32::FixedPoint32): fixed_point32::FixedPoint32
+
+ + + +
+Implementation + + +
public fun min(num1: FixedPoint32, num2: FixedPoint32): FixedPoint32 {
+    if (num1.value < num2.value) {
+        num1
+    } else {
+        num2
+    }
+}
+
+ + + +
+ + + +## Function `max` + +Returns the larger of the two FixedPoint32 numbers. + + +
public fun max(num1: fixed_point32::FixedPoint32, num2: fixed_point32::FixedPoint32): fixed_point32::FixedPoint32
+
+ + + +
+Implementation + + +
public fun max(num1: FixedPoint32, num2: FixedPoint32): FixedPoint32 {
+    if (num1.value > num2.value) {
+        num1
+    } else {
+        num2
+    }
+}
+
+ + + +
+ + + +## Function `create_from_u64` + +Create a fixedpoint value from a u64 value. + + +
public fun create_from_u64(val: u64): fixed_point32::FixedPoint32
+
+ + + +
+Implementation + + +
public fun create_from_u64(val: u64): FixedPoint32 {
+    let value = (val as u128) << 32;
+    assert!(value <= MAX_U64, ERATIO_OUT_OF_RANGE);
+    FixedPoint32 {value: (value as u64)}
+}
+
+ + + +
+ + + +## Function `floor` + +Returns the largest integer less than or equal to a given number. + + +
public fun floor(self: fixed_point32::FixedPoint32): u64
+
+ + + +
+Implementation + + +
public fun floor(self: FixedPoint32): u64 {
+    self.value >> 32
+}
+
+ + + +
+ + + +## Function `ceil` + +Rounds up the given FixedPoint32 to the next largest integer. + + +
public fun ceil(self: fixed_point32::FixedPoint32): u64
+
+ + + +
+Implementation + + +
public fun ceil(self: FixedPoint32): u64 {
+    let floored_num = floor(self) << 32;
+    if (self.value == floored_num) {
+        return floored_num >> 32
+    };
+    let val = ((floored_num as u128) + (1 << 32));
+    (val >> 32 as u64)
+}
+
+ + + +
+ + + +## Function `round` + +Returns the value of a FixedPoint32 to the nearest integer. + + +
public fun round(self: fixed_point32::FixedPoint32): u64
+
+ + + +
+Implementation + + +
public fun round(self: FixedPoint32): u64 {
+    let floored_num = floor(self) << 32;
+    let boundary = floored_num + ((1 << 32) / 2);
+    if (self.value < boundary) {
+        floored_num >> 32
+    } else {
+        ceil(self)
+    }
+}
+
+ + + +
+ + + +## Specification + + + + +
pragma aborts_if_is_strict;
+
+ + + + + +### Function `multiply_u64` + + +
public fun multiply_u64(val: u64, multiplier: fixed_point32::FixedPoint32): u64
+
+ + + + +
pragma opaque;
+include MultiplyAbortsIf;
+ensures result == spec_multiply_u64(val, multiplier);
+
+ + + + + + + +
schema MultiplyAbortsIf {
+    val: num;
+    multiplier: FixedPoint32;
+    aborts_if spec_multiply_u64(val, multiplier) > MAX_U64 with EMULTIPLICATION;
+}
+
+ + + + + + + +
fun spec_multiply_u64(val: num, multiplier: FixedPoint32): num {
+   (val * multiplier.value) >> 32
+}
+
+ + + + + +### Function `divide_u64` + + +
public fun divide_u64(val: u64, divisor: fixed_point32::FixedPoint32): u64
+
+ + + + +
pragma opaque;
+include DivideAbortsIf;
+ensures result == spec_divide_u64(val, divisor);
+
+ + + + + + + +
schema DivideAbortsIf {
+    val: num;
+    divisor: FixedPoint32;
+    aborts_if divisor.value == 0 with EDIVISION_BY_ZERO;
+    aborts_if spec_divide_u64(val, divisor) > MAX_U64 with EDIVISION;
+}
+
+ + + + + + + +
fun spec_divide_u64(val: num, divisor: FixedPoint32): num {
+   (val << 32) / divisor.value
+}
+
+ + + + + +### Function `create_from_rational` + + +
public fun create_from_rational(numerator: u64, denominator: u64): fixed_point32::FixedPoint32
+
+ + + + +
pragma opaque;
+include CreateFromRationalAbortsIf;
+ensures result == spec_create_from_rational(numerator, denominator);
+
+ + + + + + + +
schema CreateFromRationalAbortsIf {
+    numerator: u64;
+    denominator: u64;
+    let scaled_numerator = (numerator as u128)<< 64;
+    let scaled_denominator = (denominator as u128) << 32;
+    let quotient = scaled_numerator / scaled_denominator;
+    aborts_if scaled_denominator == 0 with EDENOMINATOR;
+    aborts_if quotient == 0 && scaled_numerator != 0 with ERATIO_OUT_OF_RANGE;
+    aborts_if quotient > MAX_U64 with ERATIO_OUT_OF_RANGE;
+}
+
+ + + + + + + +
fun spec_create_from_rational(numerator: num, denominator: num): FixedPoint32 {
+   FixedPoint32{value: (numerator << 64) / (denominator << 32)}
+}
+
+ + + + + +### Function `create_from_raw_value` + + +
public fun create_from_raw_value(value: u64): fixed_point32::FixedPoint32
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result.value == value;
+
+ + + + + +### Function `min` + + +
public fun min(num1: fixed_point32::FixedPoint32, num2: fixed_point32::FixedPoint32): fixed_point32::FixedPoint32
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == spec_min(num1, num2);
+
+ + + + + + + +
fun spec_min(num1: FixedPoint32, num2: FixedPoint32): FixedPoint32 {
+   if (num1.value < num2.value) {
+       num1
+   } else {
+       num2
+   }
+}
+
+ + + + + +### Function `max` + + +
public fun max(num1: fixed_point32::FixedPoint32, num2: fixed_point32::FixedPoint32): fixed_point32::FixedPoint32
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == spec_max(num1, num2);
+
+ + + + + + + +
fun spec_max(num1: FixedPoint32, num2: FixedPoint32): FixedPoint32 {
+   if (num1.value > num2.value) {
+       num1
+   } else {
+       num2
+   }
+}
+
+ + + + + +### Function `create_from_u64` + + +
public fun create_from_u64(val: u64): fixed_point32::FixedPoint32
+
+ + + + +
pragma opaque;
+include CreateFromU64AbortsIf;
+ensures result == spec_create_from_u64(val);
+
+ + + + + + + +
schema CreateFromU64AbortsIf {
+    val: num;
+    let scaled_value = (val as u128) << 32;
+    aborts_if scaled_value > MAX_U64;
+}
+
+ + + + + + + +
fun spec_create_from_u64(val: num): FixedPoint32 {
+   FixedPoint32 {value: val << 32}
+}
+
+ + + + + +### Function `floor` + + +
public fun floor(self: fixed_point32::FixedPoint32): u64
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == spec_floor(self);
+
+ + + + + + + +
fun spec_floor(self: FixedPoint32): u64 {
+   let fractional = self.value % (1 << 32);
+   if (fractional == 0) {
+       self.value >> 32
+   } else {
+       (self.value - fractional) >> 32
+   }
+}
+
+ + + + + +### Function `ceil` + + +
public fun ceil(self: fixed_point32::FixedPoint32): u64
+
+ + + + +
pragma verify_duration_estimate = 120;
+pragma opaque;
+aborts_if false;
+ensures result == spec_ceil(self);
+
+ + + + + + + +
fun spec_ceil(self: FixedPoint32): u64 {
+   let fractional = self.value % (1 << 32);
+   let one = 1 << 32;
+   if (fractional == 0) {
+       self.value >> 32
+   } else {
+       (self.value - fractional + one) >> 32
+   }
+}
+
+ + + + + +### Function `round` + + +
public fun round(self: fixed_point32::FixedPoint32): u64
+
+ + + + +
pragma verify_duration_estimate = 120;
+pragma opaque;
+aborts_if false;
+ensures result == spec_round(self);
+
+ + + + + + + +
fun spec_round(self: FixedPoint32): u64 {
+   let fractional = self.value % (1 << 32);
+   let boundary = (1 << 32) / 2;
+   let one = 1 << 32;
+   if (fractional < boundary) {
+       (self.value - fractional) >> 32
+   } else {
+       (self.value - fractional + one) >> 32
+   }
+}
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/hash.md b/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/hash.md new file mode 100644 index 0000000000000..71b2950158b0c --- /dev/null +++ b/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/hash.md @@ -0,0 +1,65 @@ + + + +# Module `0x1::hash` + +Module which defines SHA hashes for byte vectors. + +The functions in this module are natively declared both in the Move runtime +as in the Move prover's prelude. + + +- [Function `sha2_256`](#0x1_hash_sha2_256) +- [Function `sha3_256`](#0x1_hash_sha3_256) + + +
+ + + + + +## Function `sha2_256` + + + +
public fun sha2_256(data: vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
native public fun sha2_256(data: vector<u8>): vector<u8>;
+
+ + + +
+ + + +## Function `sha3_256` + + + +
public fun sha3_256(data: vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
native public fun sha3_256(data: vector<u8>): vector<u8>;
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/option.md b/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/option.md new file mode 100644 index 0000000000000..5da326c846261 --- /dev/null +++ b/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/option.md @@ -0,0 +1,1330 @@ + + + +# Module `0x1::option` + +This module defines the Option type and its methods to represent and handle an optional value. + + +- [Struct `Option`](#0x1_option_Option) +- [Constants](#@Constants_0) +- [Function `none`](#0x1_option_none) +- [Function `some`](#0x1_option_some) +- [Function `from_vec`](#0x1_option_from_vec) +- [Function `is_none`](#0x1_option_is_none) +- [Function `is_some`](#0x1_option_is_some) +- [Function `contains`](#0x1_option_contains) +- [Function `borrow`](#0x1_option_borrow) +- [Function `borrow_with_default`](#0x1_option_borrow_with_default) +- [Function `get_with_default`](#0x1_option_get_with_default) +- [Function `fill`](#0x1_option_fill) +- [Function `extract`](#0x1_option_extract) +- [Function `borrow_mut`](#0x1_option_borrow_mut) +- [Function `swap`](#0x1_option_swap) +- [Function `swap_or_fill`](#0x1_option_swap_or_fill) +- [Function `destroy_with_default`](#0x1_option_destroy_with_default) +- [Function `destroy_some`](#0x1_option_destroy_some) +- [Function `destroy_none`](#0x1_option_destroy_none) +- [Function `to_vec`](#0x1_option_to_vec) +- [Function `for_each`](#0x1_option_for_each) +- [Function `for_each_ref`](#0x1_option_for_each_ref) +- [Function `for_each_mut`](#0x1_option_for_each_mut) +- [Function `fold`](#0x1_option_fold) +- [Function `map`](#0x1_option_map) +- [Function `map_ref`](#0x1_option_map_ref) +- [Function `filter`](#0x1_option_filter) +- [Function `any`](#0x1_option_any) +- [Function `destroy`](#0x1_option_destroy) +- [Specification](#@Specification_1) + - [Helper Schema](#@Helper_Schema_2) + - [Struct `Option`](#@Specification_1_Option) + - [Function 
`none`](#@Specification_1_none) + - [Function `some`](#@Specification_1_some) + - [Function `from_vec`](#@Specification_1_from_vec) + - [Function `is_none`](#@Specification_1_is_none) + - [Function `is_some`](#@Specification_1_is_some) + - [Function `contains`](#@Specification_1_contains) + - [Function `borrow`](#@Specification_1_borrow) + - [Function `borrow_with_default`](#@Specification_1_borrow_with_default) + - [Function `get_with_default`](#@Specification_1_get_with_default) + - [Function `fill`](#@Specification_1_fill) + - [Function `extract`](#@Specification_1_extract) + - [Function `borrow_mut`](#@Specification_1_borrow_mut) + - [Function `swap`](#@Specification_1_swap) + - [Function `swap_or_fill`](#@Specification_1_swap_or_fill) + - [Function `destroy_with_default`](#@Specification_1_destroy_with_default) + - [Function `destroy_some`](#@Specification_1_destroy_some) + - [Function `destroy_none`](#@Specification_1_destroy_none) + - [Function `to_vec`](#@Specification_1_to_vec) + + +
use 0x1::vector;
+
+ + + + + +## Struct `Option` + +Abstraction of a value that may or may not be present. Implemented with a vector of size +zero or one because Move bytecode does not have ADTs. + + +
struct Option<Element> has copy, drop, store
+
+ + + +
+Fields + + +
+
+vec: vector<Element> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +The Option is in an invalid state for the operation attempted. +The Option is Some while it should be None. + + +
const EOPTION_IS_SET: u64 = 262144;
+
+ + + + + +The Option is in an invalid state for the operation attempted. +The Option is None while it should be Some. + + +
const EOPTION_NOT_SET: u64 = 262145;
+
+ + + + + +Cannot construct an option from a vector with 2 or more elements. + + +
const EOPTION_VEC_TOO_LONG: u64 = 262146;
+
+ + + + + +## Function `none` + +Return an empty Option + + +
public fun none<Element>(): option::Option<Element>
+
+ + + +
+Implementation + + +
public fun none<Element>(): Option<Element> {
+    Option { vec: vector::empty() }
+}
+
+ + + +
+ + + +## Function `some` + +Return an Option containing e + + +
public fun some<Element>(e: Element): option::Option<Element>
+
+ + + +
+Implementation + + +
public fun some<Element>(e: Element): Option<Element> {
+    Option { vec: vector::singleton(e) }
+}
+
+ + + +
+ + + +## Function `from_vec` + + + +
public fun from_vec<Element>(vec: vector<Element>): option::Option<Element>
+
+ + + +
+Implementation + + +
public fun from_vec<Element>(vec: vector<Element>): Option<Element> {
+    assert!(vector::length(&vec) <= 1, EOPTION_VEC_TOO_LONG);
+    Option { vec }
+}
+
+ + + +
+ + + +## Function `is_none` + +Return true if self does not hold a value + + +
public fun is_none<Element>(self: &option::Option<Element>): bool
+
+ + + +
+Implementation + + +
public fun is_none<Element>(self: &Option<Element>): bool {
+    vector::is_empty(&self.vec)
+}
+
+ + + +
+ + + +## Function `is_some` + +Return true if self holds a value + + +
public fun is_some<Element>(self: &option::Option<Element>): bool
+
+ + + +
+Implementation + + +
public fun is_some<Element>(self: &Option<Element>): bool {
+    !vector::is_empty(&self.vec)
+}
+
+ + + +
+ + + +## Function `contains` + +Return true if the value in self is equal to e_ref +Always returns false if self does not hold a value + + +
public fun contains<Element>(self: &option::Option<Element>, e_ref: &Element): bool
+
+ + + +
+Implementation + + +
public fun contains<Element>(self: &Option<Element>, e_ref: &Element): bool {
+    vector::contains(&self.vec, e_ref)
+}
+
+ + + +
+ + + +## Function `borrow` + +Return an immutable reference to the value inside self +Aborts if self does not hold a value + + +
public fun borrow<Element>(self: &option::Option<Element>): &Element
+
+ + + +
+Implementation + + +
public fun borrow<Element>(self: &Option<Element>): &Element {
+    assert!(is_some(self), EOPTION_NOT_SET);
+    vector::borrow(&self.vec, 0)
+}
+
+ + + +
+ + + +## Function `borrow_with_default` + +Return a reference to the value inside self if it holds one +Return default_ref if self does not hold a value + + +
public fun borrow_with_default<Element>(self: &option::Option<Element>, default_ref: &Element): &Element
+
+ + + +
+Implementation + + +
public fun borrow_with_default<Element>(self: &Option<Element>, default_ref: &Element): &Element {
+    let vec_ref = &self.vec;
+    if (vector::is_empty(vec_ref)) default_ref
+    else vector::borrow(vec_ref, 0)
+}
+
+ + + +
+ + + +## Function `get_with_default` + +Return the value inside self if it holds one +Return default if self does not hold a value + + +
public fun get_with_default<Element: copy, drop>(self: &option::Option<Element>, default: Element): Element
+
+ + + +
+Implementation + + +
public fun get_with_default<Element: copy + drop>(
+    self: &Option<Element>,
+    default: Element,
+): Element {
+    let vec_ref = &self.vec;
+    if (vector::is_empty(vec_ref)) default
+    else *vector::borrow(vec_ref, 0)
+}
+
+ + + +
+ + + +## Function `fill` + +Convert the none option self to a some option by adding e. +Aborts if self already holds a value + + +
public fun fill<Element>(self: &mut option::Option<Element>, e: Element)
+
+ + + +
+Implementation + + +
public fun fill<Element>(self: &mut Option<Element>, e: Element) {
+    let vec_ref = &mut self.vec;
+    if (vector::is_empty(vec_ref)) vector::push_back(vec_ref, e)
+    else abort EOPTION_IS_SET
+}
+
+ + + +
+ + + +## Function `extract` + +Convert a some option to a none by removing and returning the value stored inside self +Aborts if self does not hold a value + + +
public fun extract<Element>(self: &mut option::Option<Element>): Element
+
+ + + +
+Implementation + + +
public fun extract<Element>(self: &mut Option<Element>): Element {
+    assert!(is_some(self), EOPTION_NOT_SET);
+    vector::pop_back(&mut self.vec)
+}
+
+ + + +
+ + + +## Function `borrow_mut` + +Return a mutable reference to the value inside self +Aborts if self does not hold a value + + +
public fun borrow_mut<Element>(self: &mut option::Option<Element>): &mut Element
+
+ + + +
+Implementation + + +
public fun borrow_mut<Element>(self: &mut Option<Element>): &mut Element {
+    assert!(is_some(self), EOPTION_NOT_SET);
+    vector::borrow_mut(&mut self.vec, 0)
+}
+
+ + + +
+ + + +## Function `swap` + +Swap the old value inside self with e and return the old value +Aborts if self does not hold a value + + +
public fun swap<Element>(self: &mut option::Option<Element>, e: Element): Element
+
+ + + +
+Implementation + + +
public fun swap<Element>(self: &mut Option<Element>, e: Element): Element {
+    assert!(is_some(self), EOPTION_NOT_SET);
+    let vec_ref = &mut self.vec;
+    let old_value = vector::pop_back(vec_ref);
+    vector::push_back(vec_ref, e);
+    old_value
+}
+
+ + + +
+ + + +## Function `swap_or_fill` + +Swap the old value inside self with e and return the old value; +or if there is no old value, fill it with e. +Different from swap(), swap_or_fill() allows for self not holding a value. + + +
public fun swap_or_fill<Element>(self: &mut option::Option<Element>, e: Element): option::Option<Element>
+
+ + + +
+Implementation + + +
public fun swap_or_fill<Element>(self: &mut Option<Element>, e: Element): Option<Element> {
+    let vec_ref = &mut self.vec;
+    let old_value = if (vector::is_empty(vec_ref)) none()
+        else some(vector::pop_back(vec_ref));
+    vector::push_back(vec_ref, e);
+    old_value
+}
+
+ + + +
+ + + +## Function `destroy_with_default` + +Destroys self. If self holds a value, return it. Returns default otherwise + + +
public fun destroy_with_default<Element: drop>(self: option::Option<Element>, default: Element): Element
+
+ + + +
+Implementation + + +
public fun destroy_with_default<Element: drop>(self: Option<Element>, default: Element): Element {
+    let Option { vec } = self;
+    if (vector::is_empty(&mut vec)) default
+    else vector::pop_back(&mut vec)
+}
+
+ + + +
+ + + +## Function `destroy_some` + +Unpack self and return its contents +Aborts if self does not hold a value + + +
public fun destroy_some<Element>(self: option::Option<Element>): Element
+
+ + + +
+Implementation + + +
public fun destroy_some<Element>(self: Option<Element>): Element {
+    assert!(is_some(&self), EOPTION_NOT_SET);
+    let Option { vec } = self;
+    let elem = vector::pop_back(&mut vec);
+    vector::destroy_empty(vec);
+    elem
+}
+
+ + + +
+ + + +## Function `destroy_none` + +Unpack self +Aborts if self holds a value + + +
public fun destroy_none<Element>(self: option::Option<Element>)
+
+ + + +
+Implementation + + +
public fun destroy_none<Element>(self: Option<Element>) {
+    assert!(is_none(&self), EOPTION_IS_SET);
+    let Option { vec } = self;
+    vector::destroy_empty(vec)
+}
+
+ + + +
+ + + +## Function `to_vec` + +Convert self into a vector of length 1 if it is Some, +and an empty vector otherwise + + +
public fun to_vec<Element>(self: option::Option<Element>): vector<Element>
+
+ + + +
+Implementation + + +
public fun to_vec<Element>(self: Option<Element>): vector<Element> {
+    let Option { vec } = self;
+    vec
+}
+
+ + + +
+ + + +## Function `for_each` + +Apply the function to the optional element, consuming it. Does nothing if no value present. + + +
public fun for_each<Element>(self: option::Option<Element>, f: |Element|)
+
+ + + +
+Implementation + + +
public inline fun for_each<Element>(self: Option<Element>, f: |Element|) {
+    if (is_some(&self)) {
+        f(destroy_some(self))
+    } else {
+        destroy_none(self)
+    }
+}
+
+ + + +
+ + + +## Function `for_each_ref` + +Apply the function to the optional element reference. Does nothing if no value present. + + +
public fun for_each_ref<Element>(self: &option::Option<Element>, f: |&Element|)
+
+ + + +
+Implementation + + +
public inline fun for_each_ref<Element>(self: &Option<Element>, f: |&Element|) {
+    if (is_some(self)) {
+        f(borrow(self))
+    }
+}
+
+ + + +
+ + + +## Function `for_each_mut` + +Apply the function to the optional element reference. Does nothing if no value present. + + +
public fun for_each_mut<Element>(self: &mut option::Option<Element>, f: |&mut Element|)
+
+ + + +
+Implementation + + +
public inline fun for_each_mut<Element>(self: &mut Option<Element>, f: |&mut Element|) {
+    if (is_some(self)) {
+        f(borrow_mut(self))
+    }
+}
+
+ + + +
+ + + +## Function `fold` + +Folds the function over the optional element. + + +
public fun fold<Accumulator, Element>(self: option::Option<Element>, init: Accumulator, f: |(Accumulator, Element)|Accumulator): Accumulator
+
+ + + +
+Implementation + + +
public inline fun fold<Accumulator, Element>(
+    self: Option<Element>,
+    init: Accumulator,
+    f: |Accumulator,Element|Accumulator
+): Accumulator {
+    if (is_some(&self)) {
+        f(init, destroy_some(self))
+    } else {
+        destroy_none(self);
+        init
+    }
+}
+
+ + + +
+ + + +## Function `map` + +Maps the content of an option. + + +
public fun map<Element, OtherElement>(self: option::Option<Element>, f: |Element|OtherElement): option::Option<OtherElement>
+
+ + + +
+Implementation + + +
public inline fun map<Element, OtherElement>(self: Option<Element>, f: |Element|OtherElement): Option<OtherElement> {
+    if (is_some(&self)) {
+        some(f(destroy_some(self)))
+    } else {
+        destroy_none(self);
+        none()
+    }
+}
+
+ + + +
+ + + +## Function `map_ref` + +Maps the content of an option without destroying the original option. + + +
public fun map_ref<Element, OtherElement>(self: &option::Option<Element>, f: |&Element|OtherElement): option::Option<OtherElement>
+
+ + + +
+Implementation + + +
public inline fun map_ref<Element, OtherElement>(
+    self: &Option<Element>, f: |&Element|OtherElement): Option<OtherElement> {
+    if (is_some(self)) {
+        some(f(borrow(self)))
+    } else {
+        none()
+    }
+}
+
+ + + +
+ + + +## Function `filter` + +Filters the content of an option + + +
public fun filter<Element: drop>(self: option::Option<Element>, f: |&Element|bool): option::Option<Element>
+
+ + + +
+Implementation + + +
public inline fun filter<Element:drop>(self: Option<Element>, f: |&Element|bool): Option<Element> {
+    if (is_some(&self) && f(borrow(&self))) {
+        self
+    } else {
+        none()
+    }
+}
+
+ + + +
+ + + +## Function `any` + +Returns true if the option contains an element which satisfies predicate. + + +
public fun any<Element>(self: &option::Option<Element>, p: |&Element|bool): bool
+
+ + + +
+Implementation + + +
public inline fun any<Element>(self: &Option<Element>, p: |&Element|bool): bool {
+    is_some(self) && p(borrow(self))
+}
+
+ + + +
+ + + +## Function `destroy` + +Utility function to destroy an option that is not droppable. + + +
public fun destroy<Element>(self: option::Option<Element>, d: |Element|)
+
+ + + +
+Implementation + + +
public inline fun destroy<Element>(self: Option<Element>, d: |Element|) {
+    let vec = to_vec(self);
+    vector::destroy(vec, |e| d(e));
+}
+
+ + + +
+ + + +## Specification + + + + +
pragma aborts_if_is_strict;
+
+ + + + + +### Helper Schema + + + + + + +
schema AbortsIfNone<Element> {
+    self: Option<Element>;
+    aborts_if spec_is_none(self) with EOPTION_NOT_SET;
+}
+
+ + + + + +### Struct `Option` + + +
struct Option<Element> has copy, drop, store
+
+ + + +
+
+vec: vector<Element> +
+
+ +
+
+ + +The size of vector is always less than equal to 1 +because it's 0 for "none" or 1 for "some". + + +
invariant len(vec) <= 1;
+
+ + + + + +### Function `none` + + +
public fun none<Element>(): option::Option<Element>
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == spec_none<Element>();
+
+ + + + + + + +
fun spec_none<Element>(): Option<Element> {
+   Option{ vec: vec() }
+}
+
+ + + + + +### Function `some` + + +
public fun some<Element>(e: Element): option::Option<Element>
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == spec_some(e);
+
+ + + + + + + +
fun spec_some<Element>(e: Element): Option<Element> {
+   Option{ vec: vec(e) }
+}
+
+ + + + + +### Function `from_vec` + + +
public fun from_vec<Element>(vec: vector<Element>): option::Option<Element>
+
+ + + + +
aborts_if vector::length(vec) > 1;
+
+ + + + + +### Function `is_none` + + +
public fun is_none<Element>(self: &option::Option<Element>): bool
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == spec_is_none(self);
+
+ + + + + + + +
fun spec_is_none<Element>(self: Option<Element>): bool {
+   vector::is_empty(self.vec)
+}
+
+ + + + + +### Function `is_some` + + +
public fun is_some<Element>(self: &option::Option<Element>): bool
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == spec_is_some(self);
+
+ + + + + + + +
fun spec_is_some<Element>(self: Option<Element>): bool {
+   !vector::is_empty(self.vec)
+}
+
+ + + + + +### Function `contains` + + +
public fun contains<Element>(self: &option::Option<Element>, e_ref: &Element): bool
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == spec_contains(self, e_ref);
+
+ + + + + + + +
fun spec_contains<Element>(self: Option<Element>, e: Element): bool {
+   is_some(self) && borrow(self) == e
+}
+
+ + + + + +### Function `borrow` + + +
public fun borrow<Element>(self: &option::Option<Element>): &Element
+
+ + + + +
pragma opaque;
+include AbortsIfNone<Element>;
+ensures result == spec_borrow(self);
+
+ + + + + + + +
fun spec_borrow<Element>(self: Option<Element>): Element {
+   self.vec[0]
+}
+
+ + + + + +### Function `borrow_with_default` + + +
public fun borrow_with_default<Element>(self: &option::Option<Element>, default_ref: &Element): &Element
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == (if (spec_is_some(self)) spec_borrow(self) else default_ref);
+
+ + + + + +### Function `get_with_default` + + +
public fun get_with_default<Element: copy, drop>(self: &option::Option<Element>, default: Element): Element
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == (if (spec_is_some(self)) spec_borrow(self) else default);
+
+ + + + + +### Function `fill` + + +
public fun fill<Element>(self: &mut option::Option<Element>, e: Element)
+
+ + + + +
pragma opaque;
+aborts_if spec_is_some(self) with EOPTION_IS_SET;
+ensures spec_is_some(self);
+ensures spec_borrow(self) == e;
+
+ + + + + +### Function `extract` + + +
public fun extract<Element>(self: &mut option::Option<Element>): Element
+
+ + + + +
pragma opaque;
+include AbortsIfNone<Element>;
+ensures result == spec_borrow(old(self));
+ensures spec_is_none(self);
+
+ + + + + +### Function `borrow_mut` + + +
public fun borrow_mut<Element>(self: &mut option::Option<Element>): &mut Element
+
+ + + + +
include AbortsIfNone<Element>;
+ensures result == spec_borrow(self);
+ensures self == old(self);
+
+ + + + + +### Function `swap` + + +
public fun swap<Element>(self: &mut option::Option<Element>, e: Element): Element
+
+ + + + +
pragma opaque;
+include AbortsIfNone<Element>;
+ensures result == spec_borrow(old(self));
+ensures spec_is_some(self);
+ensures spec_borrow(self) == e;
+
+ + + + + +### Function `swap_or_fill` + + +
public fun swap_or_fill<Element>(self: &mut option::Option<Element>, e: Element): option::Option<Element>
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == old(self);
+ensures spec_borrow(self) == e;
+
+ + + + + +### Function `destroy_with_default` + + +
public fun destroy_with_default<Element: drop>(self: option::Option<Element>, default: Element): Element
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == (if (spec_is_some(self)) spec_borrow(self) else default);
+
+ + + + + +### Function `destroy_some` + + +
public fun destroy_some<Element>(self: option::Option<Element>): Element
+
+ + + + +
pragma opaque;
+include AbortsIfNone<Element>;
+ensures result == spec_borrow(self);
+
+ + + + + +### Function `destroy_none` + + +
public fun destroy_none<Element>(self: option::Option<Element>)
+
+ + + + +
pragma opaque;
+aborts_if spec_is_some(self) with EOPTION_IS_SET;
+
+ + + + + +### Function `to_vec` + + +
public fun to_vec<Element>(self: option::Option<Element>): vector<Element>
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == self.vec;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/overview.md b/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/overview.md new file mode 100644 index 0000000000000..8eb0c67f05113 --- /dev/null +++ b/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/overview.md @@ -0,0 +1,29 @@ + + + +# Move Stdlib Modules + + +This is the reference documentation of the Move standard library. +For on overview of the Move language, see the [Move Book][move-book]. + + + + +## Index + + +- [`0x1::acl`](acl.md#0x1_acl) +- [`0x1::bcs`](bcs.md#0x1_bcs) +- [`0x1::bit_vector`](bit_vector.md#0x1_bit_vector) +- [`0x1::error`](error.md#0x1_error) +- [`0x1::features`](features.md#0x1_features) +- [`0x1::fixed_point32`](fixed_point32.md#0x1_fixed_point32) +- [`0x1::hash`](hash.md#0x1_hash) +- [`0x1::option`](option.md#0x1_option) +- [`0x1::signer`](signer.md#0x1_signer) +- [`0x1::string`](string.md#0x1_string) +- [`0x1::vector`](vector.md#0x1_vector) + + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/signer.md b/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/signer.md new file mode 100644 index 0000000000000..f6de8799b571c --- /dev/null +++ b/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/signer.md @@ -0,0 +1,94 @@ + + + +# Module `0x1::signer` + + + +- [Function `borrow_address`](#0x1_signer_borrow_address) +- [Function `address_of`](#0x1_signer_address_of) +- [Specification](#@Specification_0) + + +
+ + + + + +## Function `borrow_address` + +Borrows the address of the signer +Conceptually, you can think of the signer as being a struct wrapper around an +address +``` +struct signer has drop { addr: address } +``` +borrow_address borrows this inner field + + +
public fun borrow_address(s: &signer): &address
+
+ + + +
+Implementation + + +
native public fun borrow_address(s: &signer): &address;
+
+ + + +
+ + + +## Function `address_of` + + + +
public fun address_of(s: &signer): address
+
+ + + +
+Implementation + + +
public fun address_of(s: &signer): address {
+    *borrow_address(s)
+}
+
+ + + +
+ + + +## Specification + +Return true only if s is a transaction signer. This is a spec function only available in spec. + + + + + +
native fun is_txn_signer(s: signer): bool;
+
+ + +Return true only if a is a transaction signer address. This is a spec function only available in spec. + + + + + +
native fun is_txn_signer_addr(a: address): bool;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/string.md b/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/string.md new file mode 100644 index 0000000000000..319b862698c08 --- /dev/null +++ b/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/string.md @@ -0,0 +1,552 @@ + + + +# Module `0x1::string` + +The string module defines the String type which represents UTF8 encoded strings. + + +- [Struct `String`](#0x1_string_String) +- [Constants](#@Constants_0) +- [Function `utf8`](#0x1_string_utf8) +- [Function `try_utf8`](#0x1_string_try_utf8) +- [Function `bytes`](#0x1_string_bytes) +- [Function `is_empty`](#0x1_string_is_empty) +- [Function `length`](#0x1_string_length) +- [Function `append`](#0x1_string_append) +- [Function `append_utf8`](#0x1_string_append_utf8) +- [Function `insert`](#0x1_string_insert) +- [Function `sub_string`](#0x1_string_sub_string) +- [Function `index_of`](#0x1_string_index_of) +- [Function `internal_check_utf8`](#0x1_string_internal_check_utf8) +- [Function `internal_is_char_boundary`](#0x1_string_internal_is_char_boundary) +- [Function `internal_sub_string`](#0x1_string_internal_sub_string) +- [Function `internal_index_of`](#0x1_string_internal_index_of) +- [Specification](#@Specification_1) + - [Function `internal_check_utf8`](#@Specification_1_internal_check_utf8) + - [Function `internal_is_char_boundary`](#@Specification_1_internal_is_char_boundary) + - [Function `internal_sub_string`](#@Specification_1_internal_sub_string) + - [Function `internal_index_of`](#@Specification_1_internal_index_of) + + +
use 0x1::option;
+use 0x1::vector;
+
+ + + + + +## Struct `String` + +A String holds a sequence of bytes which is guaranteed to be in utf8 format. + + +
struct String has copy, drop, store
+
+ + + +
+Fields + + +
+
+bytes: vector<u8> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Index out of range. + + +
const EINVALID_INDEX: u64 = 2;
+
+ + + + + +An invalid UTF8 encoding. + + +
const EINVALID_UTF8: u64 = 1;
+
+ + + + + +## Function `utf8` + +Creates a new string from a sequence of bytes. Aborts if the bytes do not represent valid utf8. + + +
public fun utf8(bytes: vector<u8>): string::String
+
+ + + +
+Implementation + + +
public fun utf8(bytes: vector<u8>): String {
+    assert!(internal_check_utf8(&bytes), EINVALID_UTF8);
+    String{bytes}
+}
+
+ + + +
+ + + +## Function `try_utf8` + +Tries to create a new string from a sequence of bytes. + + +
public fun try_utf8(bytes: vector<u8>): option::Option<string::String>
+
+ + + +
+Implementation + + +
public fun try_utf8(bytes: vector<u8>): Option<String> {
+    if (internal_check_utf8(&bytes)) {
+        option::some(String{bytes})
+    } else {
+        option::none()
+    }
+}
+
+ + + +
+ + + +## Function `bytes` + +Returns a reference to the underlying byte vector. + + +
public fun bytes(self: &string::String): &vector<u8>
+
+ + + +
+Implementation + + +
public fun bytes(self: &String): &vector<u8> {
+    &self.bytes
+}
+
+ + + +
+ + + +## Function `is_empty` + +Checks whether this string is empty. + + +
public fun is_empty(self: &string::String): bool
+
+ + + +
+Implementation + + +
public fun is_empty(self: &String): bool {
+    vector::is_empty(&self.bytes)
+}
+
+ + + +
+ + + +## Function `length` + +Returns the length of this string, in bytes. + + +
public fun length(self: &string::String): u64
+
+ + + +
+Implementation + + +
public fun length(self: &String): u64 {
+    vector::length(&self.bytes)
+}
+
+ + + +
+ + + +## Function `append` + +Appends a string. + + +
public fun append(self: &mut string::String, r: string::String)
+
+ + + +
+Implementation + + +
public fun append(self: &mut String, r: String) {
+    vector::append(&mut self.bytes, r.bytes)
+}
+
+ + + +
+ + + +## Function `append_utf8` + +Appends bytes which must be in valid utf8 format. + + +
public fun append_utf8(self: &mut string::String, bytes: vector<u8>)
+
+ + + +
+Implementation + + +
public fun append_utf8(self: &mut String, bytes: vector<u8>) {
+    append(self, utf8(bytes))
+}
+
+ + + +
+ + + +## Function `insert` + +Insert the other string at the byte index in given string. The index must be at a valid utf8 char +boundary. + + +
public fun insert(self: &mut string::String, at: u64, o: string::String)
+
+ + + +
+Implementation + + +
public fun insert(self: &mut String, at: u64, o: String) {
+    let bytes = &self.bytes;
+    assert!(at <= vector::length(bytes) && internal_is_char_boundary(bytes, at), EINVALID_INDEX);
+    let l = length(self);
+    let front = sub_string(self, 0, at);
+    let end = sub_string(self, at, l);
+    append(&mut front, o);
+    append(&mut front, end);
+    *self = front;
+}
+
+ + + +
+ + + +## Function `sub_string` + +Returns a sub-string using the given byte indices, where i is the first byte position and j is the start +of the first byte not included (or the length of the string). The indices must be at valid utf8 char boundaries, +guaranteeing that the result is valid utf8. + + +
public fun sub_string(self: &string::String, i: u64, j: u64): string::String
+
+ + + +
+Implementation + + +
public fun sub_string(self: &String, i: u64, j: u64): String {
+    let bytes = &self.bytes;
+    let l = vector::length(bytes);
+    assert!(
+        j <= l && i <= j && internal_is_char_boundary(bytes, i) && internal_is_char_boundary(bytes, j),
+        EINVALID_INDEX
+    );
+    String { bytes: internal_sub_string(bytes, i, j) }
+}
+
+ + + +
+ + + +## Function `index_of` + +Computes the index of the first occurrence of a string. Returns length(s) if no occurrence found. + + +
public fun index_of(self: &string::String, r: &string::String): u64
+
+ + + +
+Implementation + + +
public fun index_of(self: &String, r: &String): u64 {
+    internal_index_of(&self.bytes, &r.bytes)
+}
+
+ + + +
+ + + +## Function `internal_check_utf8` + + + +
public fun internal_check_utf8(v: &vector<u8>): bool
+
+ + + +
+Implementation + + +
public native fun internal_check_utf8(v: &vector<u8>): bool;
+
+ + + +
+ + + +## Function `internal_is_char_boundary` + + + +
fun internal_is_char_boundary(v: &vector<u8>, i: u64): bool
+
+ + + +
+Implementation + + +
native fun internal_is_char_boundary(v: &vector<u8>, i: u64): bool;
+
+ + + +
+ + + +## Function `internal_sub_string` + + + +
fun internal_sub_string(v: &vector<u8>, i: u64, j: u64): vector<u8>
+
+ + + +
+Implementation + + +
native fun internal_sub_string(v: &vector<u8>, i: u64, j: u64): vector<u8>;
+
+ + + +
+ + + +## Function `internal_index_of` + + + +
fun internal_index_of(v: &vector<u8>, r: &vector<u8>): u64
+
+ + + +
+Implementation + + +
native fun internal_index_of(v: &vector<u8>, r: &vector<u8>): u64;
+
+ + + +
+ + + +## Specification + + + + +### Function `internal_check_utf8` + + +
public fun internal_check_utf8(v: &vector<u8>): bool
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] result == spec_internal_check_utf8(v);
+
+ + + + + +### Function `internal_is_char_boundary` + + +
fun internal_is_char_boundary(v: &vector<u8>, i: u64): bool
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] result == spec_internal_is_char_boundary(v, i);
+
+ + + + + +### Function `internal_sub_string` + + +
fun internal_sub_string(v: &vector<u8>, i: u64, j: u64): vector<u8>
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] result == spec_internal_sub_string(v, i, j);
+
+ + + + + +### Function `internal_index_of` + + +
fun internal_index_of(v: &vector<u8>, r: &vector<u8>): u64
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] result == spec_internal_index_of(v, r);
+
+ + + + + + + +
fun spec_utf8(bytes: vector<u8>): String {
+   String{bytes}
+}
+
+ + + + + + + +
fun spec_internal_check_utf8(v: vector<u8>): bool;
+
+fun spec_internal_is_char_boundary(v: vector<u8>, i: u64): bool;
+
+fun spec_internal_sub_string(v: vector<u8>, i: u64, j: u64): vector<u8>;
+
+fun spec_internal_index_of(v: vector<u8>, r: vector<u8>): u64;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/vector.md b/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/vector.md new file mode 100644 index 0000000000000..d5e6a7bfa2ecd --- /dev/null +++ b/aptos-move/framework/move-stdlib/tests/compiler-v2-doc/vector.md @@ -0,0 +1,2033 @@ + + + +# Module `0x1::vector` + +A variable-sized container that can hold any type. Indexing is 0-based, and +vectors are growable. This module has many native functions. +Verification of modules that use this one uses model functions that are implemented +directly in Boogie. The specification language has built-in functions operations such +as singleton_vector. There are some helper functions defined here for specifications in other +modules as well. + +>Note: We did not verify most of the +Move functions here because many have loops, requiring loop invariants to prove, and +the return on investment didn't seem worth it for these simple functions. 
+ + +- [Constants](#@Constants_0) +- [Function `empty`](#0x1_vector_empty) +- [Function `length`](#0x1_vector_length) +- [Function `borrow`](#0x1_vector_borrow) +- [Function `push_back`](#0x1_vector_push_back) +- [Function `borrow_mut`](#0x1_vector_borrow_mut) +- [Function `pop_back`](#0x1_vector_pop_back) +- [Function `destroy_empty`](#0x1_vector_destroy_empty) +- [Function `swap`](#0x1_vector_swap) +- [Function `singleton`](#0x1_vector_singleton) +- [Function `reverse`](#0x1_vector_reverse) +- [Function `reverse_slice`](#0x1_vector_reverse_slice) +- [Function `append`](#0x1_vector_append) +- [Function `reverse_append`](#0x1_vector_reverse_append) +- [Function `trim`](#0x1_vector_trim) +- [Function `trim_reverse`](#0x1_vector_trim_reverse) +- [Function `is_empty`](#0x1_vector_is_empty) +- [Function `contains`](#0x1_vector_contains) +- [Function `index_of`](#0x1_vector_index_of) +- [Function `find`](#0x1_vector_find) +- [Function `insert`](#0x1_vector_insert) +- [Function `remove`](#0x1_vector_remove) +- [Function `remove_value`](#0x1_vector_remove_value) +- [Function `swap_remove`](#0x1_vector_swap_remove) +- [Function `for_each`](#0x1_vector_for_each) +- [Function `for_each_reverse`](#0x1_vector_for_each_reverse) +- [Function `for_each_ref`](#0x1_vector_for_each_ref) +- [Function `zip`](#0x1_vector_zip) +- [Function `zip_reverse`](#0x1_vector_zip_reverse) +- [Function `zip_ref`](#0x1_vector_zip_ref) +- [Function `enumerate_ref`](#0x1_vector_enumerate_ref) +- [Function `for_each_mut`](#0x1_vector_for_each_mut) +- [Function `zip_mut`](#0x1_vector_zip_mut) +- [Function `enumerate_mut`](#0x1_vector_enumerate_mut) +- [Function `fold`](#0x1_vector_fold) +- [Function `foldr`](#0x1_vector_foldr) +- [Function `map_ref`](#0x1_vector_map_ref) +- [Function `zip_map_ref`](#0x1_vector_zip_map_ref) +- [Function `map`](#0x1_vector_map) +- [Function `zip_map`](#0x1_vector_zip_map) +- [Function `filter`](#0x1_vector_filter) +- [Function `partition`](#0x1_vector_partition) +- 
[Function `rotate`](#0x1_vector_rotate) +- [Function `rotate_slice`](#0x1_vector_rotate_slice) +- [Function `stable_partition`](#0x1_vector_stable_partition) +- [Function `any`](#0x1_vector_any) +- [Function `all`](#0x1_vector_all) +- [Function `destroy`](#0x1_vector_destroy) +- [Function `range`](#0x1_vector_range) +- [Function `range_with_step`](#0x1_vector_range_with_step) +- [Function `slice`](#0x1_vector_slice) +- [Specification](#@Specification_1) + - [Helper Functions](#@Helper_Functions_2) + - [Function `singleton`](#@Specification_1_singleton) + - [Function `reverse`](#@Specification_1_reverse) + - [Function `reverse_slice`](#@Specification_1_reverse_slice) + - [Function `append`](#@Specification_1_append) + - [Function `reverse_append`](#@Specification_1_reverse_append) + - [Function `trim`](#@Specification_1_trim) + - [Function `trim_reverse`](#@Specification_1_trim_reverse) + - [Function `is_empty`](#@Specification_1_is_empty) + - [Function `contains`](#@Specification_1_contains) + - [Function `index_of`](#@Specification_1_index_of) + - [Function `insert`](#@Specification_1_insert) + - [Function `remove`](#@Specification_1_remove) + - [Function `remove_value`](#@Specification_1_remove_value) + - [Function `swap_remove`](#@Specification_1_swap_remove) + - [Function `rotate`](#@Specification_1_rotate) + - [Function `rotate_slice`](#@Specification_1_rotate_slice) + + +
+ + + + + +## Constants + + + + +The index into the vector is out of bounds + + +
const EINDEX_OUT_OF_BOUNDS: u64 = 131072;
+
+ + + + + +The index into the vector is out of bounds + + +
const EINVALID_RANGE: u64 = 131073;
+
+ + + + + +The range in slice is invalid. + + +
const EINVALID_SLICE_RANGE: u64 = 131076;
+
+ + + + + +The step provided in range is invalid, must be greater than zero. + + +
const EINVALID_STEP: u64 = 131075;
+
+ + + + + +The lengths of the vectors are not equal. + + +
const EVECTORS_LENGTH_MISMATCH: u64 = 131074;
+
+ + + + + +## Function `empty` + +Create an empty vector. + + +
#[bytecode_instruction]
+public fun empty<Element>(): vector<Element>
+
+ + + +
+Implementation + + +
native public fun empty<Element>(): vector<Element>;
+
+ + + +
+ + + +## Function `length` + +Return the length of the vector. + + +
#[bytecode_instruction]
+public fun length<Element>(self: &vector<Element>): u64
+
+ + + +
+Implementation + + +
native public fun length<Element>(self: &vector<Element>): u64;
+
+ + + +
+ + + +## Function `borrow` + +Acquire an immutable reference to the ith element of the vector self. +Aborts if i is out of bounds. + + +
#[bytecode_instruction]
+public fun borrow<Element>(self: &vector<Element>, i: u64): &Element
+
+ + + +
+Implementation + + +
native public fun borrow<Element>(self: &vector<Element>, i: u64): ∈
+
+ + + +
+ + + +## Function `push_back` + +Add element e to the end of the vector self. + + +
#[bytecode_instruction]
+public fun push_back<Element>(self: &mut vector<Element>, e: Element)
+
+ + + +
+Implementation + + +
native public fun push_back<Element>(self: &mut vector<Element>, e: Element);
+
+ + + +
+ + + +## Function `borrow_mut` + +Return a mutable reference to the ith element in the vector self. +Aborts if i is out of bounds. + + +
#[bytecode_instruction]
+public fun borrow_mut<Element>(self: &mut vector<Element>, i: u64): &mut Element
+
+ + + +
+Implementation + + +
native public fun borrow_mut<Element>(self: &mut vector<Element>, i: u64): &mut Element;
+
+ + + +
+ + + +## Function `pop_back` + +Pop an element from the end of vector self. +Aborts if self is empty. + + +
#[bytecode_instruction]
+public fun pop_back<Element>(self: &mut vector<Element>): Element
+
+ + + +
+Implementation + + +
native public fun pop_back<Element>(self: &mut vector<Element>): Element;
+
+ + + +
+ + + +## Function `destroy_empty` + +Destroy the vector self. +Aborts if self is not empty. + + +
#[bytecode_instruction]
+public fun destroy_empty<Element>(self: vector<Element>)
+
+ + + +
+Implementation + + +
native public fun destroy_empty<Element>(self: vector<Element>);
+
+ + + +
+ + + +## Function `swap` + +Swaps the elements at the ith and jth indices in the vector self. +Aborts if i or j is out of bounds. + + +
#[bytecode_instruction]
+public fun swap<Element>(self: &mut vector<Element>, i: u64, j: u64)
+
+ + + +
+Implementation + + +
native public fun swap<Element>(self: &mut vector<Element>, i: u64, j: u64);
+
+ + + +
+ + + +## Function `singleton` + +Return a vector of size one containing element e. + + +
public fun singleton<Element>(e: Element): vector<Element>
+
+ + + +
+Implementation + + +
public fun singleton<Element>(e: Element): vector<Element> {
+    let v = empty();
+    push_back(&mut v, e);
+    v
+}
+
+ + + +
+ + + +## Function `reverse` + +Reverses the order of the elements in the vector self in place. + + +
public fun reverse<Element>(self: &mut vector<Element>)
+
+ + + +
+Implementation + + +
public fun reverse<Element>(self: &mut vector<Element>) {
+    let len = length(self);
+    reverse_slice(self, 0, len);
+}
+
+ + + +
+ + + +## Function `reverse_slice` + +Reverses the order of the elements [left, right) in the vector self in place. + + +
public fun reverse_slice<Element>(self: &mut vector<Element>, left: u64, right: u64)
+
+ + + +
+Implementation + + +
public fun reverse_slice<Element>(self: &mut vector<Element>, left: u64, right: u64) {
+    assert!(left <= right, EINVALID_RANGE);
+    if (left == right) return;
+    right = right - 1;
+    while (left < right) {
+        swap(self, left, right);
+        left = left + 1;
+        right = right - 1;
+    }
+}
+
+ + + +
+ + + +## Function `append` + +Pushes all of the elements of the other vector into the self vector. + + +
public fun append<Element>(self: &mut vector<Element>, other: vector<Element>)
+
+ + + +
+Implementation + + +
public fun append<Element>(self: &mut vector<Element>, other: vector<Element>) {
+    reverse(&mut other);
+    reverse_append(self, other);
+}
+
+ + + +
+ + + +## Function `reverse_append` + +Pushes all of the elements of the other vector into the self vector. + + +
public fun reverse_append<Element>(self: &mut vector<Element>, other: vector<Element>)
+
+ + + +
+Implementation + + +
public fun reverse_append<Element>(self: &mut vector<Element>, other: vector<Element>) {
+    let len = length(&other);
+    while (len > 0) {
+        push_back(self, pop_back(&mut other));
+        len = len - 1;
+    };
+    destroy_empty(other);
+}
+
+ + + +
+ + + +## Function `trim` + +Trim a vector to a smaller size, returning the evicted elements in order + + +
public fun trim<Element>(self: &mut vector<Element>, new_len: u64): vector<Element>
+
+ + + +
+Implementation + + +
public fun trim<Element>(self: &mut vector<Element>, new_len: u64): vector<Element> {
+    let res = trim_reverse(self, new_len);
+    reverse(&mut res);
+    res
+}
+
+ + + +
+ + + +## Function `trim_reverse` + +Trim a vector to a smaller size, returning the evicted elements in reverse order + + +
public fun trim_reverse<Element>(self: &mut vector<Element>, new_len: u64): vector<Element>
+
+ + + +
+Implementation + + +
public fun trim_reverse<Element>(self: &mut vector<Element>, new_len: u64): vector<Element> {
+    let len = length(self);
+    assert!(new_len <= len, EINDEX_OUT_OF_BOUNDS);
+    let result = empty();
+    while (new_len < len) {
+        push_back(&mut result, pop_back(self));
+        len = len - 1;
+    };
+    result
+}
+
+ + + +
+ + + +## Function `is_empty` + +Return true if the vector self has no elements and false otherwise. + + +
public fun is_empty<Element>(self: &vector<Element>): bool
+
+ + + +
+Implementation + + +
public fun is_empty<Element>(self: &vector<Element>): bool {
+    length(self) == 0
+}
+
+ + + +
+ + + +## Function `contains` + +Return true if e is in the vector self. + + +
public fun contains<Element>(self: &vector<Element>, e: &Element): bool
+
+ + + +
+Implementation + + +
public fun contains<Element>(self: &vector<Element>, e: &Element): bool {
+    let i = 0;
+    let len = length(self);
+    while (i < len) {
+        if (borrow(self, i) == e) return true;
+        i = i + 1;
+    };
+    false
+}
+
+ + + +
+ + + +## Function `index_of` + +Return (true, i) if e is in the vector self at index i. +Otherwise, returns (false, 0). + + +
public fun index_of<Element>(self: &vector<Element>, e: &Element): (bool, u64)
+
+ + + +
+Implementation + + +
public fun index_of<Element>(self: &vector<Element>, e: &Element): (bool, u64) {
+    let i = 0;
+    let len = length(self);
+    while (i < len) {
+        if (borrow(self, i) == e) return (true, i);
+        i = i + 1;
+    };
+    (false, 0)
+}
+
+ + + +
+ + + +## Function `find` + +Return (true, i) if there's an element that matches the predicate. If there are multiple elements that match +the predicate, only the index of the first one is returned. +Otherwise, returns (false, 0). + + +
public fun find<Element>(self: &vector<Element>, f: |&Element|bool): (bool, u64)
+
+ + + +
+Implementation + + +
public inline fun find<Element>(self: &vector<Element>, f: |&Element|bool): (bool, u64) {
+    let find = false;
+    let found_index = 0;
+    let i = 0;
+    let len = length(self);
+    while (i < len) {
+        // Cannot call return in an inline function so we need to resort to break here.
+        if (f(borrow(self, i))) {
+            find = true;
+            found_index = i;
+            break
+        };
+        i = i + 1;
+    };
+    (find, found_index)
+}
+
+ + + +
+ + + +## Function `insert` + +Insert a new element at position 0 <= i <= length, using O(length - i) time. +Aborts if out of bounds. + + +
public fun insert<Element>(self: &mut vector<Element>, i: u64, e: Element)
+
+ + + +
+Implementation + + +
public fun insert<Element>(self: &mut vector<Element>, i: u64, e: Element) {
+    let len = length(self);
+    assert!(i <= len, EINDEX_OUT_OF_BOUNDS);
+    push_back(self, e);
+    while (i < len) {
+        swap(self, i, len);
+        i = i + 1;
+    };
+}
+
+ + + +
+ + + +## Function `remove` + +Remove the ith element of the vector self, shifting all subsequent elements. +This is O(n) and preserves ordering of elements in the vector. +Aborts if i is out of bounds. + + +
public fun remove<Element>(self: &mut vector<Element>, i: u64): Element
+
+ + + +
+Implementation + + +
public fun remove<Element>(self: &mut vector<Element>, i: u64): Element {
+    let len = length(self);
+    // i out of bounds; abort
+    if (i >= len) abort EINDEX_OUT_OF_BOUNDS;
+
+    len = len - 1;
+    while (i < len) swap(self, i, { i = i + 1; i });
+    pop_back(self)
+}
+
+ + + +
+ + + +## Function `remove_value` + +Remove the first occurrence of a given value in the vector self and return it in a vector, shifting all +subsequent elements. +This is O(n) and preserves ordering of elements in the vector. +This returns an empty vector if the value isn't present in the vector. +Note that this cannot return an option as option uses vector and there'd be a circular dependency between option +and vector. + + +
public fun remove_value<Element>(self: &mut vector<Element>, val: &Element): vector<Element>
+
+ + + +
+Implementation + + +
public fun remove_value<Element>(self: &mut vector<Element>, val: &Element): vector<Element> {
+    // This doesn't cost a O(2N) run time as index_of scans from left to right and stops when the element is found,
+    // while remove would continue from the identified index to the end of the vector.
+    let (found, index) = index_of(self, val);
+    if (found) {
+        vector[remove(self, index)]
+    } else {
+       vector[]
+    }
+}
+
+ + + +
+ + + +## Function `swap_remove` + +Swap the ith element of the vector self with the last element and then pop the vector. +This is O(1), but does not preserve ordering of elements in the vector. +Aborts if i is out of bounds. + + +
public fun swap_remove<Element>(self: &mut vector<Element>, i: u64): Element
+
+ + + +
+Implementation + + +
public fun swap_remove<Element>(self: &mut vector<Element>, i: u64): Element {
+    assert!(!is_empty(self), EINDEX_OUT_OF_BOUNDS);
+    let last_idx = length(self) - 1;
+    swap(self, i, last_idx);
+    pop_back(self)
+}
+
+ + + +
+ + + +## Function `for_each` + +Apply the function to each element in the vector, consuming it. + + +
public fun for_each<Element>(self: vector<Element>, f: |Element|)
+
+ + + +
+Implementation + + +
public inline fun for_each<Element>(self: vector<Element>, f: |Element|) {
+    reverse(&mut self); // We need to reverse the vector to consume it efficiently
+    for_each_reverse(self, |e| f(e));
+}
+
+ + + +
+ + + +## Function `for_each_reverse` + +Apply the function to each element in the vector, consuming it. + + +
public fun for_each_reverse<Element>(self: vector<Element>, f: |Element|)
+
+ + + +
+Implementation + + +
public inline fun for_each_reverse<Element>(self: vector<Element>, f: |Element|) {
+    let len = length(&self);
+    while (len > 0) {
+        f(pop_back(&mut self));
+        len = len - 1;
+    };
+    destroy_empty(self)
+}
+
+ + + +
+ + + +## Function `for_each_ref` + +Apply the function to a reference of each element in the vector. + + +
public fun for_each_ref<Element>(self: &vector<Element>, f: |&Element|)
+
+ + + +
+Implementation + + +
public inline fun for_each_ref<Element>(self: &vector<Element>, f: |&Element|) {
+    let i = 0;
+    let len = length(self);
+    while (i < len) {
+        f(borrow(self, i));
+        i = i + 1
+    }
+}
+
+ + + +
+ + + +## Function `zip` + +Apply the function to each pair of elements in the two given vectors, consuming them. + + +
public fun zip<Element1, Element2>(self: vector<Element1>, v2: vector<Element2>, f: |(Element1, Element2)|)
+
+ + + +
+Implementation + + +
public inline fun zip<Element1, Element2>(self: vector<Element1>, v2: vector<Element2>, f: |Element1, Element2|) {
+    // We need to reverse the vectors to consume it efficiently
+    reverse(&mut self);
+    reverse(&mut v2);
+    zip_reverse(self, v2, |e1, e2| f(e1, e2));
+}
+
+ + + +
+ + + +## Function `zip_reverse` + +Apply the function to each pair of elements in the two given vectors in the reverse order, consuming them. +This errors out if the vectors are not of the same length. + + +
public fun zip_reverse<Element1, Element2>(self: vector<Element1>, v2: vector<Element2>, f: |(Element1, Element2)|)
+
+ + + +
+Implementation + + +
public inline fun zip_reverse<Element1, Element2>(
+    self: vector<Element1>,
+    v2: vector<Element2>,
+    f: |Element1, Element2|,
+) {
+    let len = length(&self);
+    // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
+    // due to how inline functions work.
+    assert!(len == length(&v2), 0x20002);
+    while (len > 0) {
+        f(pop_back(&mut self), pop_back(&mut v2));
+        len = len - 1;
+    };
+    destroy_empty(self);
+    destroy_empty(v2);
+}
+
+ + + +
+ + + +## Function `zip_ref` + +Apply the function to the references of each pair of elements in the two given vectors. +This errors out if the vectors are not of the same length. + + +
public fun zip_ref<Element1, Element2>(self: &vector<Element1>, v2: &vector<Element2>, f: |(&Element1, &Element2)|)
+
+ + + +
+Implementation + + +
public inline fun zip_ref<Element1, Element2>(
+    self: &vector<Element1>,
+    v2: &vector<Element2>,
+    f: |&Element1, &Element2|,
+) {
+    let len = length(self);
+    // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
+    // due to how inline functions work.
+    assert!(len == length(v2), 0x20002);
+    let i = 0;
+    while (i < len) {
+        f(borrow(self, i), borrow(v2, i));
+        i = i + 1
+    }
+}
+
+ + + +
+ + + +## Function `enumerate_ref` + +Apply the function to a reference of each element in the vector with its index. + + +
public fun enumerate_ref<Element>(self: &vector<Element>, f: |(u64, &Element)|)
+
+ + + +
+Implementation + + +
public inline fun enumerate_ref<Element>(self: &vector<Element>, f: |u64, &Element|) {
+    let i = 0;
+    let len = length(self);
+    while (i < len) {
+        f(i, borrow(self, i));
+        i = i + 1;
+    };
+}
+
+ + + +
+ + + +## Function `for_each_mut` + +Apply the function to a mutable reference to each element in the vector. + + +
public fun for_each_mut<Element>(self: &mut vector<Element>, f: |&mut Element|)
+
+ + + +
+Implementation + + +
public inline fun for_each_mut<Element>(self: &mut vector<Element>, f: |&mut Element|) {
+    let i = 0;
+    let len = length(self);
+    while (i < len) {
+        f(borrow_mut(self, i));
+        i = i + 1
+    }
+}
+
+ + + +
+ + + +## Function `zip_mut` + +Apply the function to mutable references to each pair of elements in the two given vectors. +This errors out if the vectors are not of the same length. + + +
public fun zip_mut<Element1, Element2>(self: &mut vector<Element1>, v2: &mut vector<Element2>, f: |(&mut Element1, &mut Element2)|)
+
+ + + +
+Implementation + + +
public inline fun zip_mut<Element1, Element2>(
+    self: &mut vector<Element1>,
+    v2: &mut vector<Element2>,
+    f: |&mut Element1, &mut Element2|,
+) {
+    let i = 0;
+    let len = length(self);
+    // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
+    // due to how inline functions work.
+    assert!(len == length(v2), 0x20002);
+    while (i < len) {
+        f(borrow_mut(self, i), borrow_mut(v2, i));
+        i = i + 1
+    }
+}
+
+ + + +
+ + + +## Function `enumerate_mut` + +Apply the function to a mutable reference of each element in the vector with its index. + + +
public fun enumerate_mut<Element>(self: &mut vector<Element>, f: |(u64, &mut Element)|)
+
+ + + +
+Implementation + + +
public inline fun enumerate_mut<Element>(self: &mut vector<Element>, f: |u64, &mut Element|) {
+    let i = 0;
+    let len = length(self);
+    while (i < len) {
+        f(i, borrow_mut(self, i));
+        i = i + 1;
+    };
+}
+
+ + + +
+ + + +## Function `fold` + +Fold the function over the elements. For example, fold(vector[1,2,3], 0, f) will execute +f(f(f(0, 1), 2), 3) + + +
public fun fold<Accumulator, Element>(self: vector<Element>, init: Accumulator, f: |(Accumulator, Element)|Accumulator): Accumulator
+
+ + + +
+Implementation + + +
public inline fun fold<Accumulator, Element>(
+    self: vector<Element>,
+    init: Accumulator,
+    f: |Accumulator,Element|Accumulator
+): Accumulator {
+    let accu = init;
+    for_each(self, |elem| accu = f(accu, elem));
+    accu
+}
+
+ + + +
+ + + +## Function `foldr` + +Fold right like fold above but working right to left. For example, foldr(vector[1,2,3], 0, f) will execute +f(1, f(2, f(3, 0))) + + +
public fun foldr<Accumulator, Element>(self: vector<Element>, init: Accumulator, f: |(Element, Accumulator)|Accumulator): Accumulator
+
+ + + +
+Implementation + + +
public inline fun foldr<Accumulator, Element>(
+    self: vector<Element>,
+    init: Accumulator,
+    f: |Element, Accumulator|Accumulator
+): Accumulator {
+    let accu = init;
+    for_each_reverse(self, |elem| accu = f(elem, accu));
+    accu
+}
+
+ + + +
+ + + +## Function `map_ref` + +Map the function over the references of the elements of the vector, producing a new vector without modifying the +original vector. + + +
public fun map_ref<Element, NewElement>(self: &vector<Element>, f: |&Element|NewElement): vector<NewElement>
+
+ + + +
+Implementation + + +
public inline fun map_ref<Element, NewElement>(
+    self: &vector<Element>,
+    f: |&Element|NewElement
+): vector<NewElement> {
+    let result = vector<NewElement>[];
+    for_each_ref(self, |elem| push_back(&mut result, f(elem)));
+    result
+}
+
+ + + +
+ + + +## Function `zip_map_ref` + +Map the function over the references of the element pairs of two vectors, producing a new vector from the return +values without modifying the original vectors. + + +
public fun zip_map_ref<Element1, Element2, NewElement>(self: &vector<Element1>, v2: &vector<Element2>, f: |(&Element1, &Element2)|NewElement): vector<NewElement>
+
+ + + +
+Implementation + + +
public inline fun zip_map_ref<Element1, Element2, NewElement>(
+    self: &vector<Element1>,
+    v2: &vector<Element2>,
+    f: |&Element1, &Element2|NewElement
+): vector<NewElement> {
+    // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
+    // due to how inline functions work.
+    assert!(length(self) == length(v2), 0x20002);
+
+    let result = vector<NewElement>[];
+    zip_ref(self, v2, |e1, e2| push_back(&mut result, f(e1, e2)));
+    result
+}
+
+ + + +
+ + + +## Function `map` + +Map the function over the elements of the vector, producing a new vector. + + +
public fun map<Element, NewElement>(self: vector<Element>, f: |Element|NewElement): vector<NewElement>
+
+ + + +
+Implementation + + +
public inline fun map<Element, NewElement>(
+    self: vector<Element>,
+    f: |Element|NewElement
+): vector<NewElement> {
+    let result = vector<NewElement>[];
+    for_each(self, |elem| push_back(&mut result, f(elem)));
+    result
+}
+
+ + + +
+ + + +## Function `zip_map` + +Map the function over the element pairs of the two vectors, producing a new vector. + + +
public fun zip_map<Element1, Element2, NewElement>(self: vector<Element1>, v2: vector<Element2>, f: |(Element1, Element2)|NewElement): vector<NewElement>
+
+ + + +
+Implementation + + +
public inline fun zip_map<Element1, Element2, NewElement>(
+    self: vector<Element1>,
+    v2: vector<Element2>,
+    f: |Element1, Element2|NewElement
+): vector<NewElement> {
+    // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
+    // due to how inline functions work.
+    assert!(length(&self) == length(&v2), 0x20002);
+
+    let result = vector<NewElement>[];
+    zip(self, v2, |e1, e2| push_back(&mut result, f(e1, e2)));
+    result
+}
+
+ + + +
+ + + +## Function `filter` + +Filter the vector using the boolean function, removing all elements for which p(e) is not true. + + +
public fun filter<Element: drop>(self: vector<Element>, p: |&Element|bool): vector<Element>
+
+ + + +
+Implementation + + +
public inline fun filter<Element:drop>(
+    self: vector<Element>,
+    p: |&Element|bool
+): vector<Element> {
+    let result = vector<Element>[];
+    for_each(self, |elem| {
+        if (p(&elem)) push_back(&mut result, elem);
+    });
+    result
+}
+
+ + + +
+ + + +## Function `partition` + +Partition: moves all elements for which pred is true to the front, in place. +Preserves the relative order of the elements for which pred is true, +BUT NOT for the elements for which pred is false. + + +
public fun partition<Element>(self: &mut vector<Element>, pred: |&Element|bool): u64
+
+ + + +
+Implementation + + +
public inline fun partition<Element>(
+    self: &mut vector<Element>,
+    pred: |&Element|bool
+): u64 {
+    let i = 0;
+    let len = length(self);
+    while (i < len) {
+        if (!pred(borrow(self, i))) break;
+        i = i + 1;
+    };
+    let p = i;
+    i = i + 1;
+    while (i < len) {
+        if (pred(borrow(self, i))) {
+            swap(self, p, i);
+            p = p + 1;
+        };
+        i = i + 1;
+    };
+    p
+}
+
+ + + +
+ + + +## Function `rotate` + +rotate(&mut [1, 2, 3, 4, 5], 2) -> [3, 4, 5, 1, 2] in place, returns the split point +i.e. 3 in the example above + + +
public fun rotate<Element>(self: &mut vector<Element>, rot: u64): u64
+
+ + + +
+Implementation + + +
public fun rotate<Element>(
+    self: &mut vector<Element>,
+    rot: u64
+): u64 {
+    let len = length(self);
+    rotate_slice(self, 0, rot, len)
+}
+
+ + + +
+ + + +## Function `rotate_slice` + +Same as above but on a sub-slice of an array [left, right) with left <= rot <= right +returns the new split point, i.e. left + (right - rot) + + +
public fun rotate_slice<Element>(self: &mut vector<Element>, left: u64, rot: u64, right: u64): u64
+
+ + + +
+Implementation + + +
public fun rotate_slice<Element>(
+    self: &mut vector<Element>,
+    left: u64,
+    rot: u64,
+    right: u64
+): u64 {
+    reverse_slice(self, left, rot);
+    reverse_slice(self, rot, right);
+    reverse_slice(self, left, right);
+    left + (right - rot)
+}
+
+ + + +
+ + + +## Function `stable_partition` + +Partition the array based on a predicate p, this routine is stable and thus +preserves the relative order of the elements in the two partitions. + + +
public fun stable_partition<Element>(self: &mut vector<Element>, p: |&Element|bool): u64
+
+ + + +
+Implementation + + +
public inline fun stable_partition<Element>(
+    self: &mut vector<Element>,
+    p: |&Element|bool
+): u64 {
+    let len = length(self);
+    let t = empty();
+    let f = empty();
+    while (len > 0) {
+        let e = pop_back(self);
+        if (p(&e)) {
+            push_back(&mut t, e);
+        } else {
+            push_back(&mut f, e);
+        };
+        len = len - 1;
+    };
+    let pos = length(&t);
+    reverse_append(self, t);
+    reverse_append(self, f);
+    pos
+}
+
+ + + +
+ + + +## Function `any` + +Return true if any element in the vector satisfies the predicate. + + +
public fun any<Element>(self: &vector<Element>, p: |&Element|bool): bool
+
+ + + +
+Implementation + + +
public inline fun any<Element>(
+    self: &vector<Element>,
+    p: |&Element|bool
+): bool {
+    let result = false;
+    let i = 0;
+    while (i < length(self)) {
+        result = p(borrow(self, i));
+        if (result) {
+            break
+        };
+        i = i + 1
+    };
+    result
+}
+
+ + + +
+ + + +## Function `all` + +Return true if all elements in the vector satisfy the predicate. + + +
public fun all<Element>(self: &vector<Element>, p: |&Element|bool): bool
+
+ + + +
+Implementation + + +
public inline fun all<Element>(
+    self: &vector<Element>,
+    p: |&Element|bool
+): bool {
+    let result = true;
+    let i = 0;
+    while (i < length(self)) {
+        result = p(borrow(self, i));
+        if (!result) {
+            break
+        };
+        i = i + 1
+    };
+    result
+}
+
+ + + +
+ + + +## Function `destroy` + +Destroy a vector, just a wrapper around for_each_reverse with a descriptive name +when used in the context of destroying a vector. + + +
public fun destroy<Element>(self: vector<Element>, d: |Element|)
+
+ + + +
+Implementation + + +
public inline fun destroy<Element>(
+    self: vector<Element>,
+    d: |Element|
+) {
+    for_each_reverse(self, |e| d(e))
+}
+
+ + + +
+ + + +## Function `range` + + + +
public fun range(start: u64, end: u64): vector<u64>
+
+ + + +
+Implementation + + +
public fun range(start: u64, end: u64): vector<u64> {
+    range_with_step(start, end, 1)
+}
+
+ + + +
+ + + +## Function `range_with_step` + + + +
public fun range_with_step(start: u64, end: u64, step: u64): vector<u64>
+
+ + + +
+Implementation + + +
public fun range_with_step(start: u64, end: u64, step: u64): vector<u64> {
+    assert!(step > 0, EINVALID_STEP);
+
+    let vec = vector[];
+    while (start < end) {
+        push_back(&mut vec, start);
+        start = start + step;
+    };
+    vec
+}
+
+ + + +
+ + + +## Function `slice` + + + +
public fun slice<Element: copy>(self: &vector<Element>, start: u64, end: u64): vector<Element>
+
+ + + +
+Implementation + + +
public fun slice<Element: copy>(
+    self: &vector<Element>,
+    start: u64,
+    end: u64
+): vector<Element> {
+    assert!(start <= end && end <= length(self), EINVALID_SLICE_RANGE);
+
+    let vec = vector[];
+    while (start < end) {
+        push_back(&mut vec, *borrow(self, start));
+        start = start + 1;
+    };
+    vec
+}
+
+ + + +
+ + + +## Specification + + + + + +### Helper Functions + + +Check if self is equal to the result of adding e at the end of v2 + + + + + +
fun eq_push_back<Element>(self: vector<Element>, v2: vector<Element>, e: Element): bool {
+    len(self) == len(v2) + 1 &&
+    self[len(self)-1] == e &&
+    self[0..len(self)-1] == v2[0..len(v2)]
+}
+
+ + +Check if self is equal to the result of concatenating v1 and v2 + + + + + +
fun eq_append<Element>(self: vector<Element>, v1: vector<Element>, v2: vector<Element>): bool {
+    len(self) == len(v1) + len(v2) &&
+    self[0..len(v1)] == v1 &&
+    self[len(v1)..len(self)] == v2
+}
+
+ + +Check if self is equal to the result of removing the first element of v2 + + + + + +
fun eq_pop_front<Element>(self: vector<Element>, v2: vector<Element>): bool {
+    len(self) + 1 == len(v2) &&
+    self == v2[1..len(v2)]
+}
+
+ + +Check that v1 is equal to the result of removing the element at index i from v2. + + + + + +
fun eq_remove_elem_at_index<Element>(i: u64, v1: vector<Element>, v2: vector<Element>): bool {
+    len(v1) + 1 == len(v2) &&
+    v1[0..i] == v2[0..i] &&
+    v1[i..len(v1)] == v2[i + 1..len(v2)]
+}
+
+ + +Check if self contains e. + + + + + +
fun spec_contains<Element>(self: vector<Element>, e: Element): bool {
+    exists x in self: x == e
+}
+
+ + + + + +### Function `singleton` + + +
public fun singleton<Element>(e: Element): vector<Element>
+
+ + + + +
aborts_if false;
+ensures result == vec(e);
+
+ + + + + +### Function `reverse` + + +
public fun reverse<Element>(self: &mut vector<Element>)
+
+ + + + +
pragma intrinsic = true;
+
+ + + + + +### Function `reverse_slice` + + +
public fun reverse_slice<Element>(self: &mut vector<Element>, left: u64, right: u64)
+
+ + + + +
pragma intrinsic = true;
+
+ + + + + +### Function `append` + + +
public fun append<Element>(self: &mut vector<Element>, other: vector<Element>)
+
+ + + + +
pragma intrinsic = true;
+
+ + + + + +### Function `reverse_append` + + +
public fun reverse_append<Element>(self: &mut vector<Element>, other: vector<Element>)
+
+ + + + +
pragma intrinsic = true;
+
+ + + + + +### Function `trim` + + +
public fun trim<Element>(self: &mut vector<Element>, new_len: u64): vector<Element>
+
+ + + + +
pragma intrinsic = true;
+
+ + + + + +### Function `trim_reverse` + + +
public fun trim_reverse<Element>(self: &mut vector<Element>, new_len: u64): vector<Element>
+
+ + + + +
pragma intrinsic = true;
+
+ + + + + +### Function `is_empty` + + +
public fun is_empty<Element>(self: &vector<Element>): bool
+
+ + + + +
pragma intrinsic = true;
+
+ + + + + +### Function `contains` + + +
public fun contains<Element>(self: &vector<Element>, e: &Element): bool
+
+ + + + +
pragma intrinsic = true;
+
+ + + + + +### Function `index_of` + + +
public fun index_of<Element>(self: &vector<Element>, e: &Element): (bool, u64)
+
+ + + + +
pragma intrinsic = true;
+
+ + + + + +### Function `insert` + + +
public fun insert<Element>(self: &mut vector<Element>, i: u64, e: Element)
+
+ + + + +
pragma intrinsic = true;
+
+ + + + + +### Function `remove` + + +
public fun remove<Element>(self: &mut vector<Element>, i: u64): Element
+
+ + + + +
pragma intrinsic = true;
+
+ + + + + +### Function `remove_value` + + +
public fun remove_value<Element>(self: &mut vector<Element>, val: &Element): vector<Element>
+
+ + + + +
pragma intrinsic = true;
+
+ + + + + +### Function `swap_remove` + + +
public fun swap_remove<Element>(self: &mut vector<Element>, i: u64): Element
+
+ + + + +
pragma intrinsic = true;
+
+ + + + + +### Function `rotate` + + +
public fun rotate<Element>(self: &mut vector<Element>, rot: u64): u64
+
+ + + + +
pragma intrinsic = true;
+
+ + + + + +### Function `rotate_slice` + + +
public fun rotate_slice<Element>(self: &mut vector<Element>, left: u64, rot: u64, right: u64): u64
+
+ + + + +
pragma intrinsic = true;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/src/aptos-natives.bpl b/aptos-move/framework/src/aptos-natives.bpl index 222b2b855594c..f31f51181e25e 100644 --- a/aptos-move/framework/src/aptos-natives.bpl +++ b/aptos-move/framework/src/aptos-natives.bpl @@ -77,3 +77,8 @@ axiom (forall limit: int :: {$1_aggregator_factory_spec_new_aggregator(limit)} axiom (forall limit: int :: {$1_aggregator_factory_spec_new_aggregator(limit)} (var agg := $1_aggregator_factory_spec_new_aggregator(limit); $1_aggregator_spec_aggregator_get_val(agg) == 0)); + +// ================================================================================== +// Native for function_info + +procedure $1_function_info_is_identifier(s: Vec int) returns (res: bool); diff --git a/aptos-move/framework/src/aptos.rs b/aptos-move/framework/src/aptos.rs index b478d25880cb5..baad522290a02 100644 --- a/aptos-move/framework/src/aptos.rs +++ b/aptos-move/framework/src/aptos.rs @@ -4,9 +4,8 @@ #![forbid(unsafe_code)] use crate::{ - docgen::DocgenOptions, extended_checks, path_in_crate, - release_builder::RELEASE_BUNDLE_EXTENSION, release_bundle::ReleaseBundle, BuildOptions, - ReleaseOptions, + docgen::DocgenOptions, path_in_crate, release_builder::RELEASE_BUNDLE_EXTENSION, + release_bundle::ReleaseBundle, BuildOptions, ReleaseOptions, }; use clap::ValueEnum; use move_command_line_common::address::NumericalAddress; @@ -100,14 +99,8 @@ impl ReleaseTarget { .collect::>(); ReleaseOptions { build_options: BuildOptions { - dev: false, with_srcs, with_abis: true, - with_source_maps: false, - with_error_map: true, - named_addresses: Default::default(), - override_std: None, - install_dir: None, with_docs: true, docgen_options: Some(DocgenOptions { include_impl: true, @@ -120,12 +113,7 @@ impl ReleaseTarget { output_format: None, }), skip_fetch_latest_git_deps: true, - bytecode_version: None, - compiler_version: None, - language_version: None, - skip_attribute_checks: false, - 
check_test_code: false, - known_attributes: extended_checks::get_all_attribute_names().clone(), + ..BuildOptions::default() }, packages: packages.iter().map(|(path, _)| path.to_owned()).collect(), rust_bindings: packages diff --git a/aptos-move/framework/src/built_package.rs b/aptos-move/framework/src/built_package.rs index 74e80e7bc56fa..78f8c48a57676 100644 --- a/aptos-move/framework/src/built_package.rs +++ b/aptos-move/framework/src/built_package.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - docgen::DocgenOptions, + docgen::{get_docgen_output_dir, DocgenOptions}, extended_checks, natives::code::{ModuleMetadata, MoveOption, PackageDep, PackageMetadata, UpgradePolicy}, zip_metadata, zip_metadata_str, RuntimeModuleMetadataV1, APTOS_METADATA_KEY, @@ -16,9 +16,10 @@ use codespan_reporting::{ term::termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor}, }; use itertools::Itertools; -use move_binary_format::CompiledModule; +use move_binary_format::{file_format_common::VERSION_7, CompiledModule}; use move_command_line_common::files::MOVE_COMPILED_EXTENSION; use move_compiler::compiled_unit::{CompiledUnit, NamedCompiledModule}; +use move_compiler_v2::{options::Options, Experiment}; use move_core_types::{language_storage::ModuleId, metadata::Metadata}; use move_model::{ metadata::{CompilerVersion, LanguageVersion}, @@ -83,11 +84,13 @@ pub struct BuildOptions { pub docgen_options: Option, #[clap(long)] pub skip_fetch_latest_git_deps: bool, - #[clap(long)] + #[clap(long, default_value_if("move_2", "true", "7"))] pub bytecode_version: Option, - #[clap(long, value_parser = clap::value_parser!(CompilerVersion))] + #[clap(long, value_parser = clap::value_parser!(CompilerVersion), + default_value_if("move_2", "true", "2.0"))] pub compiler_version: Option, - #[clap(long, value_parser = clap::value_parser!(LanguageVersion))] + #[clap(long, value_parser = clap::value_parser!(LanguageVersion), + default_value_if("move_2", "true", "2.0"))] 
pub language_version: Option, #[clap(long)] pub skip_attribute_checks: bool, @@ -95,6 +98,11 @@ pub struct BuildOptions { pub check_test_code: bool, #[clap(skip)] pub known_attributes: BTreeSet, + #[clap(skip)] + pub experiments: Vec, + /// Select bytecode, language, compiler for Move 2 + #[clap(long)] + pub move_2: bool, } // Because named_addresses has no parser, we can't use clap's default impl. This must be aligned @@ -121,8 +129,32 @@ impl Default for BuildOptions { skip_attribute_checks: false, check_test_code: false, known_attributes: extended_checks::get_all_attribute_names().clone(), + experiments: vec![], + move_2: false, + } + } +} + +impl BuildOptions { + pub fn move_2() -> Self { + BuildOptions { + bytecode_version: Some(VERSION_7), + language_version: Some(LanguageVersion::V2_0), + compiler_version: Some(CompilerVersion::V2_0), + ..Self::default() } } + + pub fn inferred_bytecode_version(&self) -> u32 { + self.language_version + .unwrap_or_default() + .infer_bytecode_version(self.bytecode_version) + } + + pub fn with_experiment(mut self, exp: &str) -> Self { + self.experiments.push(exp.to_string()); + self + } } /// Represents a built package. It allows to extract `PackageMetadata`. Can also be used to @@ -143,7 +175,13 @@ pub fn build_model( language_version: Option, skip_attribute_checks: bool, known_attributes: BTreeSet, + experiments: Vec, ) -> anyhow::Result { + let bytecode_version = Some( + language_version + .unwrap_or_default() + .infer_bytecode_version(bytecode_version), + ); let build_config = BuildConfig { dev_mode, additional_named_addresses, @@ -164,6 +202,7 @@ pub fn build_model( language_version, skip_attribute_checks, known_attributes, + experiments, }, }; let compiler_version = compiler_version.unwrap_or_default(); @@ -183,7 +222,7 @@ impl BuiltPackage { /// This function currently reports all Move compilation errors and warnings to stdout, /// and is not `Ok` if there was an error among those. 
pub fn build(package_path: PathBuf, options: BuildOptions) -> anyhow::Result { - let bytecode_version = options.bytecode_version; + let bytecode_version = Some(options.inferred_bytecode_version()); let compiler_version = options.compiler_version; let language_version = options.language_version; Self::check_versions(&compiler_version, &language_version)?; @@ -208,6 +247,7 @@ impl BuiltPackage { language_version, skip_attribute_checks, known_attributes: options.known_attributes.clone(), + experiments: options.experiments.clone(), }, }; @@ -217,6 +257,13 @@ impl BuiltPackage { // Run extended checks as well derive runtime metadata let model = &model_opt.expect("move model"); + + if let Some(model_options) = model.get_extension::() { + if model_options.experiment_on(Experiment::STOP_BEFORE_EXTENDED_CHECKS) { + std::process::exit(0) + } + } + let runtime_metadata = extended_checks::run_extended_checks(model); if model.diag_count(Severity::Warning) > 0 { let mut error_writer = StandardStream::stderr(ColorChoice::Auto); @@ -226,6 +273,12 @@ impl BuiltPackage { } } + if let Some(model_options) = model.get_extension::() { + if model_options.experiment_on(Experiment::STOP_AFTER_EXTENDED_CHECKS) { + std::process::exit(0) + } + } + let compiled_pkg_path = package .compiled_package_info .build_flags @@ -257,7 +310,7 @@ impl BuiltPackage { .unwrap() .parent() .unwrap() - .join("doc") + .join(get_docgen_output_dir()) .display() .to_string() }) @@ -325,9 +378,8 @@ impl BuiltPackage { self.package .root_modules() .map(|unit_with_source| { - unit_with_source - .unit - .serialize(self.options.bytecode_version) + let bytecode_version = self.options.inferred_bytecode_version(); + unit_with_source.unit.serialize(Some(bytecode_version)) }) .collect() } @@ -374,7 +426,7 @@ impl BuiltPackage { .map(|unit_with_source| { unit_with_source .unit - .serialize(self.options.bytecode_version) + .serialize(Some(self.options.inferred_bytecode_version())) }) .collect() } diff --git 
a/aptos-move/framework/src/chunked_publish.rs b/aptos-move/framework/src/chunked_publish.rs new file mode 100644 index 0000000000000..fcf14bbb08ceb --- /dev/null +++ b/aptos-move/framework/src/chunked_publish.rs @@ -0,0 +1,191 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use aptos_types::transaction::{EntryFunction, TransactionPayload}; +use move_core_types::{account_address::AccountAddress, ident_str, language_storage::ModuleId}; + +pub const LARGE_PACKAGES_MODULE_ADDRESS: &str = + "0xa29df848eebfe5d981f708c2a5b06d31af2be53bbd8ddc94c8523f4b903f7adb"; // mainnet and testnet + +/// Maximum code & metadata chunk size to be included in a transaction +pub const MAX_CHUNK_SIZE_IN_BYTES: usize = 60_000; + +pub enum PublishType { + AccountDeploy, + ObjectDeploy, + ObjectUpgrade, +} + +pub fn chunk_package_and_create_payloads( + metadata: Vec, + package_code: Vec>, + publish_type: PublishType, + object_address: Option, +) -> Vec { + // Chunk the metadata + let mut metadata_chunks = create_chunks(metadata); + // Separate last chunk for special handling + let mut metadata_chunk = metadata_chunks.pop().expect("Metadata is required"); + + let mut taken_size = metadata_chunk.len(); + let mut payloads = metadata_chunks + .into_iter() + .map(|chunk| large_packages_stage_code_chunk(chunk, vec![], vec![])) + .collect::>(); + + let mut code_indices: Vec = vec![]; + let mut code_chunks: Vec> = vec![]; + + for (idx, module_code) in package_code.into_iter().enumerate() { + let chunked_module = create_chunks(module_code); + for chunk in chunked_module { + if taken_size + chunk.len() > MAX_CHUNK_SIZE_IN_BYTES { + // Create a payload and reset accumulators + let payload = large_packages_stage_code_chunk( + metadata_chunk, + code_indices.clone(), + code_chunks.clone(), + ); + payloads.push(payload); + + metadata_chunk = vec![]; + code_indices.clear(); + code_chunks.clear(); + taken_size = 0; + } + + code_indices.push(idx as u16); + taken_size += 
chunk.len(); + code_chunks.push(chunk); + } + } + + // The final call includes staging the last metadata and code chunk, and then publishing or upgrading the package on-chain. + let payload = match publish_type { + PublishType::AccountDeploy => large_packages_stage_code_chunk_and_publish_to_account( + metadata_chunk, + code_indices, + code_chunks, + ), + PublishType::ObjectDeploy => large_packages_stage_code_chunk_and_publish_to_object( + metadata_chunk, + code_indices, + code_chunks, + ), + PublishType::ObjectUpgrade => large_packages_stage_code_chunk_and_upgrade_object_code( + metadata_chunk, + code_indices, + code_chunks, + object_address, + ), + }; + payloads.push(payload); + + payloads +} + +// Create chunks of data based on the defined maximum chunk size. +fn create_chunks(data: Vec) -> Vec> { + data.chunks(MAX_CHUNK_SIZE_IN_BYTES) + .map(|chunk| chunk.to_vec()) + .collect() +} + +// Create a transaction payload for staging chunked data to the staging area. +fn large_packages_stage_code_chunk( + metadata_chunk: Vec, + code_indices: Vec, + code_chunks: Vec>, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::from_hex_literal(LARGE_PACKAGES_MODULE_ADDRESS).unwrap(), + ident_str!("large_packages").to_owned(), + ), + ident_str!("stage_code_chunk").to_owned(), + vec![], + vec![ + bcs::to_bytes(&metadata_chunk).unwrap(), + bcs::to_bytes(&code_indices).unwrap(), + bcs::to_bytes(&code_chunks).unwrap(), + ], + )) +} + +// Create a transaction payload for staging chunked data and finally publishing the package to an account. 
+fn large_packages_stage_code_chunk_and_publish_to_account( + metadata_chunk: Vec, + code_indices: Vec, + code_chunks: Vec>, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::from_hex_literal(LARGE_PACKAGES_MODULE_ADDRESS).unwrap(), + ident_str!("large_packages").to_owned(), + ), + ident_str!("stage_code_chunk_and_publish_to_account").to_owned(), + vec![], + vec![ + bcs::to_bytes(&metadata_chunk).unwrap(), + bcs::to_bytes(&code_indices).unwrap(), + bcs::to_bytes(&code_chunks).unwrap(), + ], + )) +} + +// Create a transaction payload for staging chunked data and finally publishing the package to an object. +fn large_packages_stage_code_chunk_and_publish_to_object( + metadata_chunk: Vec, + code_indices: Vec, + code_chunks: Vec>, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::from_hex_literal(LARGE_PACKAGES_MODULE_ADDRESS).unwrap(), + ident_str!("large_packages").to_owned(), + ), + ident_str!("stage_code_chunk_and_publish_to_object").to_owned(), + vec![], + vec![ + bcs::to_bytes(&metadata_chunk).unwrap(), + bcs::to_bytes(&code_indices).unwrap(), + bcs::to_bytes(&code_chunks).unwrap(), + ], + )) +} + +// Create a transaction payload for staging chunked data and finally upgrading the object package. 
+fn large_packages_stage_code_chunk_and_upgrade_object_code( + metadata_chunk: Vec, + code_indices: Vec, + code_chunks: Vec>, + code_object: Option, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::from_hex_literal(LARGE_PACKAGES_MODULE_ADDRESS).unwrap(), + ident_str!("large_packages").to_owned(), + ), + ident_str!("stage_code_chunk_and_upgrade_object_code").to_owned(), + vec![], + vec![ + bcs::to_bytes(&metadata_chunk).unwrap(), + bcs::to_bytes(&code_indices).unwrap(), + bcs::to_bytes(&code_chunks).unwrap(), + bcs::to_bytes(&code_object).unwrap(), + ], + )) +} + +// Cleanup account's `StagingArea` resource. +pub fn large_packages_cleanup_staging_area() -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::from_hex_literal(LARGE_PACKAGES_MODULE_ADDRESS).unwrap(), + ident_str!("large_packages").to_owned(), + ), + ident_str!("cleanup_staging_area").to_owned(), + vec![], + vec![], + )) +} diff --git a/aptos-move/framework/src/docgen.rs b/aptos-move/framework/src/docgen.rs index f083489831e13..7b15f234d37b1 100644 --- a/aptos-move/framework/src/docgen.rs +++ b/aptos-move/framework/src/docgen.rs @@ -10,6 +10,11 @@ use move_docgen::OutputFormat; use move_model::model::GlobalEnv; use std::{path::PathBuf, sync::Mutex}; +pub fn get_docgen_output_dir() -> String { + const MVC_DOCGEN_OUTPUT_DIR: &str = "MVC_DOCGEN_OUTPUT_DIR"; + std::env::var(MVC_DOCGEN_OUTPUT_DIR).unwrap_or_else(|_| "doc".to_owned()) +} + #[derive(Debug, Clone, clap::Parser, serde::Serialize, serde::Deserialize, Default)] pub struct DocgenOptions { /// Whether to include private declarations and implementations into the generated @@ -70,7 +75,7 @@ impl DocgenOptions { let _lock = MUTEX.lock(); let current_dir = std::env::current_dir()?.canonicalize()?; std::env::set_current_dir(&package_path)?; - let output_directory = PathBuf::from("doc"); + let output_directory = 
PathBuf::from(get_docgen_output_dir()); let doc_path = doc_path .into_iter() .filter_map(|s| { diff --git a/aptos-move/framework/src/lib.rs b/aptos-move/framework/src/lib.rs index d9c7a338e48d4..5103790707cfe 100644 --- a/aptos-move/framework/src/lib.rs +++ b/aptos-move/framework/src/lib.rs @@ -21,6 +21,7 @@ pub use release_builder::*; pub mod docgen; pub mod extended_checks; pub use extended_checks::ResourceGroupScope; +pub mod chunked_publish; pub mod prover; mod release_bundle; mod released_framework; diff --git a/aptos-move/framework/src/module_metadata.rs b/aptos-move/framework/src/module_metadata.rs index 4b969d1b96eae..18a6178e23bc6 100644 --- a/aptos-move/framework/src/module_metadata.rs +++ b/aptos-move/framework/src/module_metadata.rs @@ -312,6 +312,17 @@ pub fn get_compilation_metadata_from_compiled_module( } } +/// Extract compilation metadata from a compiled script +pub fn get_compilation_metadata_from_compiled_script( + module: &CompiledScript, +) -> Option { + if let Some(data) = find_metadata_in_script(module, COMPILATION_METADATA_KEY) { + bcs::from_bytes::(&data.value).ok() + } else { + None + } +} + // This is mostly a copy paste of the existing function // get_metadata_from_compiled_module. 
In the API types there is a unifying trait for // modules and scripts called Bytecode that could help eliminate this duplication, @@ -431,7 +442,7 @@ pub fn is_valid_resource_group( if let Ok(ident_struct) = Identifier::new(struct_) { if let Some((struct_handle, struct_def)) = structs.get(ident_struct.as_ident_str()) { let num_fields = match &struct_def.field_information { - StructFieldInformation::Native => 0, + StructFieldInformation::Native | StructFieldInformation::DeclaredVariants(_) => 0, StructFieldInformation::Declared(fields) => fields.len(), }; if struct_handle.abilities == AbilitySet::EMPTY @@ -613,11 +624,23 @@ fn check_module_complexity(module: &CompiledModule) -> Result<(), MetaDataValida check_ident_complexity(module, &mut meter, handle.name)?; } for def in module.struct_defs() { - if let StructFieldInformation::Declared(fields) = &def.field_information { - for field in fields { - check_ident_complexity(module, &mut meter, field.name)?; - check_sigtok_complexity(module, &mut meter, &field.signature.0)? - } + match &def.field_information { + StructFieldInformation::Native => {}, + StructFieldInformation::Declared(fields) => { + for field in fields { + check_ident_complexity(module, &mut meter, field.name)?; + check_sigtok_complexity(module, &mut meter, &field.signature.0)? + } + }, + StructFieldInformation::DeclaredVariants(variants) => { + for variant in variants { + check_ident_complexity(module, &mut meter, variant.name)?; + for field in &variant.fields { + check_ident_complexity(module, &mut meter, field.name)?; + check_sigtok_complexity(module, &mut meter, &field.signature.0)? 
+ } + } + }, } } for def in module.function_defs() { diff --git a/aptos-move/framework/src/natives/dispatchable_fungible_asset.rs b/aptos-move/framework/src/natives/dispatchable_fungible_asset.rs index d7d0d76a6784d..9554c5d163377 100644 --- a/aptos-move/framework/src/natives/dispatchable_fungible_asset.rs +++ b/aptos-move/framework/src/natives/dispatchable_fungible_asset.rs @@ -11,7 +11,7 @@ use smallvec::SmallVec; use std::collections::VecDeque; /*************************************************************************************************** - * native fun dispatchable_withdraw / dispatchable_deposit / dispatchable_derived_balance + * native fun dispatchable_withdraw / dispatchable_deposit / dispatchable_derived_balance / dispatchable_derived_supply * * Directs control flow based on the last argument. We use the same native function implementation * for all dispatching native. @@ -54,6 +54,7 @@ pub fn make_all( ("dispatchable_withdraw", native_dispatch as RawSafeNative), ("dispatchable_deposit", native_dispatch), ("dispatchable_derived_balance", native_dispatch), + ("dispatchable_derived_supply", native_dispatch), ]; builder.make_named_natives(natives) diff --git a/aptos-move/framework/src/natives/function_info.rs b/aptos-move/framework/src/natives/function_info.rs index 80a953af031dd..2884ad8c82141 100644 --- a/aptos-move/framework/src/natives/function_info.rs +++ b/aptos-move/framework/src/natives/function_info.rs @@ -116,6 +116,10 @@ fn native_check_dispatch_type_compatibility_impl( && rhs.return_tys() == lhs.return_tys() && &lhs.param_tys()[0..lhs.param_count() - 1] == rhs.param_tys() && !rhs.is_friend_or_private() + && (!context + .get_feature_flags() + .is_enabled(aptos_types::on_chain_config::FeatureFlag::DISALLOW_USER_NATIVES) + || !rhs.is_native()) && lhs_id != rhs_id )]) } diff --git a/aptos-move/framework/src/natives/mod.rs b/aptos-move/framework/src/natives/mod.rs index bec070b996540..dcfc80407f932 100644 --- 
a/aptos-move/framework/src/natives/mod.rs +++ b/aptos-move/framework/src/natives/mod.rs @@ -24,7 +24,7 @@ pub mod util; use crate::natives::cryptography::multi_ed25519; use aggregator_natives::{aggregator, aggregator_factory, aggregator_v2}; -use aptos_native_interface::SafeNativeBuilder; +use aptos_native_interface::{RawSafeNative, SafeNativeBuilder}; use cryptography::ed25519; use move_core_types::account_address::AccountAddress; use move_vm_runtime::native_functions::{make_table_from_iter, NativeFunctionTable}; @@ -39,6 +39,7 @@ pub mod status { pub fn all_natives( framework_addr: AccountAddress, builder: &SafeNativeBuilder, + inject_create_signer_for_gov_sim: bool, ) -> NativeFunctionTable { let mut natives = vec![]; @@ -91,5 +92,15 @@ pub fn all_natives( dispatchable_fungible_asset::make_all(builder) ); + if inject_create_signer_for_gov_sim { + add_natives_from_module!( + "aptos_governance", + builder.make_named_natives([( + "create_signer", + create_signer::native_create_signer as RawSafeNative + )]) + ); + } + make_table_from_iter(framework_addr, natives) } diff --git a/aptos-move/framework/src/natives/string_utils.rs b/aptos-move/framework/src/natives/string_utils.rs index b724fe2c476d8..0a4c7c71583f8 100644 --- a/aptos-move/framework/src/natives/string_utils.rs +++ b/aptos-move/framework/src/natives/string_utils.rs @@ -308,6 +308,45 @@ fn native_format_impl( )?; out.push('}'); }, + MoveTypeLayout::Struct(MoveStructLayout::RuntimeVariants(variants)) => { + let struct_value = val.value_as::()?; + let (tag, elems) = struct_value.unpack_with_tag()?; + if (tag as usize) >= variants.len() { + return Err(SafeNativeError::Abort { + abort_code: EINVALID_FORMAT, + }); + } + out.push_str(&format!("#{}{{", tag)); + format_vector( + context, + variants[tag as usize].iter(), + elems.collect(), + depth, + !context.single_line, + out, + )?; + out.push('}'); + }, + MoveTypeLayout::Struct(MoveStructLayout::WithVariants(variants)) => { + let struct_value = val.value_as::()?; 
+ let (tag, elems) = struct_value.unpack_with_tag()?; + if (tag as usize) >= variants.len() { + return Err(SafeNativeError::Abort { + abort_code: EINVALID_FORMAT, + }); + } + let variant = &variants[tag as usize]; + out.push_str(&format!("{}{{", variant.name)); + format_vector( + context, + variant.fields.iter(), + elems.collect(), + depth, + !context.single_line, + out, + )?; + out.push('}'); + }, // This is unreachable because we check layout at the start. Still, return // an error to be safe. diff --git a/aptos-move/framework/src/prover.rs b/aptos-move/framework/src/prover.rs index 5351388a66ad1..c88b04596f094 100644 --- a/aptos-move/framework/src/prover.rs +++ b/aptos-move/framework/src/prover.rs @@ -123,6 +123,7 @@ impl ProverOptions { language_version: Option, skip_attribute_checks: bool, known_attributes: &BTreeSet, + experiments: &[String], ) -> anyhow::Result<()> { let now = Instant::now(); let for_test = self.for_test; @@ -136,6 +137,7 @@ impl ProverOptions { language_version, skip_attribute_checks, known_attributes.clone(), + experiments.to_vec(), )?; let mut options = self.convert_options(); // Need to ensure a distinct output.bpl file for concurrent execution. 
In non-test diff --git a/aptos-move/framework/src/release_bundle.rs b/aptos-move/framework/src/release_bundle.rs index 25a4054438005..ed6044c838f91 100644 --- a/aptos-move/framework/src/release_bundle.rs +++ b/aptos-move/framework/src/release_bundle.rs @@ -3,6 +3,7 @@ use crate::{built_package::BuiltPackage, natives::code::PackageMetadata, path_in_crate}; use anyhow::Context; +use aptos_crypto::HashValue; use aptos_types::account_address::AccountAddress; use move_binary_format::{access::ModuleAccess, errors::PartialVMError, CompiledModule}; use move_command_line_common::files::{extension_equals, find_filenames, MOVE_EXTENSION}; @@ -165,7 +166,7 @@ impl ReleasePackage { for_address: AccountAddress, out: PathBuf, ) -> anyhow::Result<()> { - self.generate_script_proposal_impl(for_address, out, false, false, Vec::new()) + self.generate_script_proposal_impl(for_address, out, false, false, None) } pub fn generate_script_proposal_testnet( @@ -173,14 +174,14 @@ impl ReleasePackage { for_address: AccountAddress, out: PathBuf, ) -> anyhow::Result<()> { - self.generate_script_proposal_impl(for_address, out, true, false, Vec::new()) + self.generate_script_proposal_impl(for_address, out, true, false, None) } pub fn generate_script_proposal_multi_step( &self, for_address: AccountAddress, out: PathBuf, - next_execution_hash: Vec, + next_execution_hash: Option, ) -> anyhow::Result<()> { self.generate_script_proposal_impl(for_address, out, true, true, next_execution_hash) } @@ -191,7 +192,7 @@ impl ReleasePackage { out: PathBuf, is_testnet: bool, is_multi_step: bool, - next_execution_hash: Vec, + next_execution_hash: Option, ) -> anyhow::Result<()> { let writer = CodeWriter::new(Loc::default()); emitln!( @@ -225,14 +226,14 @@ impl ReleasePackage { } else { emitln!(writer, "fun main(proposal_id: u64){"); writer.indent(); - Self::generate_next_execution_hash_blob(&writer, for_address, next_execution_hash); + generate_next_execution_hash_blob(&writer, for_address, 
next_execution_hash); } emitln!(writer, "let code = vector::empty();"); for i in 0..self.code.len() { emitln!(writer, "let chunk{} = ", i); - Self::generate_blob_as_hex_string(&writer, &self.code[i]); + generate_blob_as_hex_string(&writer, &self.code[i]); emitln!(writer, ";"); emitln!(writer, "vector::push_back(&mut code, chunk{});", i); } @@ -253,7 +254,7 @@ impl ReleasePackage { }; let chunk = metadata.drain(0..to_drain).collect::>(); emit!(writer, "let chunk{} = ", i); - Self::generate_blob_as_hex_string(&writer, &chunk); + generate_blob_as_hex_string(&writer, &chunk); emitln!(writer, ";") } @@ -272,28 +273,31 @@ impl ReleasePackage { writer.process_result(|s| std::fs::write(&out, s))?; Ok(()) } +} - fn generate_blob_as_hex_string(writer: &CodeWriter, data: &[u8]) { - emit!(writer, "x\""); - for b in data.iter() { - emit!(writer, "{:02x}", b); - } - emit!(writer, "\""); +pub fn generate_blob_as_hex_string(writer: &CodeWriter, data: &[u8]) { + emit!(writer, "x\""); + for b in data.iter() { + emit!(writer, "{:02x}", b); } + emit!(writer, "\""); +} - fn generate_next_execution_hash_blob( - writer: &CodeWriter, - for_address: AccountAddress, - next_execution_hash: Vec, - ) { - if next_execution_hash == "vector::empty()".as_bytes() { +pub fn generate_next_execution_hash_blob( + writer: &CodeWriter, + for_address: AccountAddress, + next_execution_hash: Option, +) { + match next_execution_hash { + None => { emitln!( - writer, - "let framework_signer = supra_governance::resolve_multi_step_proposal(proposal_id, @{}, {});\n", - for_address, - "vector::empty()", - ); - } else { + writer, + "let framework_signer = supra_governance::resolve_multi_step_proposal(proposal_id, @{}, {});\n", + for_address, + "x\"\"", + ); + }, + Some(next_execution_hash) => { emitln!( writer, "let framework_signer = supra_governance::resolve_multi_step_proposal(" @@ -301,14 +305,11 @@ impl ReleasePackage { writer.indent(); emitln!(writer, "proposal_id,"); emitln!(writer, "@{},", for_address); - 
emit!(writer, "vector["); - for b in next_execution_hash.iter() { - emit!(writer, "{}u8,", b); - } - emitln!(writer, "],"); + generate_blob_as_hex_string(writer, next_execution_hash.as_slice()); + emit!(writer, ","); writer.unindent(); emitln!(writer, ");"); - } + }, } } diff --git a/aptos-move/framework/supra-framework/doc/account.md b/aptos-move/framework/supra-framework/doc/account.md index 4fc29c93fda93..9668e554bd182 100644 --- a/aptos-move/framework/supra-framework/doc/account.md +++ b/aptos-move/framework/supra-framework/doc/account.md @@ -760,7 +760,7 @@ The caller does not have a digital-signature-based capability to call this funct -The specified rotation capablity offer does not exist at the specified offerer address +The specified rotation capability offer does not exist at the specified offerer address
const ENO_SUCH_ROTATION_CAPABILITY_OFFER: u64 = 18;
@@ -1224,7 +1224,7 @@ A scheme of 0 refers to an Ed25519 key and a scheme of 1 refers to Multi-Ed25519
 Here is an example attack if we don't ask for the second signature cap_update_table:
 Alice has rotated her account addr_a to new_addr_a. As a result, the following entry is created, to help Alice when recovering her wallet:
 OriginatingAddress[new_addr_a] -> addr_a
-Alice has had bad day: her laptop blew up and she needs to reset her account on a new one.
+Alice has had a bad day: her laptop blew up and she needs to reset her account on a new one.
 (Fortunately, she still has her secret key new_sk_a associated with her new address new_addr_a, so she can do this.)
 
 But Bob likes to mess with Alice.
@@ -3151,6 +3151,7 @@ The value of signer_capability_offer.for of Account resource under the signer is
 pragma aborts_if_is_strict = false;
 aborts_if [abstract] false;
 ensures [abstract] result == spec_create_resource_address(source, seed);
+ensures [abstract] source != result;
 
@@ -3240,7 +3241,7 @@ The system reserved addresses is @0x1 / @0x2 / @0x3 / @0x4 / @0x5 / @0x6 / @0x7 The Account existed under the signer. -The guid_creation_num of the ccount resource is up to MAX_U64. +The guid_creation_num of the account resource is up to MAX_U64.
let addr = signer::address_of(account_signer);
diff --git a/aptos-move/framework/supra-framework/doc/aggregator_factory.md b/aptos-move/framework/supra-framework/doc/aggregator_factory.md
index a731f537bc88c..b2a20d5d13908 100644
--- a/aptos-move/framework/supra-framework/doc/aggregator_factory.md
+++ b/aptos-move/framework/supra-framework/doc/aggregator_factory.md
@@ -217,7 +217,7 @@ Returns a new aggregator.
 2
 To create a new aggregator instance, the aggregator factory must already be initialized and exist under the Supra account.
 High
-The create_aggregator_internal function asserts that AggregatorFactory exists for the Supra account.
+The create_aggregator_internal function asserts that AggregatorFactory exists for the Supra account.
 Formally verified via CreateAggregatorInternalAbortsIf.
 
 
diff --git a/aptos-move/framework/supra-framework/doc/aggregator_v2.md b/aptos-move/framework/supra-framework/doc/aggregator_v2.md
index 842ad22ecdab3..38704a60e34f2 100644
--- a/aptos-move/framework/supra-framework/doc/aggregator_v2.md
+++ b/aptos-move/framework/supra-framework/doc/aggregator_v2.md
@@ -58,6 +58,10 @@ read, read_snapshot, read_derived_string
     -  [Function `read`](#@Specification_1_read)
     -  [Function `snapshot`](#@Specification_1_snapshot)
     -  [Function `create_snapshot`](#@Specification_1_create_snapshot)
+    -  [Function `read_snapshot`](#@Specification_1_read_snapshot)
+    -  [Function `read_derived_string`](#@Specification_1_read_derived_string)
+    -  [Function `create_derived_string`](#@Specification_1_create_derived_string)
+    -  [Function `derive_string_concat`](#@Specification_1_derive_string_concat)
     -  [Function `copy_snapshot`](#@Specification_1_copy_snapshot)
     -  [Function `string_concat`](#@Specification_1_string_concat)
 
@@ -902,6 +906,70 @@ DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTI
 
 
 
+
pragma opaque;
+
+ + + + + +### Function `read_snapshot` + + +
public fun read_snapshot<IntElement>(snapshot: &aggregator_v2::AggregatorSnapshot<IntElement>): IntElement
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `read_derived_string` + + +
public fun read_derived_string(snapshot: &aggregator_v2::DerivedStringSnapshot): string::String
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `create_derived_string` + + +
public fun create_derived_string(value: string::String): aggregator_v2::DerivedStringSnapshot
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `derive_string_concat` + + +
public fun derive_string_concat<IntElement>(before: string::String, snapshot: &aggregator_v2::AggregatorSnapshot<IntElement>, after: string::String): aggregator_v2::DerivedStringSnapshot
+
+ + + +
pragma opaque;
 
diff --git a/aptos-move/framework/supra-framework/doc/block.md b/aptos-move/framework/supra-framework/doc/block.md index cd6b2ab647d8b..51c418a64bd73 100644 --- a/aptos-move/framework/supra-framework/doc/block.md +++ b/aptos-move/framework/supra-framework/doc/block.md @@ -924,7 +924,7 @@ new block event for WriteSetPayload. 1 -During the module's initialization, it guarantees that the BlockResource resource moves under the Supra framework account with initial values. +During the module's initialization, it guarantees that the BlockResource resource moves under the Aptos framework account with initial values. High The initialize function is responsible for setting up the initial state of the module, ensuring that the following conditions are met (1) the BlockResource resource is created, indicating its existence within the module's context, and moved under the Supra framework account, (2) the block height is set to zero during initialization, and (3) the epoch interval is greater than zero. Formally Verified via Initialize. diff --git a/aptos-move/framework/supra-framework/doc/chain_id.md b/aptos-move/framework/supra-framework/doc/chain_id.md index 9584e32a83824..bf1ee60187528 100644 --- a/aptos-move/framework/supra-framework/doc/chain_id.md +++ b/aptos-move/framework/supra-framework/doc/chain_id.md @@ -129,7 +129,7 @@ Return the chain ID of this instance. 2 -The chain id can only be fetched if the chain id resource exists under the Supra framework account. +The chain id can only be fetched if the chain id resource exists under the Aptos framework account. Low The chain_id::get function fetches the chain id by borrowing the ChainId resource from the supra_framework account. Formally verified via get. 
diff --git a/aptos-move/framework/supra-framework/doc/code.md b/aptos-move/framework/supra-framework/doc/code.md index 36721ea20c942..2c6f85409860b 100644 --- a/aptos-move/framework/supra-framework/doc/code.md +++ b/aptos-move/framework/supra-framework/doc/code.md @@ -612,9 +612,9 @@ package. // Checks for valid dependencies to other packages let allowed_deps = check_dependencies(addr, &pack); - // Check package against conflicts + // Check package against conflicts // To avoid prover compiler error on spec - // the package need to be an immutable variable + // the package need to be an immutable variable let module_names = get_module_names(&pack); let package_immutable = &borrow_global<PackageRegistry>(addr).packages; let len = vector::length(package_immutable); @@ -688,11 +688,11 @@ package. let registry = borrow_global_mut<PackageRegistry>(code_object_addr); vector::for_each_mut(&mut registry.packages, |pack| { - let package: &mut PackageMetadata = pack; - package.upgrade_policy = upgrade_policy_immutable(); + let package: &mut PackageMetadata = pack; + package.upgrade_policy = upgrade_policy_immutable(); }); - // We unfortunately have to make a copy of each package to avoid borrow checker issues as check_dependencies + // We unfortunately have to make a copy of each package to avoid borrow checker issues as check_dependencies // needs to borrow PackageRegistry from the dependency packages. // This would increase the amount of gas used, but this is a rare operation and it's rare to have many packages // in a single code object. @@ -787,7 +787,7 @@ Checks whether a new package with given names can co-exist with old package.
fun check_coexistence(old_pack: &PackageMetadata, new_modules: &vector<String>) {
-    // The modules introduced by each package must not overlap with `names`.
+    // The modules introduced by each package must not overlap with `names`.
     vector::for_each_ref(&old_pack.modules, |old_mod| {
         let old_mod: &ModuleMetadata = old_mod;
         let j = 0;
diff --git a/aptos-move/framework/supra-framework/doc/config_buffer.md b/aptos-move/framework/supra-framework/doc/config_buffer.md
index b235b5f7f5f06..214505cd9b849 100644
--- a/aptos-move/framework/supra-framework/doc/config_buffer.md
+++ b/aptos-move/framework/supra-framework/doc/config_buffer.md
@@ -287,7 +287,7 @@ Typically used in X::on_new_epoch() where X is an on-chaon config.
     let key = type_info::type_name<T>();
     aborts_if !simple_map::spec_contains_key(configs.configs, key);
     include any::UnpackAbortsIf<T> {
-        x: simple_map::spec_get(configs.configs, key)
+        self: simple_map::spec_get(configs.configs, key)
     };
 }
 
@@ -318,7 +318,7 @@ Typically used in X::on_new_epoch() where X is an on-chaon config. let type_name = type_info::type_name<T>(); let configs = global<PendingConfigs>(@supra_framework); include spec_fun_does_exist<T>(type_name) ==> any::UnpackAbortsIf<T> { - x: simple_map::spec_get(configs.configs, type_name) + self: simple_map::spec_get(configs.configs, type_name) }; }
@@ -333,7 +333,7 @@ Typically used in X::on_new_epoch() where X is an on-chaon config. let type_name = type_info::type_name<T>(); let configs = global<PendingConfigs>(@supra_framework); include spec_fun_does_exist<T>(type_name) ==> any::UnpackRequirement<T> { - x: simple_map::spec_get(configs.configs, type_name) + self: simple_map::spec_get(configs.configs, type_name) }; }
diff --git a/aptos-move/framework/supra-framework/doc/dispatchable_fungible_asset.md b/aptos-move/framework/supra-framework/doc/dispatchable_fungible_asset.md index bb6d62d42743e..b3b76f108128c 100644 --- a/aptos-move/framework/supra-framework/doc/dispatchable_fungible_asset.md +++ b/aptos-move/framework/supra-framework/doc/dispatchable_fungible_asset.md @@ -23,19 +23,23 @@ See AIP-73 for further discussion - [Resource `TransferRefStore`](#0x1_dispatchable_fungible_asset_TransferRefStore) - [Constants](#@Constants_0) - [Function `register_dispatch_functions`](#0x1_dispatchable_fungible_asset_register_dispatch_functions) +- [Function `register_derive_supply_dispatch_function`](#0x1_dispatchable_fungible_asset_register_derive_supply_dispatch_function) - [Function `withdraw`](#0x1_dispatchable_fungible_asset_withdraw) - [Function `deposit`](#0x1_dispatchable_fungible_asset_deposit) - [Function `transfer`](#0x1_dispatchable_fungible_asset_transfer) - [Function `transfer_assert_minimum_deposit`](#0x1_dispatchable_fungible_asset_transfer_assert_minimum_deposit) - [Function `derived_balance`](#0x1_dispatchable_fungible_asset_derived_balance) +- [Function `derived_supply`](#0x1_dispatchable_fungible_asset_derived_supply) - [Function `borrow_transfer_ref`](#0x1_dispatchable_fungible_asset_borrow_transfer_ref) - [Function `dispatchable_withdraw`](#0x1_dispatchable_fungible_asset_dispatchable_withdraw) - [Function `dispatchable_deposit`](#0x1_dispatchable_fungible_asset_dispatchable_deposit) - [Function `dispatchable_derived_balance`](#0x1_dispatchable_fungible_asset_dispatchable_derived_balance) +- [Function `dispatchable_derived_supply`](#0x1_dispatchable_fungible_asset_dispatchable_derived_supply) - [Specification](#@Specification_1) - [Function `dispatchable_withdraw`](#@Specification_1_dispatchable_withdraw) - [Function `dispatchable_deposit`](#@Specification_1_dispatchable_deposit) - [Function 
`dispatchable_derived_balance`](#@Specification_1_dispatchable_derived_balance) + - [Function `dispatchable_derived_supply`](#@Specification_1_dispatchable_derived_supply)
use 0x1::error;
@@ -160,6 +164,36 @@ TransferRefStore doesn't exist on the fungible asset type.
 
 
 
+
+
+
+
+## Function `register_derive_supply_dispatch_function`
+
+
+
+
public fun register_derive_supply_dispatch_function(constructor_ref: &object::ConstructorRef, dispatch_function: option::Option<function_info::FunctionInfo>)
+
+ + + +
+Implementation + + +
public fun register_derive_supply_dispatch_function(
+    constructor_ref: &ConstructorRef,
+    dispatch_function: Option<FunctionInfo>
+) {
+    fungible_asset::register_derive_supply_dispatch_function(
+        constructor_ref,
+        dispatch_function
+    );
+}
+
+ + +
@@ -364,6 +398,45 @@ The semantics of value will be governed by the function specified in DispatchFun + + + + +## Function `derived_supply` + +Get the derived supply of the fungible asset using the overloaded hook. + +The semantics of supply will be governed by the function specified in DeriveSupplyDispatch. + + +
#[view]
+public fun derived_supply<T: key>(metadata: object::Object<T>): option::Option<u128>
+
+ + + +
+Implementation + + +
public fun derived_supply<T: key>(metadata: Object<T>): Option<u128> {
+    let func_opt = fungible_asset::derived_supply_dispatch_function(metadata);
+    if (option::is_some(&func_opt)) {
+        assert!(
+            features::dispatchable_fungible_asset_enabled(),
+            error::aborted(ENOT_ACTIVATED)
+        );
+        let func = option::borrow(&func_opt);
+        function_info::load_module_from_function(func);
+        dispatchable_derived_supply(metadata, func)
+    } else {
+        fungible_asset::supply(metadata)
+    }
+}
+
+ + +
@@ -474,6 +547,31 @@ The semantics of value will be governed by the function specified in DispatchFun + + + + +## Function `dispatchable_derived_supply` + + + +
fun dispatchable_derived_supply<T: key>(store: object::Object<T>, function: &function_info::FunctionInfo): option::Option<u128>
+
+ + + +
+Implementation + + +
native fun dispatchable_derived_supply<T: key>(
+    store: Object<T>,
+    function: &FunctionInfo,
+): Option<u128>;
+
+ + +
@@ -530,6 +628,22 @@ The semantics of value will be governed by the function specified in DispatchFun +
pragma opaque;
+
+ + + + + +### Function `dispatchable_derived_supply` + + +
fun dispatchable_derived_supply<T: key>(store: object::Object<T>, function: &function_info::FunctionInfo): option::Option<u128>
+
+ + + +
pragma opaque;
 
diff --git a/aptos-move/framework/supra-framework/doc/fungible_asset.md b/aptos-move/framework/supra-framework/doc/fungible_asset.md index fd1f5351a082e..9aa85d53a6a48 100644 --- a/aptos-move/framework/supra-framework/doc/fungible_asset.md +++ b/aptos-move/framework/supra-framework/doc/fungible_asset.md @@ -13,6 +13,7 @@ metadata object can be any object that equipped with + +## Resource `DeriveSupply` + + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct DeriveSupply has key
+
+ + + +
+Fields + + +
+
+dispatch_function: option::Option<function_info::FunctionInfo> +
+
+ +
+
+ +
@@ -892,6 +924,16 @@ Provided derived_balance function type doesn't meet the signature requirement. + + +Provided derived_supply function type doesn't meet the signature requirement. + + +
const EDERIVED_SUPPLY_FUNCTION_SIGNATURE_MISMATCH: u64 = 33;
+
+ + + Fungible asset and store do not match. @@ -1403,28 +1445,13 @@ Create a fungible asset store whose transfer rule would be overloaded by the pro ) ); }); - - // Cannot register hook for SUPRA. - assert!( - object::address_from_constructor_ref(constructor_ref) != @aptos_fungible_asset, - error::permission_denied(EAPT_NOT_DISPATCHABLE) - ); - assert!( - !object::can_generate_delete_ref(constructor_ref), - error::invalid_argument(EOBJECT_IS_DELETABLE) - ); + register_dispatch_function_sanity_check(constructor_ref); assert!( !exists<DispatchFunctionStore>( object::address_from_constructor_ref(constructor_ref) ), error::already_exists(EALREADY_REGISTERED) ); - assert!( - exists<Metadata>( - object::address_from_constructor_ref(constructor_ref) - ), - error::not_found(EFUNGIBLE_METADATA_EXISTENCE), - ); let store_obj = &object::generate_signer(constructor_ref); @@ -1442,6 +1469,110 @@ Create a fungible asset store whose transfer rule would be overloaded by the pro + + + + +## Function `register_derive_supply_dispatch_function` + +Define the derived supply dispatch with the provided function. + + +
public(friend) fun register_derive_supply_dispatch_function(constructor_ref: &object::ConstructorRef, dispatch_function: option::Option<function_info::FunctionInfo>)
+
+ + + +
+Implementation + + +
public(friend) fun register_derive_supply_dispatch_function(
+    constructor_ref: &ConstructorRef,
+    dispatch_function: Option<FunctionInfo>
+) {
+    // Verify that caller type matches callee type so wrongly typed function cannot be registered.
+    option::for_each_ref(&dispatch_function, |supply_function| {
+        let function_info = function_info::new_function_info_from_address(
+            @supra_framework,
+            string::utf8(b"dispatchable_fungible_asset"),
+            string::utf8(b"dispatchable_derived_supply"),
+        );
+        // Verify that caller type matches callee type so wrongly typed function cannot be registered.
+        assert!(
+            function_info::check_dispatch_type_compatibility(
+                &function_info,
+                supply_function
+            ),
+            error::invalid_argument(
+                EDERIVED_SUPPLY_FUNCTION_SIGNATURE_MISMATCH
+            )
+        );
+    });
+    register_dispatch_function_sanity_check(constructor_ref);
+    assert!(
+        !exists<DeriveSupply>(
+            object::address_from_constructor_ref(constructor_ref)
+        ),
+        error::already_exists(EALREADY_REGISTERED)
+    );
+
+
+    let store_obj = &object::generate_signer(constructor_ref);
+
+    // Store the overload function hook.
+    move_to<DeriveSupply>(
+        store_obj,
+        DeriveSupply {
+            dispatch_function
+        }
+    );
+}
+
+ + + +
+ + + +## Function `register_dispatch_function_sanity_check` + +Check the requirements for registering a dispatchable function. + + +
fun register_dispatch_function_sanity_check(constructor_ref: &object::ConstructorRef)
+
+ + + +
+Implementation + + +
inline fun register_dispatch_function_sanity_check(
+    constructor_ref: &ConstructorRef,
+)  {
+    // Cannot register hook for SUPRA.
+    assert!(
+        object::address_from_constructor_ref(constructor_ref) != @aptos_fungible_asset,
+        error::permission_denied(EAPT_NOT_DISPATCHABLE)
+    );
+    assert!(
+        !object::can_generate_delete_ref(constructor_ref),
+        error::invalid_argument(EOBJECT_IS_DELETABLE)
+    );
+    assert!(
+        exists<Metadata>(
+            object::address_from_constructor_ref(constructor_ref)
+        ),
+        error::not_found(EFUNGIBLE_METADATA_EXISTENCE),
+    );
+}
+
+ + +
@@ -2243,6 +2374,35 @@ Return whether a fungible asset type is dispatchable. + + + + +## Function `derived_supply_dispatch_function` + + + +
public(friend) fun derived_supply_dispatch_function<T: key>(metadata: object::Object<T>): option::Option<function_info::FunctionInfo>
+
+ + + +
+Implementation + + +
public(friend) fun derived_supply_dispatch_function<T: key>(metadata: Object<T>): Option<FunctionInfo> acquires DeriveSupply {
+    let metadata_addr = object::object_address(&metadata);
+    if (exists<DeriveSupply>(metadata_addr)) {
+        borrow_global<DeriveSupply>(metadata_addr).dispatch_function
+    } else {
+        option::none()
+    }
+}
+
+ + +
@@ -3010,19 +3170,29 @@ Mutate specified fields of the fungible asset's Metadata>(metadata_address); if (option::is_some(&name)){ - mutable_metadata.name = option::extract(&mut name); + let name = option::extract(&mut name); + assert!(string::length(&name) <= MAX_NAME_LENGTH, error::out_of_range(ENAME_TOO_LONG)); + mutable_metadata.name = name; }; if (option::is_some(&symbol)){ - mutable_metadata.symbol = option::extract(&mut symbol); + let symbol = option::extract(&mut symbol); + assert!(string::length(&symbol) <= MAX_SYMBOL_LENGTH, error::out_of_range(ESYMBOL_TOO_LONG)); + mutable_metadata.symbol = symbol; }; if (option::is_some(&decimals)){ - mutable_metadata.decimals = option::extract(&mut decimals); + let decimals = option::extract(&mut decimals); + assert!(decimals <= MAX_DECIMALS, error::out_of_range(EDECIMALS_TOO_LARGE)); + mutable_metadata.decimals = decimals; }; if (option::is_some(&icon_uri)){ - mutable_metadata.icon_uri = option::extract(&mut icon_uri); + let icon_uri = option::extract(&mut icon_uri); + assert!(string::length(&icon_uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG)); + mutable_metadata.icon_uri = icon_uri; }; if (option::is_some(&project_uri)){ - mutable_metadata.project_uri = option::extract(&mut project_uri); + let project_uri = option::extract(&mut project_uri); + assert!(string::length(&project_uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG)); + mutable_metadata.project_uri = project_uri; }; }
diff --git a/aptos-move/framework/supra-framework/doc/jwks.md b/aptos-move/framework/supra-framework/doc/jwks.md index 5db2ddabf93a4..59468edd81d6e 100644 --- a/aptos-move/framework/supra-framework/doc/jwks.md +++ b/aptos-move/framework/supra-framework/doc/jwks.md @@ -27,7 +27,10 @@ have a simple layout which is easily accessible in Rust. - [Struct `PatchUpsertJWK`](#0x1_jwks_PatchUpsertJWK) - [Resource `Patches`](#0x1_jwks_Patches) - [Resource `PatchedJWKs`](#0x1_jwks_PatchedJWKs) +- [Resource `FederatedJWKs`](#0x1_jwks_FederatedJWKs) - [Constants](#@Constants_0) +- [Function `patch_federated_jwks`](#0x1_jwks_patch_federated_jwks) +- [Function `update_federated_jwk_set`](#0x1_jwks_update_federated_jwk_set) - [Function `get_patched_jwk`](#0x1_jwks_get_patched_jwk) - [Function `try_get_patched_jwk`](#0x1_jwks_try_get_patched_jwk) - [Function `upsert_oidc_provider`](#0x1_jwks_upsert_oidc_provider) @@ -59,7 +62,8 @@ have a simple layout which is easily accessible in Rust. - [Function `on_new_epoch`](#@Specification_1_on_new_epoch) -
use 0x1::chain_status;
+
use 0x1::bcs;
+use 0x1::chain_status;
 use 0x1::comparator;
 use 0x1::config_buffer;
 use 0x1::copyable_any;
@@ -67,6 +71,7 @@ have a simple layout which is easily accessible in Rust.
 use 0x1::event;
 use 0x1::option;
 use 0x1::reconfiguration;
+use 0x1::signer;
 use 0x1::string;
 use 0x1::system_addresses;
 use 0x1::vector;
@@ -589,6 +594,34 @@ This is what applications should consume.
 
 
 
+
+Fields + + +
+
+jwks: jwks::AllProvidersJWKs +
+
+ +
+
+ + +
+ + + +## Resource `FederatedJWKs` + +JWKs for federated keyless accounts are stored in this resource. + + +
struct FederatedJWKs has drop, key
+
+ + +
Fields @@ -610,6 +643,33 @@ This is what applications should consume. ## Constants + + + + +
const EFEDERATED_JWKS_TOO_LARGE: u64 = 8;
+
+ + + + + + + +
const EINSTALL_FEDERATED_JWKS_AT_SUPRA_FRAMEWORK: u64 = 7;
+
+ + + + + + + +
const EINVALID_FEDERATED_JWK_SET: u64 = 9;
+
+ + + @@ -709,6 +769,156 @@ This is what applications should consume. + + +We limit the size of a PatchedJWKs resource installed by a dapp owner for federated keyless accounts. +Note: If too large, validators waste work reading it for invalid TXN signatures. + + +
const MAX_FEDERATED_JWKS_SIZE_BYTES: u64 = 2048;
+
+ + + + + +## Function `patch_federated_jwks` + +Called by a federated keyless dapp owner to install the JWKs for the federated OIDC provider (e.g., Auth0, AWS +Cognito, etc). For type-safety, we explicitly use a struct FederatedJWKs { jwks: AllProviderJWKs } instead of +reusing PatchedJWKs { jwks: AllProviderJWKs }, which is a JWK-consensus-specific struct. + + +
public fun patch_federated_jwks(jwk_owner: &signer, patches: vector<jwks::Patch>)
+
+ + + +
+Implementation + + +
public fun patch_federated_jwks(jwk_owner: &signer, patches: vector<Patch>) acquires FederatedJWKs {
+    // Prevents accidental calls in 0x1::jwks that install federated JWKs at the Supra framework address.
+    assert!(!system_addresses::is_supra_framework_address(signer::address_of(jwk_owner)),
+        error::invalid_argument(EINSTALL_FEDERATED_JWKS_AT_SUPRA_FRAMEWORK)
+    );
+
+    let jwk_addr = signer::address_of(jwk_owner);
+    if (!exists<FederatedJWKs>(jwk_addr)) {
+        move_to(jwk_owner, FederatedJWKs { jwks: AllProvidersJWKs { entries: vector[] } });
+    };
+
+    let fed_jwks = borrow_global_mut<FederatedJWKs>(jwk_addr);
+    vector::for_each_ref(&patches, |obj|{
+        let patch: &Patch = obj;
+        apply_patch(&mut fed_jwks.jwks, *patch);
+    });
+
+    // TODO: Can we check the size more efficiently instead of serializing it via BCS?
+    let num_bytes = vector::length(&bcs::to_bytes(fed_jwks));
+    assert!(num_bytes < MAX_FEDERATED_JWKS_SIZE_BYTES, error::invalid_argument(EFEDERATED_JWKS_TOO_LARGE));
+}
+
+ + + +
+ + + +## Function `update_federated_jwk_set` + +This can be called to install or update a set of JWKs for a federated OIDC provider. This function should +be invoked to intially install a set of JWKs or to update a set of JWKs when a keypair is rotated. + +The iss parameter is the value of the iss claim on the JWTs that are to be verified by the JWK set. +kid_vec, alg_vec, e_vec, n_vec are String vectors of the JWK attributes kid, alg, e and n respectively. +See https://datatracker.ietf.org/doc/html/rfc7517#section-4 for more details about the JWK attributes aforementioned. + +For the example JWK set snapshot below containing 2 keys for Google found at https://www.googleapis.com/oauth2/v3/certs - +```json +{ +"keys": [ +{ +"alg": "RS256", +"use": "sig", +"kty": "RSA", +"n": "wNHgGSG5B5xOEQNFPW2p_6ZxZbfPoAU5VceBUuNwQWLop0ohW0vpoZLU1tAsq_S9s5iwy27rJw4EZAOGBR9oTRq1Y6Li5pDVJfmzyRNtmWCWndR-bPqhs_dkJU7MbGwcvfLsN9FSHESFrS9sfGtUX-lZfLoGux23TKdYV9EE-H-NDASxrVFUk2GWc3rL6UEMWrMnOqV9-tghybDU3fcRdNTDuXUr9qDYmhmNegYjYu4REGjqeSyIG1tuQxYpOBH-tohtcfGY-oRTS09kgsSS9Q5BRM4qqCkGP28WhlSf4ui0-norS0gKMMI1P_ZAGEsLn9p2TlYMpewvIuhjJs1thw", +"kid": "d7b939771a7800c413f90051012d975981916d71", +"e": "AQAB" +}, +{ +"kty": "RSA", +"kid": "b2620d5e7f132b52afe8875cdf3776c064249d04", +"alg": "RS256", +"n": "pi22xDdK2fz5gclIbDIGghLDYiRO56eW2GUcboeVlhbAuhuT5mlEYIevkxdPOg5n6qICePZiQSxkwcYMIZyLkZhSJ2d2M6Szx2gDtnAmee6o_tWdroKu0DjqwG8pZU693oLaIjLku3IK20lTs6-2TeH-pUYMjEqiFMhn-hb7wnvH_FuPTjgz9i0rEdw_Hf3Wk6CMypaUHi31y6twrMWq1jEbdQNl50EwH-RQmQ9bs3Wm9V9t-2-_Jzg3AT0Ny4zEDU7WXgN2DevM8_FVje4IgztNy29XUkeUctHsr-431_Iu23JIy6U4Kxn36X3RlVUKEkOMpkDD3kd81JPW4Ger_w", +"e": "AQAB", +"use": "sig" +} +] +} +``` + +We can call update_federated_jwk_set for Google's iss - "https://accounts.google.com" and for each vector +argument kid_vec, alg_vec, e_vec, n_vec, we set in index 0 the corresponding attribute in the first JWK and we set in index 1 the +the corresponding attribute in the second JWK as shown below. 
+ +```move +use std::string::utf8; +supra_framework::jwks::update_federated_jwk_set( +jwk_owner, +b"https://accounts.google.com", +vector[utf8(b"d7b939771a7800c413f90051012d975981916d71"), utf8(b"b2620d5e7f132b52afe8875cdf3776c064249d04")], +vector[utf8(b"RS256"), utf8(b"RS256")], +vector[utf8(b"AQAB"), utf8(b"AQAB")], +vector[ +utf8(b"wNHgGSG5B5xOEQNFPW2p_6ZxZbfPoAU5VceBUuNwQWLop0ohW0vpoZLU1tAsq_S9s5iwy27rJw4EZAOGBR9oTRq1Y6Li5pDVJfmzyRNtmWCWndR-bPqhs_dkJU7MbGwcvfLsN9FSHESFrS9sfGtUX-lZfLoGux23TKdYV9EE-H-NDASxrVFUk2GWc3rL6UEMWrMnOqV9-tghybDU3fcRdNTDuXUr9qDYmhmNegYjYu4REGjqeSyIG1tuQxYpOBH-tohtcfGY-oRTS09kgsSS9Q5BRM4qqCkGP28WhlSf4ui0-norS0gKMMI1P_ZAGEsLn9p2TlYMpewvIuhjJs1thw"), +utf8(b"pi22xDdK2fz5gclIbDIGghLDYiRO56eW2GUcboeVlhbAuhuT5mlEYIevkxdPOg5n6qICePZiQSxkwcYMIZyLkZhSJ2d2M6Szx2gDtnAmee6o_tWdroKu0DjqwG8pZU693oLaIjLku3IK20lTs6-2TeH-pUYMjEqiFMhn-hb7wnvH_FuPTjgz9i0rEdw_Hf3Wk6CMypaUHi31y6twrMWq1jEbdQNl50EwH-RQmQ9bs3Wm9V9t-2-_Jzg3AT0Ny4zEDU7WXgN2DevM8_FVje4IgztNy29XUkeUctHsr-431_Iu23JIy6U4Kxn36X3RlVUKEkOMpkDD3kd81JPW4Ger_w") +] +) +``` + +See AIP-96 for more details about federated keyless - https://github.com/aptos-foundation/AIPs/blob/main/aips/aip-96.md + +NOTE: Currently only RSA keys are supported. + + +
public entry fun update_federated_jwk_set(jwk_owner: &signer, iss: vector<u8>, kid_vec: vector<string::String>, alg_vec: vector<string::String>, e_vec: vector<string::String>, n_vec: vector<string::String>)
+
+ + + +
+Implementation + + +
public entry fun update_federated_jwk_set(jwk_owner: &signer, iss: vector<u8>, kid_vec: vector<String>, alg_vec: vector<String>, e_vec: vector<String>, n_vec: vector<String>) acquires FederatedJWKs {
+    assert!(!vector::is_empty(&kid_vec), error::invalid_argument(EINVALID_FEDERATED_JWK_SET));
+    let num_jwk = vector::length<String>(&kid_vec);
+    assert!(vector::length(&alg_vec) == num_jwk , error::invalid_argument(EINVALID_FEDERATED_JWK_SET));
+    assert!(vector::length(&e_vec) == num_jwk, error::invalid_argument(EINVALID_FEDERATED_JWK_SET));
+    assert!(vector::length(&n_vec) == num_jwk, error::invalid_argument(EINVALID_FEDERATED_JWK_SET));
+
+    let remove_all_patch = new_patch_remove_all();
+    let patches = vector[remove_all_patch];
+    while (!vector::is_empty(&kid_vec)) {
+        let kid = vector::pop_back(&mut kid_vec);
+        let alg = vector::pop_back(&mut alg_vec);
+        let e = vector::pop_back(&mut e_vec);
+        let n = vector::pop_back(&mut n_vec);
+        let jwk = new_rsa_jwk(kid, alg, e, n);
+        let patch = new_patch_upsert_jwk(iss, jwk);
+        vector::push_back(&mut patches, patch)
+    };
+    patch_federated_jwks(jwk_owner, patches);
+}
+
+ + + +
+ ## Function `get_patched_jwk` @@ -809,7 +1019,7 @@ supra_framework::jwks::upsert_oidc_provider_for_next_epoch( b"https://accounts.google.com", b"https://accounts.google.com/.well-known/openid-configuration" ); -supra_framework::aptos_governance::reconfigure(&framework_signer); +supra_framework::supra_governance::reconfigure(&framework_signer); ``` @@ -884,7 +1094,7 @@ supra_framework::jwks::remove_oidc_provider_for_next_epoch( &framework_signer, b"https://accounts.google.com", ); -supra_framework::aptos_governance::reconfigure(&framework_signer); +supra_framework::supra_governance::reconfigure(&framework_signer); ``` @@ -1315,7 +1525,7 @@ Regenerate PatchedJWKs f ## Function `try_get_jwk_by_issuer` -Get a JWK by issuer and key ID from a AllProvidersJWKs, if it exists. +Get a JWK by issuer and key ID from an AllProvidersJWKs, if it exists.
fun try_get_jwk_by_issuer(jwks: &jwks::AllProvidersJWKs, issuer: vector<u8>, jwk_id: vector<u8>): option::Option<jwks::JWK>
diff --git a/aptos-move/framework/supra-framework/doc/object.md b/aptos-move/framework/supra-framework/doc/object.md
index f59f5def50f92..7c4cd47c02644 100644
--- a/aptos-move/framework/supra-framework/doc/object.md
+++ b/aptos-move/framework/supra-framework/doc/object.md
@@ -604,6 +604,16 @@ generate_unique_address uses this for domain separation within its native implem
 
 
 
+
+
+Objects cannot be burnt
+
+
+
const EBURN_NOT_ALLOWED: u64 = 10;
+
+ + + The object does not allow for deletion @@ -2130,12 +2140,13 @@ objects may have cyclic dependencies. ## Function `burn` -Forcefully transfer an unwanted object to BURN_ADDRESS, ignoring whether ungated_transfer is allowed. -This only works for objects directly owned and for simplicity does not apply to indirectly owned objects. -Original owners can reclaim burnt objects any time in the future by calling unburn. +Previously allowed to burn objects, has now been disabled. Objects can still be unburnt. +Please use the test only [object::burn_object] for testing with previously burned objects. -
public entry fun burn<T: key>(owner: &signer, object: object::Object<T>)
+
+
#[deprecated]
+public entry fun burn<T: key>(_owner: &signer, _object: object::Object<T>)
 
@@ -2144,12 +2155,8 @@ Original owners can reclaim burnt objects any time in the future by calling unbu Implementation -
public entry fun burn<T: key>(owner: &signer, object: Object<T>) acquires ObjectCore {
-    let original_owner = signer::address_of(owner);
-    assert!(is_owner(object, original_owner), error::permission_denied(ENOT_OBJECT_OWNER));
-    let object_addr = object.inner;
-    move_to(&create_signer(object_addr), TombStone { original_owner });
-    transfer_raw_inner(object_addr, BURN_ADDRESS);
+
public entry fun burn<T: key>(_owner: &signer, _object: Object<T>) {
+    abort error::permission_denied(EBURN_NOT_ALLOWED)
 }
 
@@ -2441,6 +2448,33 @@ to determine the identity of the starting point of ownership. + + + + +
fun spec_create_object_address(source: address, seed: vector<u8>): address;
+
+ + + + + + + +
fun spec_create_user_derived_object_address(source: address, derive_from: address): address;
+
+ + + + + + + +
fun spec_create_guid_object_address(source: address, creation_num: u64): address;
+
+ + + ### Function `address_to_object` @@ -3245,17 +3279,14 @@ to determine the identity of the starting point of ownership. ### Function `burn` -
public entry fun burn<T: key>(owner: &signer, object: object::Object<T>)
+
#[deprecated]
+public entry fun burn<T: key>(_owner: &signer, _object: object::Object<T>)
 
-
pragma aborts_if_is_partial;
-let object_address = object.inner;
-aborts_if !exists<ObjectCore>(object_address);
-aborts_if owner(object) != signer::address_of(owner);
-aborts_if is_burnt(object);
+
aborts_if true;
 
@@ -3368,31 +3399,4 @@ to determine the identity of the starting point of ownership.
- - - - - -
fun spec_create_object_address(source: address, seed: vector<u8>): address;
-
- - - - - - - -
fun spec_create_user_derived_object_address(source: address, derive_from: address): address;
-
- - - - - - - -
fun spec_create_guid_object_address(source: address, creation_num: u64): address;
-
- - [move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/doc/reconfiguration_state.md b/aptos-move/framework/supra-framework/doc/reconfiguration_state.md index 9740a88d4f798..6290fc3524567 100644 --- a/aptos-move/framework/supra-framework/doc/reconfiguration_state.md +++ b/aptos-move/framework/supra-framework/doc/reconfiguration_state.md @@ -562,7 +562,7 @@ Abort if the current state is not "in progress". include copyable_any::type_name(global<State>(@supra_framework).variant).bytes == b"0x1::reconfiguration_state::StateActive" ==> copyable_any::UnpackAbortsIf<StateActive> { - x: global<State>(@supra_framework).variant + self: global<State>(@supra_framework).variant }; aborts_if copyable_any::type_name(global<State>(@supra_framework).variant).bytes != b"0x1::reconfiguration_state::StateActive"; diff --git a/aptos-move/framework/supra-framework/doc/resource_account.md b/aptos-move/framework/supra-framework/doc/resource_account.md index 3897cb991d51b..0a55263beafe3 100644 --- a/aptos-move/framework/supra-framework/doc/resource_account.md +++ b/aptos-move/framework/supra-framework/doc/resource_account.md @@ -593,7 +593,6 @@ the SignerCapability. let container = global<Container>(source_addr); let get = len(optional_auth_key) == 0; let account = global<account::Account>(source_addr); - requires source_addr != resource_addr; aborts_if len(ZERO_AUTH_KEY) != 32; include account::exists_at(resource_addr) ==> account::CreateResourceAccountAbortsIf; include !account::exists_at(resource_addr) ==> account::CreateAccountAbortsIf {addr: resource_addr}; diff --git a/aptos-move/framework/supra-framework/doc/staking_contract.md b/aptos-move/framework/supra-framework/doc/staking_contract.md index fdf925a255408..bfffd1c87857e 100644 --- a/aptos-move/framework/supra-framework/doc/staking_contract.md +++ b/aptos-move/framework/supra-framework/doc/staking_contract.md @@ -91,6 +91,7 @@ pool. 
- [Specification](#@Specification_1) - [High-level Requirements](#high-level-req) - [Module-level Specification](#module-level-spec) + - [Struct `StakingContract`](#@Specification_1_StakingContract) - [Function `stake_pool_address`](#@Specification_1_stake_pool_address) - [Function `last_recorded_principal`](#@Specification_1_last_recorded_principal) - [Function `commission_percentage`](#@Specification_1_commission_percentage) @@ -1339,7 +1340,7 @@ Staker has no staking contracts. -Chaning beneficiaries for operators is not supported. +Changing beneficiaries for operators is not supported.
const EOPERATOR_BENEFICIARY_CHANGE_NOT_SUPPORTED: u64 = 9;
@@ -2763,6 +2764,62 @@ Create a new staking_contracts resource.
 
 
 
+
+
+### Struct `StakingContract`
+
+
+
struct StakingContract has store
+
+ + + +
+
+principal: u64 +
+
+ +
+
+pool_address: address +
+
+ +
+
+owner_cap: stake::OwnerCapability +
+
+ +
+
+commission_percentage: u64 +
+
+ +
+
+distribution_pool: pool_u64::Pool +
+
+ +
+
+signer_cap: account::SignerCapability +
+
+ +
+
+ + + +
invariant commission_percentage >= 0 && commission_percentage <= 100;
+
+ + + ### Function `stake_pool_address` @@ -2836,7 +2893,6 @@ Staking_contract exists the stacker/operator pair.
pragma verify_duration_estimate = 120;
-requires staking_contract.commission_percentage >= 0 && staking_contract.commission_percentage <= 100;
 let staking_contracts = global<Store>(staker).staking_contracts;
 let staking_contract = simple_map::spec_get(staking_contracts, operator);
 include ContractExistsAbortsIf;
@@ -3023,6 +3079,7 @@ Staking_contract exists the stacker/operator pair.
 let post staking_contract = simple_map::spec_get(store.staking_contracts, operator);
 let post pool_address = staking_contract.owner_cap.pool_address;
 let post new_delegated_voter = global<stake::StakePool>(pool_address).delegated_voter;
+// This enforces high-level requirement 4:
 ensures new_delegated_voter == new_voter;
 
@@ -3117,7 +3174,6 @@ Only staker or operator can call this.
pragma verify = false;
-requires amount > 0;
 let staker_address = signer::address_of(staker);
 include ContractExistsAbortsIf { staker: staker_address };
 
@@ -3137,8 +3193,6 @@ Staking_contract exists the stacker/operator pair.
pragma verify = false;
-// This enforces high-level requirement 4:
-requires staking_contract.commission_percentage >= 0 && staking_contract.commission_percentage <= 100;
 let staker_address = signer::address_of(staker);
 let staking_contracts = global<Store>(staker_address).staking_contracts;
 let staking_contract = simple_map::spec_get(staking_contracts, operator);
@@ -3372,7 +3426,7 @@ The StakePool exists under the pool_address of StakingContract.
 
 
 The Account exists under the staker.
-The guid_creation_num of the ccount resource is up to MAX_U64.
+The guid_creation_num of the account resource is up to MAX_U64.
 
 
 
include NewStakingContractsHolderAbortsIf;
diff --git a/aptos-move/framework/supra-framework/doc/supra_account.md b/aptos-move/framework/supra-framework/doc/supra_account.md
index 8f977a63841e9..47eb65e8e77c3 100644
--- a/aptos-move/framework/supra-framework/doc/supra_account.md
+++ b/aptos-move/framework/supra-framework/doc/supra_account.md
@@ -585,7 +585,7 @@ TODO: once migration is complete, rename to just "transfer_only" and make it an
 to transfer SUPRA) - if we want to allow SUPRA PFS without account itself
 
 
-
fun fungible_transfer_only(source: &signer, to: address, amount: u64)
+
public(friend) entry fun fungible_transfer_only(source: &signer, to: address, amount: u64)
 
@@ -594,7 +594,7 @@ to transfer SUPRA) - if we want to allow SUPRA PFS without account itself Implementation -
fun fungible_transfer_only(
+
public(friend) entry fun fungible_transfer_only(
     source: &signer, to: address, amount: u64
 ) {
     let sender_store = ensure_primary_fungible_store_exists(signer::address_of(source));
@@ -879,10 +879,6 @@ Limit the address of auth_key is not @vm_reserved / @supra_framework / @aptos_to
 let account_addr_source = signer::address_of(source);
 let coin_store_source = global<coin::CoinStore<SupraCoin>>(account_addr_source);
 let balance_source = coin_store_source.coin.value;
-requires forall i in 0..len(recipients):
-    recipients[i] != account_addr_source;
-requires exists i in 0..len(recipients):
-    amounts[i] > 0;
 aborts_if len(recipients) != len(amounts);
 aborts_if exists i in 0..len(recipients):
         !account::exists_at(recipients[i]) && length_judgment(recipients[i]);
@@ -920,7 +916,6 @@ Limit the address of auth_key is not @vm_reserved / @supra_framework / @aptos_to
 
 
pragma verify = false;
 let account_addr_source = signer::address_of(source);
-requires account_addr_source != to;
 include CreateAccountTransferAbortsIf;
 include GuidAbortsIf<SupraCoin>;
 include WithdrawAbortsIf<SupraCoin>{from: source};
@@ -948,10 +943,6 @@ Limit the address of auth_key is not @vm_reserved / @supra_framework / @aptos_to
 let account_addr_source = signer::address_of(from);
 let coin_store_source = global<coin::CoinStore<CoinType>>(account_addr_source);
 let balance_source = coin_store_source.coin.value;
-requires forall i in 0..len(recipients):
-    recipients[i] != account_addr_source;
-requires exists i in 0..len(recipients):
-    amounts[i] > 0;
 // This enforces high-level requirement 7:
 aborts_if len(recipients) != len(amounts);
 aborts_if exists i in 0..len(recipients):
@@ -992,7 +983,6 @@ Limit the address of auth_key is not @vm_reserved / @supra_framework / @aptos_to
 
 
pragma verify = false;
 let account_addr_source = signer::address_of(from);
-requires account_addr_source != to;
 include CreateAccountTransferAbortsIf;
 include WithdrawAbortsIf<CoinType>;
 include GuidAbortsIf<CoinType>;
@@ -1127,7 +1117,7 @@ Check if the SupraCoin under the address existed.
 ### Function `fungible_transfer_only`
 
 
-
fun fungible_transfer_only(source: &signer, to: address, amount: u64)
+
public(friend) entry fun fungible_transfer_only(source: &signer, to: address, amount: u64)
 
diff --git a/aptos-move/framework/supra-framework/doc/transaction_validation.md b/aptos-move/framework/supra-framework/doc/transaction_validation.md index 5aa36bc15fedf..c2ebbf8c10400 100644 --- a/aptos-move/framework/supra-framework/doc/transaction_validation.md +++ b/aptos-move/framework/supra-framework/doc/transaction_validation.md @@ -10,22 +10,34 @@ - [Function `initialize`](#0x1_transaction_validation_initialize) - [Function `prologue_common`](#0x1_transaction_validation_prologue_common) - [Function `script_prologue`](#0x1_transaction_validation_script_prologue) +- [Function `script_prologue_extended`](#0x1_transaction_validation_script_prologue_extended) - [Function `multi_agent_script_prologue`](#0x1_transaction_validation_multi_agent_script_prologue) +- [Function `multi_agent_script_prologue_extended`](#0x1_transaction_validation_multi_agent_script_prologue_extended) - [Function `multi_agent_common_prologue`](#0x1_transaction_validation_multi_agent_common_prologue) - [Function `fee_payer_script_prologue`](#0x1_transaction_validation_fee_payer_script_prologue) +- [Function `fee_payer_script_prologue_extended`](#0x1_transaction_validation_fee_payer_script_prologue_extended) - [Function `epilogue`](#0x1_transaction_validation_epilogue) +- [Function `epilogue_extended`](#0x1_transaction_validation_epilogue_extended) - [Function `epilogue_gas_payer`](#0x1_transaction_validation_epilogue_gas_payer) +- [Function `epilogue_gas_payer_extended`](#0x1_transaction_validation_epilogue_gas_payer_extended) +- [Function `skip_auth_key_check`](#0x1_transaction_validation_skip_auth_key_check) +- [Function `skip_gas_payment`](#0x1_transaction_validation_skip_gas_payment) - [Specification](#@Specification_1) - [High-level Requirements](#high-level-req) - [Module-level Specification](#module-level-spec) - [Function `initialize`](#@Specification_1_initialize) - [Function `prologue_common`](#@Specification_1_prologue_common) - [Function 
`script_prologue`](#@Specification_1_script_prologue) + - [Function `script_prologue_extended`](#@Specification_1_script_prologue_extended) - [Function `multi_agent_script_prologue`](#@Specification_1_multi_agent_script_prologue) + - [Function `multi_agent_script_prologue_extended`](#@Specification_1_multi_agent_script_prologue_extended) - [Function `multi_agent_common_prologue`](#@Specification_1_multi_agent_common_prologue) - [Function `fee_payer_script_prologue`](#@Specification_1_fee_payer_script_prologue) + - [Function `fee_payer_script_prologue_extended`](#@Specification_1_fee_payer_script_prologue_extended) - [Function `epilogue`](#@Specification_1_epilogue) + - [Function `epilogue_extended`](#@Specification_1_epilogue_extended) - [Function `epilogue_gas_payer`](#@Specification_1_epilogue_gas_payer) + - [Function `epilogue_gas_payer_extended`](#@Specification_1_epilogue_gas_payer_extended)
use 0x1::account;
@@ -40,6 +52,7 @@
 use 0x1::system_addresses;
 use 0x1::timestamp;
 use 0x1::transaction_fee;
+use 0x1::vector;
 
@@ -269,7 +282,7 @@ Only called during genesis to initialize system resources for this module. -
fun prologue_common(sender: signer, gas_payer: address, txn_sequence_number: u64, txn_authentication_key: vector<u8>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8)
+
fun prologue_common(sender: signer, gas_payer: address, txn_sequence_number: u64, txn_authentication_key: vector<u8>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, is_simulation: bool)
 
@@ -287,6 +300,7 @@ Only called during genesis to initialize system resources for this module. txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, + is_simulation: bool, ) { assert!( timestamp::now_seconds() < txn_expiration_time, @@ -303,10 +317,13 @@ Only called during genesis to initialize system resources for this module. || txn_sequence_number > 0 ) { assert!(account::exists_at(transaction_sender), error::invalid_argument(PROLOGUE_EACCOUNT_DOES_NOT_EXIST)); - assert!( - txn_authentication_key == account::get_authentication_key(transaction_sender), - error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY), - ); + if (!features::transaction_simulation_enhancement_enabled() || + !skip_auth_key_check(is_simulation, &txn_authentication_key)) { + assert!( + txn_authentication_key == account::get_authentication_key(transaction_sender), + error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY), + ) + }; let account_sequence_number = account::get_sequence_number(transaction_sender); assert!( @@ -331,24 +348,29 @@ Only called during genesis to initialize system resources for this module. 
error::invalid_argument(PROLOGUE_ESEQUENCE_NUMBER_TOO_NEW) ); - assert!( - txn_authentication_key == bcs::to_bytes(&transaction_sender), - error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY), - ); + if (!features::transaction_simulation_enhancement_enabled() || + !skip_auth_key_check(is_simulation, &txn_authentication_key)) { + assert!( + txn_authentication_key == bcs::to_bytes(&transaction_sender), + error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY), + ); + } }; let max_transaction_fee = txn_gas_price * txn_max_gas_units; - if (features::operations_default_to_fa_supra_store_enabled()) { - assert!( - supra_account::is_fungible_balance_at_least(gas_payer, max_transaction_fee), - error::invalid_argument(PROLOGUE_ECANT_PAY_GAS_DEPOSIT) - ); - } else { - assert!( - coin::is_balance_at_least<SupraCoin>(gas_payer, max_transaction_fee), - error::invalid_argument(PROLOGUE_ECANT_PAY_GAS_DEPOSIT) - ); + if (!features::transaction_simulation_enhancement_enabled() || !skip_gas_payment(is_simulation, gas_payer)) { + if (features::operations_default_to_fa_supra_store_enabled()) { + assert!( + supra_account::is_fungible_balance_at_least(gas_payer, max_transaction_fee), + error::invalid_argument(PROLOGUE_ECANT_PAY_GAS_DEPOSIT) + ); + } else { + assert!( + coin::is_balance_at_least<SupraCoin>(gas_payer, max_transaction_fee), + error::invalid_argument(PROLOGUE_ECANT_PAY_GAS_DEPOSIT) + ); + } } }
@@ -383,6 +405,7 @@ Only called during genesis to initialize system resources for this module. _script_hash: vector<u8>, ) { let gas_payer = signer::address_of(&sender); + // prologue_common with is_simulation set to false behaves identically to the original script_prologue function. prologue_common( sender, gas_payer, @@ -391,7 +414,53 @@ Only called during genesis to initialize system resources for this module. txn_gas_price, txn_max_gas_units, txn_expiration_time, - chain_id + chain_id, + false, + ) +} +
+ + + +
+ + + +## Function `script_prologue_extended` + + + +
fun script_prologue_extended(sender: signer, txn_sequence_number: u64, txn_public_key: vector<u8>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, _script_hash: vector<u8>, is_simulation: bool)
+
+ + + +
+Implementation + + +
fun script_prologue_extended(
+    sender: signer,
+    txn_sequence_number: u64,
+    txn_public_key: vector<u8>,
+    txn_gas_price: u64,
+    txn_max_gas_units: u64,
+    txn_expiration_time: u64,
+    chain_id: u8,
+    _script_hash: vector<u8>,
+    is_simulation: bool,
+) {
+    let gas_payer = signer::address_of(&sender);
+    prologue_common(
+        sender,
+        gas_payer,
+        txn_sequence_number,
+        txn_public_key,
+        txn_gas_price,
+        txn_max_gas_units,
+        txn_expiration_time,
+        chain_id,
+        is_simulation,
     )
 }
 
@@ -427,6 +496,8 @@ Only called during genesis to initialize system resources for this module. chain_id: u8, ) { let sender_addr = signer::address_of(&sender); + // prologue_common and multi_agent_common_prologue with is_simulation set to false behaves identically to the + // original multi_agent_script_prologue function. prologue_common( sender, sender_addr, @@ -436,8 +507,56 @@ Only called during genesis to initialize system resources for this module. txn_max_gas_units, txn_expiration_time, chain_id, + false, ); - multi_agent_common_prologue(secondary_signer_addresses, secondary_signer_public_key_hashes); + multi_agent_common_prologue(secondary_signer_addresses, secondary_signer_public_key_hashes, false); +} +
+ + + + + + + +## Function `multi_agent_script_prologue_extended` + + + +
fun multi_agent_script_prologue_extended(sender: signer, txn_sequence_number: u64, txn_sender_public_key: vector<u8>, secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<vector<u8>>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, is_simulation: bool)
+
+ + + +
+Implementation + + +
fun multi_agent_script_prologue_extended(
+    sender: signer,
+    txn_sequence_number: u64,
+    txn_sender_public_key: vector<u8>,
+    secondary_signer_addresses: vector<address>,
+    secondary_signer_public_key_hashes: vector<vector<u8>>,
+    txn_gas_price: u64,
+    txn_max_gas_units: u64,
+    txn_expiration_time: u64,
+    chain_id: u8,
+    is_simulation: bool,
+) {
+    let sender_addr = signer::address_of(&sender);
+    prologue_common(
+        sender,
+        sender_addr,
+        txn_sequence_number,
+        txn_sender_public_key,
+        txn_gas_price,
+        txn_max_gas_units,
+        txn_expiration_time,
+        chain_id,
+        is_simulation,
+    );
+    multi_agent_common_prologue(secondary_signer_addresses, secondary_signer_public_key_hashes, is_simulation);
 }
 
@@ -451,7 +570,7 @@ Only called during genesis to initialize system resources for this module. -
fun multi_agent_common_prologue(secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<vector<u8>>)
+
fun multi_agent_common_prologue(secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<vector<u8>>, is_simulation: bool)
 
@@ -463,6 +582,7 @@ Only called during genesis to initialize system resources for this module.
fun multi_agent_common_prologue(
     secondary_signer_addresses: vector<address>,
     secondary_signer_public_key_hashes: vector<vector<u8>>,
+    is_simulation: bool,
 ) {
     let num_secondary_signers = vector::length(&secondary_signer_addresses);
     assert!(
@@ -475,9 +595,10 @@ Only called during genesis to initialize system resources for this module.
         spec {
             invariant i <= num_secondary_signers;
             invariant forall j in 0..i:
-                account::exists_at(secondary_signer_addresses[j])
-                    && secondary_signer_public_key_hashes[j]
-                    == account::get_authentication_key(secondary_signer_addresses[j]);
+                account::exists_at(secondary_signer_addresses[j]);
+            invariant forall j in 0..i:
+                secondary_signer_public_key_hashes[j] == account::get_authentication_key(secondary_signer_addresses[j]) ||
+                    (features::spec_simulation_enhancement_enabled() && is_simulation && vector::is_empty(secondary_signer_public_key_hashes[j]));
         };
         (i < num_secondary_signers)
     }) {
@@ -485,10 +606,13 @@ Only called during genesis to initialize system resources for this module.
         assert!(account::exists_at(secondary_address), error::invalid_argument(PROLOGUE_EACCOUNT_DOES_NOT_EXIST));
 
         let signer_public_key_hash = *vector::borrow(&secondary_signer_public_key_hashes, i);
-        assert!(
-            signer_public_key_hash == account::get_authentication_key(secondary_address),
-            error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY),
-        );
+        if (!features::transaction_simulation_enhancement_enabled() ||
+                !skip_auth_key_check(is_simulation, &signer_public_key_hash)) {
+            assert!(
+                signer_public_key_hash == account::get_authentication_key(secondary_address),
+                error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY),
+            )
+        };
         i = i + 1;
     }
 }
@@ -527,6 +651,8 @@ Only called during genesis to initialize system resources for this module.
     chain_id: u8,
 ) {
     assert!(features::fee_payer_enabled(), error::invalid_state(PROLOGUE_EFEE_PAYER_NOT_ENABLED));
+    // prologue_common and multi_agent_common_prologue with is_simulation set to false behave identically to the
+    // original fee_payer_script_prologue function.
     prologue_common(
         sender,
         fee_payer_address,
@@ -536,8 +662,9 @@ Only called during genesis to initialize system resources for this module.
         txn_max_gas_units,
         txn_expiration_time,
         chain_id,
+        false,
     );
-    multi_agent_common_prologue(secondary_signer_addresses, secondary_signer_public_key_hashes);
+    multi_agent_common_prologue(secondary_signer_addresses, secondary_signer_public_key_hashes, false);
     assert!(
         fee_payer_public_key_hash == account::get_authentication_key(fee_payer_address),
         error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY),
@@ -547,6 +674,62 @@ Only called during genesis to initialize system resources for this module.
 
 
 
+
+ + + +## Function `fee_payer_script_prologue_extended` + + + +
fun fee_payer_script_prologue_extended(sender: signer, txn_sequence_number: u64, txn_sender_public_key: vector<u8>, secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<vector<u8>>, fee_payer_address: address, fee_payer_public_key_hash: vector<u8>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, is_simulation: bool)
+
+ + + +
+Implementation + + +
fun fee_payer_script_prologue_extended(
+    sender: signer,
+    txn_sequence_number: u64,
+    txn_sender_public_key: vector<u8>,
+    secondary_signer_addresses: vector<address>,
+    secondary_signer_public_key_hashes: vector<vector<u8>>,
+    fee_payer_address: address,
+    fee_payer_public_key_hash: vector<u8>,
+    txn_gas_price: u64,
+    txn_max_gas_units: u64,
+    txn_expiration_time: u64,
+    chain_id: u8,
+    is_simulation: bool,
+) {
+    assert!(features::fee_payer_enabled(), error::invalid_state(PROLOGUE_EFEE_PAYER_NOT_ENABLED));
+    prologue_common(
+        sender,
+        fee_payer_address,
+        txn_sequence_number,
+        txn_sender_public_key,
+        txn_gas_price,
+        txn_max_gas_units,
+        txn_expiration_time,
+        chain_id,
+        is_simulation,
+    );
+    multi_agent_common_prologue(secondary_signer_addresses, secondary_signer_public_key_hashes, is_simulation);
+    if (!features::transaction_simulation_enhancement_enabled() ||
+        !skip_auth_key_check(is_simulation, &fee_payer_public_key_hash)) {
+        assert!(
+            fee_payer_public_key_hash == account::get_authentication_key(fee_payer_address),
+            error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY),
+        )
+    }
+}
+
+ + +
@@ -571,7 +754,7 @@ Called by the Adapter storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, - gas_units_remaining: u64 + gas_units_remaining: u64, ) { let addr = signer::address_of(&account); epilogue_gas_payer(account, addr, storage_fee_refunded, txn_gas_price, txn_max_gas_units, gas_units_remaining); @@ -580,6 +763,38 @@ Called by the Adapter + + + + +## Function `epilogue_extended` + + + +
fun epilogue_extended(account: signer, storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, gas_units_remaining: u64, is_simulation: bool)
+
+ + + +
+Implementation + + +
fun epilogue_extended(
+    account: signer,
+    storage_fee_refunded: u64,
+    txn_gas_price: u64,
+    txn_max_gas_units: u64,
+    gas_units_remaining: u64,
+    is_simulation: bool,
+) {
+    let addr = signer::address_of(&account);
+    epilogue_gas_payer_extended(account, addr, storage_fee_refunded, txn_gas_price, txn_max_gas_units, gas_units_remaining, is_simulation);
+}
+
+ + +
@@ -605,7 +820,49 @@ Called by the Adapter storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, - gas_units_remaining: u64 + gas_units_remaining: u64, +) { + // epilogue_gas_payer_extended with is_simulation set to false behaves identically to the original + // epilogue_gas_payer function. + epilogue_gas_payer_extended( + account, + gas_payer, + storage_fee_refunded, + txn_gas_price, + txn_max_gas_units, + gas_units_remaining, + false, + ); +} +
+ + + + + + + +## Function `epilogue_gas_payer_extended` + + + +
fun epilogue_gas_payer_extended(account: signer, gas_payer: address, storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, gas_units_remaining: u64, is_simulation: bool)
+
+ + + +
+Implementation + + +
fun epilogue_gas_payer_extended(
+    account: signer,
+    gas_payer: address,
+    storage_fee_refunded: u64,
+    txn_gas_price: u64,
+    txn_max_gas_units: u64,
+    gas_units_remaining: u64,
+    is_simulation: bool,
 ) {
     assert!(txn_max_gas_units >= gas_units_remaining, error::invalid_argument(EOUT_OF_GAS));
     let gas_used = txn_max_gas_units - gas_units_remaining;
@@ -618,39 +875,41 @@ Called by the Adapter
 
     // it's important to maintain the error code consistent with vm
     // to do failed transaction cleanup.
-    if (features::operations_default_to_fa_supra_store_enabled()) {
-        assert!(
-            supra_account::is_fungible_balance_at_least(gas_payer, transaction_fee_amount),
-            error::out_of_range(PROLOGUE_ECANT_PAY_GAS_DEPOSIT),
-        );
-    } else {
-        assert!(
-            coin::is_balance_at_least<SupraCoin>(gas_payer, transaction_fee_amount),
-            error::out_of_range(PROLOGUE_ECANT_PAY_GAS_DEPOSIT),
-        );
-    };
-
-    let amount_to_burn = if (features::collect_and_distribute_gas_fees()) {
-        // TODO(gas): We might want to distinguish the refundable part of the charge and burn it or track
-        // it separately, so that we don't increase the total supply by refunding.
+    if (!features::transaction_simulation_enhancement_enabled() || !skip_gas_payment(is_simulation, gas_payer)) {
+        if (features::operations_default_to_fa_supra_store_enabled()) {
+            assert!(
+                supra_account::is_fungible_balance_at_least(gas_payer, transaction_fee_amount),
+                error::out_of_range(PROLOGUE_ECANT_PAY_GAS_DEPOSIT),
+            );
+        } else {
+            assert!(
+                coin::is_balance_at_least<SupraCoin>(gas_payer, transaction_fee_amount),
+                error::out_of_range(PROLOGUE_ECANT_PAY_GAS_DEPOSIT),
+            );
+        };
 
-        // If transaction fees are redistributed to validators, collect them here for
-        // later redistribution.
-        transaction_fee::collect_fee(gas_payer, transaction_fee_amount);
-        0
-    } else {
-        // Otherwise, just burn the fee.
-        // TODO: this branch should be removed completely when transaction fee collection
-        // is tested and is fully proven to work well.
-        transaction_fee_amount
-    };
+        let amount_to_burn = if (features::collect_and_distribute_gas_fees()) {
+            // TODO(gas): We might want to distinguish the refundable part of the charge and burn it or track
+            // it separately, so that we don't increase the total supply by refunding.
+
+            // If transaction fees are redistributed to validators, collect them here for
+            // later redistribution.
+            transaction_fee::collect_fee(gas_payer, transaction_fee_amount);
+            0
+        } else {
+            // Otherwise, just burn the fee.
+            // TODO: this branch should be removed completely when transaction fee collection
+            // is tested and is fully proven to work well.
+            transaction_fee_amount
+        };
 
-    if (amount_to_burn > storage_fee_refunded) {
-        let burn_amount = amount_to_burn - storage_fee_refunded;
-        transaction_fee::burn_fee(gas_payer, burn_amount);
-    } else if (amount_to_burn < storage_fee_refunded) {
-        let mint_amount = storage_fee_refunded - amount_to_burn;
-        transaction_fee::mint_and_refund(gas_payer, mint_amount)
+        if (amount_to_burn > storage_fee_refunded) {
+            let burn_amount = amount_to_burn - storage_fee_refunded;
+            transaction_fee::burn_fee(gas_payer, burn_amount);
+        } else if (amount_to_burn < storage_fee_refunded) {
+            let mint_amount = storage_fee_refunded - amount_to_burn;
+            transaction_fee::mint_and_refund(gas_payer, mint_amount)
+        };
     };
 
     // Increment sequence number
@@ -661,6 +920,54 @@ Called by the Adapter
 
 
 
+
+ + + +## Function `skip_auth_key_check` + + + +
fun skip_auth_key_check(is_simulation: bool, auth_key: &vector<u8>): bool
+
+ + + +
+Implementation + + +
inline fun skip_auth_key_check(is_simulation: bool, auth_key: &vector<u8>): bool {
+    is_simulation && vector::is_empty(auth_key)
+}
+
+ + + +
+ + + +## Function `skip_gas_payment` + + + +
fun skip_gas_payment(is_simulation: bool, gas_payer: address): bool
+
+ + + +
+Implementation + + +
inline fun skip_gas_payment(is_simulation: bool, gas_payer: address): bool {
+    is_simulation && gas_payer == @0x0
+}
+
+ + +
@@ -792,7 +1099,7 @@ Give some constraints that may abort according to the conditions. ### Function `prologue_common` -
fun prologue_common(sender: signer, gas_payer: address, txn_sequence_number: u64, txn_authentication_key: vector<u8>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8)
+
fun prologue_common(sender: signer, gas_payer: address, txn_sequence_number: u64, txn_authentication_key: vector<u8>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, is_simulation: bool)
 
@@ -816,10 +1123,6 @@ Give some constraints that may abort according to the conditions.
pragma verify = false;
-include PrologueCommonAbortsIf {
-    gas_payer: signer::address_of(sender),
-    txn_authentication_key: txn_public_key
-};
 
@@ -831,22 +1134,57 @@ Give some constraints that may abort according to the conditions.
schema MultiAgentPrologueCommonAbortsIf {
     secondary_signer_addresses: vector<address>;
     secondary_signer_public_key_hashes: vector<vector<u8>>;
+    is_simulation: bool;
     let num_secondary_signers = len(secondary_signer_addresses);
     aborts_if len(secondary_signer_public_key_hashes) != num_secondary_signers;
     // This enforces high-level requirement 2:
     aborts_if exists i in 0..num_secondary_signers:
-        !account::exists_at(secondary_signer_addresses[i])
-            || secondary_signer_public_key_hashes[i] !=
-            account::get_authentication_key(secondary_signer_addresses[i]);
+        !account::exists_at(secondary_signer_addresses[i]);
+    aborts_if exists i in 0..num_secondary_signers:
+        !can_skip(features::spec_simulation_enhancement_enabled(), is_simulation, secondary_signer_public_key_hashes[i]) &&
+            secondary_signer_public_key_hashes[i] !=
+                account::get_authentication_key(secondary_signer_addresses[i]);
     ensures forall i in 0..num_secondary_signers:
-        account::exists_at(secondary_signer_addresses[i])
-            && secondary_signer_public_key_hashes[i] ==
-            account::get_authentication_key(secondary_signer_addresses[i]);
+        account::exists_at(secondary_signer_addresses[i]);
+    ensures forall i in 0..num_secondary_signers:
+        secondary_signer_public_key_hashes[i] == account::get_authentication_key(secondary_signer_addresses[i])
+            || can_skip(features::spec_simulation_enhancement_enabled(), is_simulation, secondary_signer_public_key_hashes[i]);
 }
 
+ + + + +
fun can_skip(feature_flag: bool, is_simulation: bool, auth_key: vector<u8>): bool {
+   features::spec_simulation_enhancement_enabled() && is_simulation && vector::is_empty(auth_key)
+}
+
+ + + + + +### Function `script_prologue_extended` + + +
fun script_prologue_extended(sender: signer, txn_sequence_number: u64, txn_public_key: vector<u8>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, _script_hash: vector<u8>, is_simulation: bool)
+
+ + + + +
pragma verify = false;
+include PrologueCommonAbortsIf {
+    gas_payer: signer::address_of(sender),
+    txn_authentication_key: txn_public_key
+};
+
+ + + ### Function `multi_agent_script_prologue` @@ -856,6 +1194,22 @@ Give some constraints that may abort according to the conditions.
+ + +
pragma verify = false;
+
+ + + + + +### Function `multi_agent_script_prologue_extended` + + +
fun multi_agent_script_prologue_extended(sender: signer, txn_sequence_number: u64, txn_sender_public_key: vector<u8>, secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<vector<u8>>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, is_simulation: bool)
+
+ + Aborts if length of public key hashed vector not equal the number of singers. @@ -871,6 +1225,7 @@ not equal the number of singers. include MultiAgentPrologueCommonAbortsIf { secondary_signer_addresses, secondary_signer_public_key_hashes, + is_simulation, };
@@ -881,7 +1236,7 @@ not equal the number of singers. ### Function `multi_agent_common_prologue` -
fun multi_agent_common_prologue(secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<vector<u8>>)
+
fun multi_agent_common_prologue(secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<vector<u8>>, is_simulation: bool)
 
@@ -890,6 +1245,7 @@ not equal the number of singers.
include MultiAgentPrologueCommonAbortsIf {
     secondary_signer_addresses,
     secondary_signer_public_key_hashes,
+    is_simulation,
 };
 
@@ -906,6 +1262,22 @@ not equal the number of singers. +
pragma verify = false;
+
+ + + + + +### Function `fee_payer_script_prologue_extended` + + +
fun fee_payer_script_prologue_extended(sender: signer, txn_sequence_number: u64, txn_sender_public_key: vector<u8>, secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<vector<u8>>, fee_payer_address: address, fee_payer_public_key_hash: vector<u8>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, is_simulation: bool)
+
+ + + +
pragma verify_duration_estimate = 120;
 aborts_if !features::spec_is_enabled(features::FEE_PAYER_ENABLED);
 let gas_payer = fee_payer_address;
@@ -917,6 +1289,7 @@ not equal the number of singers.
 include MultiAgentPrologueCommonAbortsIf {
     secondary_signer_addresses,
     secondary_signer_public_key_hashes,
+    is_simulation,
 };
 aborts_if !account::exists_at(gas_payer);
 aborts_if !(fee_payer_public_key_hash == account::get_authentication_key(gas_payer));
@@ -934,6 +1307,22 @@ not equal the number of singers.
 
+ + +
pragma verify = false;
+
+ + + + + +### Function `epilogue_extended` + + +
fun epilogue_extended(account: signer, storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, gas_units_remaining: u64, is_simulation: bool)
+
+ + Abort according to the conditions. SupraCoinCapabilities and CoinInfo should exists. Skip transaction_fee::burn_fee verification. @@ -954,13 +1343,9 @@ Skip transaction_fee::burn_fee verification.
-Abort according to the conditions. -SupraCoinCapabilities and CoinInfo should exist. -Skip transaction_fee::burn_fee verification.
pragma verify = false;
-include EpilogueGasPayerAbortsIf;
 
@@ -1030,4 +1415,24 @@ Skip transaction_fee::burn_fee verification.
+ + + +### Function `epilogue_gas_payer_extended` + + +
fun epilogue_gas_payer_extended(account: signer, gas_payer: address, storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, gas_units_remaining: u64, is_simulation: bool)
+
+ + +Abort according to the conditions. +SupraCoinCapabilities and CoinInfo should exist. +Skip transaction_fee::burn_fee verification. + + +
pragma verify = false;
+include EpilogueGasPayerAbortsIf;
+
+ + [move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/doc/vesting.md b/aptos-move/framework/supra-framework/doc/vesting.md index d472cbb7abc6e..2f922b9075945 100644 --- a/aptos-move/framework/supra-framework/doc/vesting.md +++ b/aptos-move/framework/supra-framework/doc/vesting.md @@ -3686,7 +3686,6 @@ This address should be deterministic for the same admin and vesting contract cre
schema TotalAccumulatedRewardsAbortsIf {
     vesting_contract_address: address;
-    requires staking_contract.commission_percentage >= 0 && staking_contract.commission_percentage <= 100;
     include ActiveVestingContractAbortsIf<VestingContract>{contract_address: vesting_contract_address};
     let vesting_contract = global<VestingContract>(vesting_contract_address);
     let staker = vesting_contract_address;
@@ -3892,7 +3891,6 @@ This address should be deterministic for the same admin and vesting contract cre
 
 
pragma verify = false;
 aborts_if len(contract_addresses) == 0;
-include PreconditionAbortsIf;
 
@@ -3927,20 +3925,6 @@ This address should be deterministic for the same admin and vesting contract cre
pragma verify = false;
 aborts_if len(contract_addresses) == 0;
-include PreconditionAbortsIf;
-
- - - - - - - -
schema PreconditionAbortsIf {
-    contract_addresses: vector<address>;
-    requires forall i in 0..len(contract_addresses): simple_map::spec_get(global<staking_contract::Store>(contract_addresses[i]).staking_contracts, global<VestingContract>(contract_addresses[i]).staking.operator).commission_percentage >= 0
-        && simple_map::spec_get(global<staking_contract::Store>(contract_addresses[i]).staking_contracts, global<VestingContract>(contract_addresses[i]).staking.operator).commission_percentage <= 100;
-}
 
diff --git a/aptos-move/framework/supra-framework/doc/vesting_without_staking.md b/aptos-move/framework/supra-framework/doc/vesting_without_staking.md index 8b6ddf1f8ef2e..fd1a383778704 100644 --- a/aptos-move/framework/supra-framework/doc/vesting_without_staking.md +++ b/aptos-move/framework/supra-framework/doc/vesting_without_staking.md @@ -1114,7 +1114,7 @@ Create a vesting schedule with the given schedule of distributions, a vesting st -
public entry fun create_vesting_contract_with_amounts(admin: &signer, shareholders: vector<address>, amounts: vector<u64>, schedule_numerator: vector<u64>, schedule_denominator: u64, start_timestamp_secs: u64, period_duration: u64, withdrawal_address: address, contract_creation_seed: vector<u8>)
+
public entry fun create_vesting_contract_with_amounts(admin: &signer, shareholders: vector<address>, shares: vector<u64>, vesting_numerators: vector<u64>, vesting_denominator: u64, start_timestamp_secs: u64, period_duration: u64, withdrawal_address: address, contract_creation_seed: vector<u8>)
 
@@ -1126,9 +1126,9 @@ Create a vesting schedule with the given schedule of distributions, a vesting st
public entry fun create_vesting_contract_with_amounts (
     admin: &signer,
     shareholders: vector<address>,
-    amounts: vector<u64>,
-    schedule_numerator: vector<u64>,
-    schedule_denominator: u64,
+    shares: vector<u64>,
+    vesting_numerators: vector<u64>,
+    vesting_denominator: u64,
     start_timestamp_secs: u64,
     period_duration: u64,
     withdrawal_address: address,
@@ -1136,11 +1136,11 @@ Create a vesting schedule with the given schedule of distributions, a vesting st
 ) acquires AdminStore {
     assert!(!system_addresses::is_reserved_address(withdrawal_address),
         error::invalid_argument(EINVALID_WITHDRAWAL_ADDRESS),);
-    assert_account_is_registered_for_apt(withdrawal_address);
+    assert_account_is_registered_for_supra(withdrawal_address);
     assert!(vector::length(&shareholders) > 0,
         error::invalid_argument(ENO_SHAREHOLDERS));
     assert!(
-        vector::length(&shareholders) == vector::length(&amounts),
+        vector::length(&shareholders) == vector::length(&shares),
         error::invalid_argument(ESHARES_LENGTH_MISMATCH),
     );
 
@@ -1160,15 +1160,15 @@ Create a vesting schedule with the given schedule of distributions, a vesting st
     let (contract_signer, contract_signer_cap) = create_vesting_contract_account(admin,
         contract_creation_seed);
     let contract_signer_address = signer::address_of(&contract_signer);
-    let schedule = vector::map_ref(&schedule_numerator, |numerator| {
-        let event = fixed_point32::create_from_rational(*numerator, schedule_denominator);
+    let schedule = vector::map_ref(&vesting_numerators, |numerator| {
+        let event = fixed_point32::create_from_rational(*numerator, vesting_denominator);
         event
     });
 
     let vesting_schedule = create_vesting_schedule(schedule, start_timestamp_secs, period_duration);
     let shareholders_map = simple_map::create<address, VestingRecord>();
     let grant_amount = 0;
-    vector::for_each_reverse(amounts, |amount| {
+    vector::for_each_reverse(shares, |amount| {
         let shareholder = vector::pop_back(&mut shareholders);
         simple_map::add(&mut shareholders_map,
             shareholder,
@@ -2278,13 +2278,18 @@ This address should be deterministic for the same admin and vesting contract cre
 
 
 
-
pragma verify = true;
+
pragma verify = false;
 let amount = min(vesting_record.left_amount, fixed_point32::spec_multiply_u64(vesting_record.init_amount, vesting_fraction));
 ensures vesting_record.left_amount == old(vesting_record.left_amount) - amount;
 let address_from = signer_cap.account;
-ensures beneficiary != address_from ==>
-    (coin::balance<SupraCoin>(beneficiary) == old(coin::balance<SupraCoin>(beneficiary)) + amount
-    && coin::balance<SupraCoin>(address_from) == old(coin::balance<SupraCoin>(address_from)) - amount);
+let coin_store_from = global<coin::CoinStore<SupraCoin>>(address_from);
+let post coin_store_post_from = global<coin::CoinStore<SupraCoin>>(address_from);
+let coin_store_to = global<coin::CoinStore<SupraCoin>>(beneficiary);
+let post coin_store_post_to = global<coin::CoinStore<SupraCoin>>(beneficiary);
+ensures beneficiary != address_from ==> coin_store_post_from.coin.value ==
+    coin_store_from.coin.value - amount;
+ensures beneficiary != address_from ==> coin_store_post_to.coin.value == coin_store_to.coin.value + amount;
+ensures beneficiary == address_from ==> coin_store_post_from.coin.value == coin_store_from.coin.value;
 
@@ -2300,15 +2305,15 @@ This address should be deterministic for the same admin and vesting contract cre -
pragma verify = true;
+
pragma verify = false;
 pragma aborts_if_is_partial = true;
 include AdminAborts;
 let vesting_contract = global<VestingContract>(contract_address);
 let post vesting_contract_post = global<VestingContract>(contract_address);
-let balance_pre = coin::balance<SupraCoin>(vesting_contract.withdrawal_address);
-let post balance_post = coin::balance<SupraCoin>(vesting_contract_post.withdrawal_address);
+let balance_pre = global<coin::CoinStore<SupraCoin>>(vesting_contract.withdrawal_address).coin.value;
+let post balance_post = global<coin::CoinStore<SupraCoin>>(vesting_contract.withdrawal_address).coin.value;
 let shareholder_amount = simple_map::spec_get(vesting_contract.shareholders, shareholder_address).left_amount;
-ensures vesting_contract_post.withdrawal_address != vesting_contract.signer_cap.account ==> balance_post == balance_pre + shareholder_amount;
+ensures vesting_contract_post.withdrawal_address != vesting_contract.signer_cap.account;
 ensures !simple_map::spec_contains_key(vesting_contract_post.shareholders, shareholder_address);
 ensures !simple_map::spec_contains_key(vesting_contract_post.beneficiaries, shareholder_address);
 
@@ -2329,9 +2334,9 @@ This address should be deterministic for the same admin and vesting contract cre
pragma verify = true;
 pragma aborts_if_is_partial = true;
 let vesting_contract = global<VestingContract>(contract_address);
-let balance_pre = coin::balance<SupraCoin>(vesting_contract.withdrawal_address);
-let post balance_post = coin::balance<SupraCoin>(vesting_contract.withdrawal_address);
-let post balance_contract = coin::balance<SupraCoin>(contract_address);
+let balance_pre = global<coin::CoinStore<SupraCoin>>(vesting_contract.withdrawal_address).coin.value;
+let post balance_post = global<coin::CoinStore<SupraCoin>>(vesting_contract.withdrawal_address).coin.value;
+let post balance_contract = global<coin::CoinStore<SupraCoin>>(contract_address).coin.value;
 aborts_if !(global<VestingContract>(contract_address).state == VESTING_POOL_TERMINATED);
 
diff --git a/aptos-move/framework/supra-framework/sources/account.move b/aptos-move/framework/supra-framework/sources/account.move index 5e7894a54a4fd..9c074faf99297 100644 --- a/aptos-move/framework/supra-framework/sources/account.move +++ b/aptos-move/framework/supra-framework/sources/account.move @@ -163,7 +163,7 @@ module supra_framework::account { const EACCOUNT_ALREADY_USED: u64 = 16; /// Offerer address doesn't exist const EOFFERER_ADDRESS_DOES_NOT_EXIST: u64 = 17; - /// The specified rotation capablity offer does not exist at the specified offerer address + /// The specified rotation capability offer does not exist at the specified offerer address const ENO_SUCH_ROTATION_CAPABILITY_OFFER: u64 = 18; // The signer capability is not offered to any address const ENO_SIGNER_CAPABILITY_OFFERED: u64 = 19; @@ -310,7 +310,7 @@ module supra_framework::account { /// Here is an example attack if we don't ask for the second signature `cap_update_table`: /// Alice has rotated her account `addr_a` to `new_addr_a`. As a result, the following entry is created, to help Alice when recovering her wallet: /// `OriginatingAddress[new_addr_a]` -> `addr_a` - /// Alice has had bad day: her laptop blew up and she needs to reset her account on a new one. + /// Alice has had a bad day: her laptop blew up and she needs to reset her account on a new one. /// (Fortunately, she still has her secret key `new_sk_a` associated with her new address `new_addr_a`, so she can do this.) /// /// But Bob likes to mess with Alice. diff --git a/aptos-move/framework/supra-framework/sources/account.spec.move b/aptos-move/framework/supra-framework/sources/account.spec.move index f379f6605116e..26bc0efd85953 100644 --- a/aptos-move/framework/supra-framework/sources/account.spec.move +++ b/aptos-move/framework/supra-framework/sources/account.spec.move @@ -575,6 +575,7 @@ spec supra_framework::account { // This function should not abort assuming the result of `sha3_256` is deserializable into an address. 
aborts_if [abstract] false; ensures [abstract] result == spec_create_resource_address(source, seed); + ensures [abstract] source != result; // We can assume that the derived resource account does not equal to `source` } spec fun spec_create_resource_address(source: address, seed: vector): address; @@ -617,7 +618,7 @@ spec supra_framework::account { } /// The Account existed under the signer. - /// The guid_creation_num of the ccount resource is up to MAX_U64. + /// The guid_creation_num of the account resource is up to MAX_U64. spec create_guid(account_signer: &signer): guid::GUID { let addr = signer::address_of(account_signer); include NewEventHandleAbortsIf { diff --git a/aptos-move/framework/supra-framework/sources/aggregator_v2/aggregator_v2.spec.move b/aptos-move/framework/supra-framework/sources/aggregator_v2/aggregator_v2.spec.move index ecde426452056..2fdb1dbca0110 100644 --- a/aptos-move/framework/supra-framework/sources/aggregator_v2/aggregator_v2.spec.move +++ b/aptos-move/framework/supra-framework/sources/aggregator_v2/aggregator_v2.spec.move @@ -39,11 +39,33 @@ spec supra_framework::aggregator_v2 { pragma opaque; } + spec read_snapshot { + // TODO: temporary mockup. + pragma opaque; + } + + spec read_derived_string { + // TODO: temporary mockup. + pragma opaque; + } + + spec create_derived_string { + // TODO: temporary mockup. + pragma opaque; + } + + spec derive_string_concat { + // TODO: temporary mockup. + pragma opaque; + } + + // deprecated spec copy_snapshot { // TODO: temporary mockup. pragma opaque; } + // deprecated spec string_concat { // TODO: temporary mockup. 
pragma opaque; diff --git a/aptos-move/framework/supra-framework/sources/configs/config_buffer.spec.move b/aptos-move/framework/supra-framework/sources/configs/config_buffer.spec.move index e68efb0ee693a..03f5b60fddf73 100644 --- a/aptos-move/framework/supra-framework/sources/configs/config_buffer.spec.move +++ b/aptos-move/framework/supra-framework/sources/configs/config_buffer.spec.move @@ -32,7 +32,7 @@ spec supra_framework::config_buffer { let key = type_info::type_name(); aborts_if !simple_map::spec_contains_key(configs.configs, key); include any::UnpackAbortsIf { - x: simple_map::spec_get(configs.configs, key) + self: simple_map::spec_get(configs.configs, key) }; } @@ -51,7 +51,7 @@ spec supra_framework::config_buffer { let configs = global(@supra_framework); // TODO(#12015) include spec_fun_does_exist(type_name) ==> any::UnpackAbortsIf { - x: simple_map::spec_get(configs.configs, type_name) + self: simple_map::spec_get(configs.configs, type_name) }; } @@ -61,7 +61,7 @@ spec supra_framework::config_buffer { let configs = global(@supra_framework); // TODO(#12015) include spec_fun_does_exist(type_name) ==> any::UnpackRequirement { - x: simple_map::spec_get(configs.configs, type_name) + self: simple_map::spec_get(configs.configs, type_name) }; } diff --git a/aptos-move/framework/supra-framework/sources/dispatchable_fungible_asset.move b/aptos-move/framework/supra-framework/sources/dispatchable_fungible_asset.move index e86426a3c3e21..3480111caaae9 100644 --- a/aptos-move/framework/supra-framework/sources/dispatchable_fungible_asset.move +++ b/aptos-move/framework/supra-framework/sources/dispatchable_fungible_asset.move @@ -58,6 +58,16 @@ module supra_framework::dispatchable_fungible_asset { ); } + public fun register_derive_supply_dispatch_function( + constructor_ref: &ConstructorRef, + dispatch_function: Option + ) { + fungible_asset::register_derive_supply_dispatch_function( + constructor_ref, + dispatch_function + ); + } + /// Withdraw `amount` of the 
fungible asset from `store` by the owner. /// /// The semantics of deposit will be governed by the function specified in DispatchFunctionStore. @@ -162,6 +172,25 @@ module supra_framework::dispatchable_fungible_asset { } } + #[view] + /// Get the derived supply of the fungible asset using the overloaded hook. + /// + /// The semantics of supply will be governed by the function specified in DeriveSupplyDispatch. + public fun derived_supply(metadata: Object): Option { + let func_opt = fungible_asset::derived_supply_dispatch_function(metadata); + if (option::is_some(&func_opt)) { + assert!( + features::dispatchable_fungible_asset_enabled(), + error::aborted(ENOT_ACTIVATED) + ); + let func = option::borrow(&func_opt); + function_info::load_module_from_function(func); + dispatchable_derived_supply(metadata, func) + } else { + fungible_asset::supply(metadata) + } + } + inline fun borrow_transfer_ref(metadata: Object): &TransferRef acquires TransferRefStore { let metadata_addr = object::object_address( &fungible_asset::store_metadata(metadata) @@ -191,4 +220,9 @@ module supra_framework::dispatchable_fungible_asset { store: Object, function: &FunctionInfo, ): u64; + + native fun dispatchable_derived_supply( + store: Object, + function: &FunctionInfo, + ): Option; } diff --git a/aptos-move/framework/supra-framework/sources/dispatchable_fungible_asset.spec.move b/aptos-move/framework/supra-framework/sources/dispatchable_fungible_asset.spec.move index b5df8d7fafca6..60f78c0b32ef7 100644 --- a/aptos-move/framework/supra-framework/sources/dispatchable_fungible_asset.spec.move +++ b/aptos-move/framework/supra-framework/sources/dispatchable_fungible_asset.spec.move @@ -14,4 +14,8 @@ spec supra_framework::dispatchable_fungible_asset { spec dispatchable_derived_balance{ pragma opaque; } + + spec dispatchable_derived_supply{ + pragma opaque; + } } diff --git a/aptos-move/framework/supra-framework/sources/fungible_asset.move 
b/aptos-move/framework/supra-framework/sources/fungible_asset.move index 966e73fcea509..cc9dca7223ad4 100644 --- a/aptos-move/framework/supra-framework/sources/fungible_asset.move +++ b/aptos-move/framework/supra-framework/sources/fungible_asset.move @@ -85,6 +85,8 @@ module supra_framework::fungible_asset { const EAPT_NOT_DISPATCHABLE: u64 = 31; /// Flag for Concurrent Supply not enabled const ECONCURRENT_BALANCE_NOT_ENABLED: u64 = 32; + /// Provided derived_supply function type doesn't meet the signature requirement. + const EDERIVED_SUPPLY_FUNCTION_SIGNATURE_MISMATCH: u64 = 33; // // Constants @@ -152,6 +154,11 @@ module supra_framework::fungible_asset { derived_balance_function: Option, } + #[resource_group_member(group = supra_framework::object::ObjectGroup)] + struct DeriveSupply has key { + dispatch_function: Option + } + #[resource_group_member(group = supra_framework::object::ObjectGroup)] /// The store object that holds concurrent fungible asset balance. struct ConcurrentFungibleBalance has key { @@ -348,28 +355,13 @@ module supra_framework::fungible_asset { ) ); }); - - // Cannot register hook for SUPRA. - assert!( - object::address_from_constructor_ref(constructor_ref) != @aptos_fungible_asset, - error::permission_denied(EAPT_NOT_DISPATCHABLE) - ); - assert!( - !object::can_generate_delete_ref(constructor_ref), - error::invalid_argument(EOBJECT_IS_DELETABLE) - ); + register_dispatch_function_sanity_check(constructor_ref); assert!( !exists( object::address_from_constructor_ref(constructor_ref) ), error::already_exists(EALREADY_REGISTERED) ); - assert!( - exists( - object::address_from_constructor_ref(constructor_ref) - ), - error::not_found(EFUNGIBLE_METADATA_EXISTENCE), - ); let store_obj = &object::generate_signer(constructor_ref); @@ -384,6 +376,70 @@ module supra_framework::fungible_asset { ); } + /// Define the derived supply dispatch with the provided function. 
+ public(friend) fun register_derive_supply_dispatch_function( + constructor_ref: &ConstructorRef, + dispatch_function: Option + ) { + // Verify that caller type matches callee type so wrongly typed function cannot be registered. + option::for_each_ref(&dispatch_function, |supply_function| { + let function_info = function_info::new_function_info_from_address( + @supra_framework, + string::utf8(b"dispatchable_fungible_asset"), + string::utf8(b"dispatchable_derived_supply"), + ); + // Verify that caller type matches callee type so wrongly typed function cannot be registered. + assert!( + function_info::check_dispatch_type_compatibility( + &function_info, + supply_function + ), + error::invalid_argument( + EDERIVED_SUPPLY_FUNCTION_SIGNATURE_MISMATCH + ) + ); + }); + register_dispatch_function_sanity_check(constructor_ref); + assert!( + !exists( + object::address_from_constructor_ref(constructor_ref) + ), + error::already_exists(EALREADY_REGISTERED) + ); + + + let store_obj = &object::generate_signer(constructor_ref); + + // Store the overload function hook. + move_to( + store_obj, + DeriveSupply { + dispatch_function + } + ); + } + + /// Check the requirements for registering a dispatchable function. + inline fun register_dispatch_function_sanity_check( + constructor_ref: &ConstructorRef, + ) { + // Cannot register hook for APT. + assert!( + object::address_from_constructor_ref(constructor_ref) != @aptos_fungible_asset, + error::permission_denied(EAPT_NOT_DISPATCHABLE) + ); + assert!( + !object::can_generate_delete_ref(constructor_ref), + error::invalid_argument(EOBJECT_IS_DELETABLE) + ); + assert!( + exists( + object::address_from_constructor_ref(constructor_ref) + ), + error::not_found(EFUNGIBLE_METADATA_EXISTENCE), + ); + } + /// Creates a mint ref that can be used to mint fungible assets from the given fungible object's constructor ref. /// This can only be called at object creation time as constructor_ref is only available then. 
public fun generate_mint_ref(constructor_ref: &ConstructorRef): MintRef { @@ -625,6 +681,15 @@ module supra_framework::fungible_asset { } } + public(friend) fun derived_supply_dispatch_function(metadata: Object): Option acquires DeriveSupply { + let metadata_addr = object::object_address(&metadata); + if (exists(metadata_addr)) { + borrow_global(metadata_addr).dispatch_function + } else { + option::none() + } + } + public fun asset_metadata(fa: &FungibleAsset): Object { fa.metadata } @@ -895,19 +960,29 @@ module supra_framework::fungible_asset { let mutable_metadata = borrow_global_mut(metadata_address); if (option::is_some(&name)){ - mutable_metadata.name = option::extract(&mut name); + let name = option::extract(&mut name); + assert!(string::length(&name) <= MAX_NAME_LENGTH, error::out_of_range(ENAME_TOO_LONG)); + mutable_metadata.name = name; }; if (option::is_some(&symbol)){ - mutable_metadata.symbol = option::extract(&mut symbol); + let symbol = option::extract(&mut symbol); + assert!(string::length(&symbol) <= MAX_SYMBOL_LENGTH, error::out_of_range(ESYMBOL_TOO_LONG)); + mutable_metadata.symbol = symbol; }; if (option::is_some(&decimals)){ - mutable_metadata.decimals = option::extract(&mut decimals); + let decimals = option::extract(&mut decimals); + assert!(decimals <= MAX_DECIMALS, error::out_of_range(EDECIMALS_TOO_LARGE)); + mutable_metadata.decimals = decimals; }; if (option::is_some(&icon_uri)){ - mutable_metadata.icon_uri = option::extract(&mut icon_uri); + let icon_uri = option::extract(&mut icon_uri); + assert!(string::length(&icon_uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG)); + mutable_metadata.icon_uri = icon_uri; }; if (option::is_some(&project_uri)){ - mutable_metadata.project_uri = option::extract(&mut project_uri); + let project_uri = option::extract(&mut project_uri); + assert!(string::length(&project_uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG)); + mutable_metadata.project_uri = project_uri; }; } @@ -1246,13 
+1321,13 @@ module supra_framework::fungible_asset { mutate_metadata( &mutate_metadata_ref, option::some(string::utf8(b"mutated_name")), - option::some(string::utf8(b"mutated_symbol")), + option::some(string::utf8(b"m_symbol")), option::none(), option::none(), option::none() ); assert!(name(metadata) == string::utf8(b"mutated_name"), 8); - assert!(symbol(metadata) == string::utf8(b"mutated_symbol"), 9); + assert!(symbol(metadata) == string::utf8(b"m_symbol"), 9); assert!(decimals(metadata) == 0, 10); assert!(icon_uri(metadata) == string::utf8(b"http://www.example.com/favicon.ico"), 11); assert!(project_uri(metadata) == string::utf8(b"http://www.example.com"), 12); @@ -1327,13 +1402,13 @@ module supra_framework::fungible_asset { mutate_metadata( &mutate_metadata_ref, option::some(string::utf8(b"mutated_name")), - option::some(string::utf8(b"mutated_symbol")), + option::some(string::utf8(b"m_symbol")), option::some(10), option::some(string::utf8(b"http://www.mutated-example.com/favicon.ico")), option::some(string::utf8(b"http://www.mutated-example.com")) ); assert!(name(metadata) == string::utf8(b"mutated_name"), 1); - assert!(symbol(metadata) == string::utf8(b"mutated_symbol"), 2); + assert!(symbol(metadata) == string::utf8(b"m_symbol"), 2); assert!(decimals(metadata) == 10, 3); assert!(icon_uri(metadata) == string::utf8(b"http://www.mutated-example.com/favicon.ico"), 4); assert!(project_uri(metadata) == string::utf8(b"http://www.mutated-example.com"), 5); @@ -1349,18 +1424,115 @@ module supra_framework::fungible_asset { mutate_metadata( &mutate_metadata_ref, option::some(string::utf8(b"mutated_name")), - option::some(string::utf8(b"mutated_symbol")), + option::some(string::utf8(b"m_symbol")), option::none(), option::none(), option::none() ); assert!(name(metadata) == string::utf8(b"mutated_name"), 8); - assert!(symbol(metadata) == string::utf8(b"mutated_symbol"), 9); + assert!(symbol(metadata) == string::utf8(b"m_symbol"), 9); assert!(decimals(metadata) == 0, 10); 
assert!(icon_uri(metadata) == string::utf8(b"http://www.example.com/favicon.ico"), 11); assert!(project_uri(metadata) == string::utf8(b"http://www.example.com"), 12); } + #[test(creator = @0xcafe)] + #[expected_failure(abort_code = 0x2000f, location = Self)] + fun test_mutate_metadata_name_over_maximum_length( + creator: &signer + ) acquires Metadata { + let (_mint_ref, _transfer_ref, _burn_ref, mutate_metadata_ref, _) = create_fungible_asset(creator); + + mutate_metadata( + &mutate_metadata_ref, + option::some(string::utf8(b"mutated_name_will_be_too_long_for_the_maximum_length_check")), + option::none(), + option::none(), + option::none(), + option::none() + ); + } + + #[test(creator = @0xcafe)] + #[expected_failure(abort_code = 0x20010, location = Self)] + fun test_mutate_metadata_symbol_over_maximum_length( + creator: &signer + ) acquires Metadata { + let (_mint_ref, _transfer_ref, _burn_ref, mutate_metadata_ref, _) = create_fungible_asset(creator); + + mutate_metadata( + &mutate_metadata_ref, + option::none(), + option::some(string::utf8(b"mutated_symbol_will_be_too_long_for_the_maximum_length_check")), + option::none(), + option::none(), + option::none() + ); + } + + #[test(creator = @0xcafe)] + #[expected_failure(abort_code = 0x20011, location = Self)] + fun test_mutate_metadata_decimals_over_maximum_amount( + creator: &signer + ) acquires Metadata { + let (_mint_ref, _transfer_ref, _burn_ref, mutate_metadata_ref, _) = create_fungible_asset(creator); + + mutate_metadata( + &mutate_metadata_ref, + option::none(), + option::none(), + option::some(50), + option::none(), + option::none() + ); + } + + #[test_only] + fun create_exceedingly_long_uri(): vector { + use std::vector; + + let too_long_of_uri = b"mutated_uri_will_be_too_long_for_the_maximum_length_check.com/"; + for (i in 0..50) { + vector::append(&mut too_long_of_uri, b"too_long_of_uri"); + }; + + too_long_of_uri + } + + #[test(creator = @0xcafe)] + #[expected_failure(abort_code = 0x20013, location = 
Self)] + fun test_mutate_metadata_icon_uri_over_maximum_length( + creator: &signer + ) acquires Metadata { + let (_mint_ref, _transfer_ref, _burn_ref, mutate_metadata_ref, _) = create_fungible_asset(creator); + let too_long_of_uri = create_exceedingly_long_uri(); + mutate_metadata( + &mutate_metadata_ref, + option::none(), + option::none(), + option::none(), + option::some(string::utf8(too_long_of_uri)), + option::none() + ); + } + + #[test(creator = @0xcafe)] + #[expected_failure(abort_code = 0x20013, location = Self)] + fun test_mutate_metadata_project_uri_over_maximum_length( + creator: &signer + ) acquires Metadata { + let (_mint_ref, _transfer_ref, _burn_ref, mutate_metadata_ref, _) = create_fungible_asset(creator); + let too_long_of_uri = create_exceedingly_long_uri(); + mutate_metadata( + &mutate_metadata_ref, + option::none(), + option::none(), + option::none(), + option::none(), + option::some(string::utf8(too_long_of_uri)) + ); + } + #[test(creator = @0xcafe)] fun test_merge_and_exact(creator: &signer) acquires Supply, ConcurrentSupply { let (mint_ref, _transfer_ref, burn_ref, _mutate_metadata_ref, _) = create_fungible_asset(creator); diff --git a/aptos-move/framework/supra-framework/sources/jwks.move b/aptos-move/framework/supra-framework/sources/jwks.move index 11a684633a487..5184bdc85c740 100644 --- a/aptos-move/framework/supra-framework/sources/jwks.move +++ b/aptos-move/framework/supra-framework/sources/jwks.move @@ -5,9 +5,11 @@ /// write some of the resources in this file. As a result, the structs in this file are declared so as to /// have a simple layout which is easily accessible in Rust. 
module supra_framework::jwks { + use std::bcs; use std::error; use std::option; use std::option::Option; + use std::signer; use std::string; use std::string::{String, utf8}; use std::vector; @@ -25,12 +27,19 @@ module supra_framework::jwks { friend supra_framework::genesis; friend supra_framework::reconfiguration_with_dkg; + /// We limit the size of a `PatchedJWKs` resource installed by a dapp owner for federated keyless accounts. + /// Note: If too large, validators waste work reading it for invalid TXN signatures. + const MAX_FEDERATED_JWKS_SIZE_BYTES: u64 = 2 * 1024; // 2 KiB + const EUNEXPECTED_EPOCH: u64 = 1; const EUNEXPECTED_VERSION: u64 = 2; const EUNKNOWN_PATCH_VARIANT: u64 = 3; const EUNKNOWN_JWK_VARIANT: u64 = 4; const EISSUER_NOT_FOUND: u64 = 5; const EJWK_ID_NOT_FOUND: u64 = 6; + const EINSTALL_FEDERATED_JWKS_AT_SUPRA_FRAMEWORK: u64 = 7; + const EFEDERATED_JWKS_TOO_LARGE: u64 = 8; + const EINVALID_FEDERATED_JWK_SET: u64 = 9; const ENATIVE_MISSING_RESOURCE_VALIDATOR_SET: u64 = 0x0101; const ENATIVE_MISSING_RESOURCE_OBSERVED_JWKS: u64 = 0x0102; @@ -155,11 +164,115 @@ module supra_framework::jwks { jwks: AllProvidersJWKs, } + /// JWKs for federated keyless accounts are stored in this resource. + struct FederatedJWKs has drop, key { + jwks: AllProvidersJWKs, + } + // // Structs end. // Functions begin. // + /// Called by a federated keyless dapp owner to install the JWKs for the federated OIDC provider (e.g., Auth0, AWS + /// Cognito, etc). For type-safety, we explicitly use a `struct FederatedJWKs { jwks: AllProviderJWKs }` instead of + /// reusing `PatchedJWKs { jwks: AllProviderJWKs }`, which is a JWK-consensus-specific struct. + public fun patch_federated_jwks(jwk_owner: &signer, patches: vector) acquires FederatedJWKs { + // Prevents accidental calls in 0x1::jwks that install federated JWKs at the Supra framework address. 
+ assert!(!system_addresses::is_supra_framework_address(signer::address_of(jwk_owner)), + error::invalid_argument(EINSTALL_FEDERATED_JWKS_AT_SUPRA_FRAMEWORK) + ); + + let jwk_addr = signer::address_of(jwk_owner); + if (!exists(jwk_addr)) { + move_to(jwk_owner, FederatedJWKs { jwks: AllProvidersJWKs { entries: vector[] } }); + }; + + let fed_jwks = borrow_global_mut(jwk_addr); + vector::for_each_ref(&patches, |obj|{ + let patch: &Patch = obj; + apply_patch(&mut fed_jwks.jwks, *patch); + }); + + // TODO: Can we check the size more efficiently instead of serializing it via BCS? + let num_bytes = vector::length(&bcs::to_bytes(fed_jwks)); + assert!(num_bytes < MAX_FEDERATED_JWKS_SIZE_BYTES, error::invalid_argument(EFEDERATED_JWKS_TOO_LARGE)); + } + + /// This can be called to install or update a set of JWKs for a federated OIDC provider. This function should + be invoked to initially install a set of JWKs or to update a set of JWKs when a keypair is rotated. + /// + /// The `iss` parameter is the value of the `iss` claim on the JWTs that are to be verified by the JWK set. + `kid_vec`, `alg_vec`, `e_vec`, `n_vec` are String vectors of the JWK attributes `kid`, `alg`, `e` and `n` respectively. + See https://datatracker.ietf.org/doc/html/rfc7517#section-4 for more details about the JWK attributes aforementioned. 
+ /// + /// For the example JWK set snapshot below containing 2 keys for Google found at https://www.googleapis.com/oauth2/v3/certs - + /// ```json + /// { + /// "keys": [ + /// { + /// "alg": "RS256", + /// "use": "sig", + /// "kty": "RSA", + /// "n": "wNHgGSG5B5xOEQNFPW2p_6ZxZbfPoAU5VceBUuNwQWLop0ohW0vpoZLU1tAsq_S9s5iwy27rJw4EZAOGBR9oTRq1Y6Li5pDVJfmzyRNtmWCWndR-bPqhs_dkJU7MbGwcvfLsN9FSHESFrS9sfGtUX-lZfLoGux23TKdYV9EE-H-NDASxrVFUk2GWc3rL6UEMWrMnOqV9-tghybDU3fcRdNTDuXUr9qDYmhmNegYjYu4REGjqeSyIG1tuQxYpOBH-tohtcfGY-oRTS09kgsSS9Q5BRM4qqCkGP28WhlSf4ui0-norS0gKMMI1P_ZAGEsLn9p2TlYMpewvIuhjJs1thw", + /// "kid": "d7b939771a7800c413f90051012d975981916d71", + /// "e": "AQAB" + /// }, + /// { + /// "kty": "RSA", + /// "kid": "b2620d5e7f132b52afe8875cdf3776c064249d04", + /// "alg": "RS256", + /// "n": "pi22xDdK2fz5gclIbDIGghLDYiRO56eW2GUcboeVlhbAuhuT5mlEYIevkxdPOg5n6qICePZiQSxkwcYMIZyLkZhSJ2d2M6Szx2gDtnAmee6o_tWdroKu0DjqwG8pZU693oLaIjLku3IK20lTs6-2TeH-pUYMjEqiFMhn-hb7wnvH_FuPTjgz9i0rEdw_Hf3Wk6CMypaUHi31y6twrMWq1jEbdQNl50EwH-RQmQ9bs3Wm9V9t-2-_Jzg3AT0Ny4zEDU7WXgN2DevM8_FVje4IgztNy29XUkeUctHsr-431_Iu23JIy6U4Kxn36X3RlVUKEkOMpkDD3kd81JPW4Ger_w", + /// "e": "AQAB", + /// "use": "sig" + /// } + /// ] + /// } + /// ``` + /// + /// We can call update_federated_jwk_set for Google's `iss` - "https://accounts.google.com" and for each vector + /// argument `kid_vec`, `alg_vec`, `e_vec`, `n_vec`, we set in index 0 the corresponding attribute in the first JWK and we set in index 1 the + /// the corresponding attribute in the second JWK as shown below. 
+ /// + /// ```move + /// use std::string::utf8; + /// supra_framework::jwks::update_federated_jwk_set( + /// jwk_owner, + /// b"https://accounts.google.com", + /// vector[utf8(b"d7b939771a7800c413f90051012d975981916d71"), utf8(b"b2620d5e7f132b52afe8875cdf3776c064249d04")], + /// vector[utf8(b"RS256"), utf8(b"RS256")], + /// vector[utf8(b"AQAB"), utf8(b"AQAB")], + /// vector[ + /// utf8(b"wNHgGSG5B5xOEQNFPW2p_6ZxZbfPoAU5VceBUuNwQWLop0ohW0vpoZLU1tAsq_S9s5iwy27rJw4EZAOGBR9oTRq1Y6Li5pDVJfmzyRNtmWCWndR-bPqhs_dkJU7MbGwcvfLsN9FSHESFrS9sfGtUX-lZfLoGux23TKdYV9EE-H-NDASxrVFUk2GWc3rL6UEMWrMnOqV9-tghybDU3fcRdNTDuXUr9qDYmhmNegYjYu4REGjqeSyIG1tuQxYpOBH-tohtcfGY-oRTS09kgsSS9Q5BRM4qqCkGP28WhlSf4ui0-norS0gKMMI1P_ZAGEsLn9p2TlYMpewvIuhjJs1thw"), + /// utf8(b"pi22xDdK2fz5gclIbDIGghLDYiRO56eW2GUcboeVlhbAuhuT5mlEYIevkxdPOg5n6qICePZiQSxkwcYMIZyLkZhSJ2d2M6Szx2gDtnAmee6o_tWdroKu0DjqwG8pZU693oLaIjLku3IK20lTs6-2TeH-pUYMjEqiFMhn-hb7wnvH_FuPTjgz9i0rEdw_Hf3Wk6CMypaUHi31y6twrMWq1jEbdQNl50EwH-RQmQ9bs3Wm9V9t-2-_Jzg3AT0Ny4zEDU7WXgN2DevM8_FVje4IgztNy29XUkeUctHsr-431_Iu23JIy6U4Kxn36X3RlVUKEkOMpkDD3kd81JPW4Ger_w") + /// ] + /// ) + /// ``` + /// + /// See AIP-96 for more details about federated keyless - https://github.com/aptos-foundation/AIPs/blob/main/aips/aip-96.md + /// + /// NOTE: Currently only RSA keys are supported. 
+ public entry fun update_federated_jwk_set(jwk_owner: &signer, iss: vector, kid_vec: vector, alg_vec: vector, e_vec: vector, n_vec: vector) acquires FederatedJWKs { + assert!(!vector::is_empty(&kid_vec), error::invalid_argument(EINVALID_FEDERATED_JWK_SET)); + let num_jwk = vector::length(&kid_vec); + assert!(vector::length(&alg_vec) == num_jwk , error::invalid_argument(EINVALID_FEDERATED_JWK_SET)); + assert!(vector::length(&e_vec) == num_jwk, error::invalid_argument(EINVALID_FEDERATED_JWK_SET)); + assert!(vector::length(&n_vec) == num_jwk, error::invalid_argument(EINVALID_FEDERATED_JWK_SET)); + + let remove_all_patch = new_patch_remove_all(); + let patches = vector[remove_all_patch]; + while (!vector::is_empty(&kid_vec)) { + let kid = vector::pop_back(&mut kid_vec); + let alg = vector::pop_back(&mut alg_vec); + let e = vector::pop_back(&mut e_vec); + let n = vector::pop_back(&mut n_vec); + let jwk = new_rsa_jwk(kid, alg, e, n); + let patch = new_patch_upsert_jwk(iss, jwk); + vector::push_back(&mut patches, patch) + }; + patch_federated_jwks(jwk_owner, patches); + } + /// Get a JWK by issuer and key ID from the `PatchedJWKs`. /// Abort if such a JWK does not exist. /// More convenient to call from Rust, since it does not wrap the JWK in an `Option`. 
@@ -196,7 +309,7 @@ module supra_framework::jwks { /// b"https://accounts.google.com", /// b"https://accounts.google.com/.well-known/openid-configuration" /// ); - /// supra_framework::aptos_governance::reconfigure(&framework_signer); + /// supra_framework::supra_governance::reconfigure(&framework_signer); /// ``` public fun upsert_oidc_provider_for_next_epoch(fx: &signer, name: vector, config_url: vector): Option> acquires SupportedOIDCProviders { system_addresses::assert_supra_framework(fx); @@ -231,7 +344,7 @@ module supra_framework::jwks { /// &framework_signer, /// b"https://accounts.google.com", /// ); - /// supra_framework::aptos_governance::reconfigure(&framework_signer); + /// supra_framework::supra_governance::reconfigure(&framework_signer); /// ``` public fun remove_oidc_provider_for_next_epoch(fx: &signer, name: vector): Option> acquires SupportedOIDCProviders { system_addresses::assert_supra_framework(fx); @@ -382,7 +495,7 @@ module supra_framework::jwks { *borrow_global_mut(@supra_framework) = PatchedJWKs { jwks }; } - /// Get a JWK by issuer and key ID from a `AllProvidersJWKs`, if it exists. + /// Get a JWK by issuer and key ID from an `AllProvidersJWKs`, if it exists. fun try_get_jwk_by_issuer(jwks: &AllProvidersJWKs, issuer: vector, jwk_id: vector): Option { let (issuer_found, index) = vector::find(&jwks.entries, |obj| { let provider_jwks: &ProviderJWKs = obj; diff --git a/aptos-move/framework/supra-framework/sources/object.move b/aptos-move/framework/supra-framework/sources/object.move index b71a844345af8..3d85120489775 100644 --- a/aptos-move/framework/supra-framework/sources/object.move +++ b/aptos-move/framework/supra-framework/sources/object.move @@ -50,6 +50,8 @@ module supra_framework::object { const EOBJECT_NOT_BURNT: u64 = 8; /// Object is untransferable any operations that might result in a transfer are disallowed. 
const EOBJECT_NOT_TRANSFERRABLE: u64 = 9; + /// Objects cannot be burnt + const EBURN_NOT_ALLOWED: u64 = 10; /// Explicitly separate the GUID space between Object and Account to prevent accidental overlap. const INIT_GUID_CREATION_NUM: u64 = 0x4000000000000; @@ -610,15 +612,12 @@ module supra_framework::object { }; } - /// Forcefully transfer an unwanted object to BURN_ADDRESS, ignoring whether ungated_transfer is allowed. - /// This only works for objects directly owned and for simplicity does not apply to indirectly owned objects. - /// Original owners can reclaim burnt objects any time in the future by calling unburn. - public entry fun burn(owner: &signer, object: Object) acquires ObjectCore { - let original_owner = signer::address_of(owner); - assert!(is_owner(object, original_owner), error::permission_denied(ENOT_OBJECT_OWNER)); - let object_addr = object.inner; - move_to(&create_signer(object_addr), TombStone { original_owner }); - transfer_raw_inner(object_addr, BURN_ADDRESS); + #[deprecated] + /// Previously allowed to burn objects, has now been disabled. Objects can still be unburnt. + /// + /// Please use the test only [`object::burn_object`] for testing with previously burned objects. + public entry fun burn(_owner: &signer, _object: Object) { + abort error::permission_denied(EBURN_NOT_ALLOWED) } /// Allow origin owners to reclaim any objects they previous burnt. @@ -705,6 +704,20 @@ module supra_framework::object { #[test_only] const EWEAPON_DOES_NOT_EXIST: u64 = 0x101; + #[test_only] + /// For testing the previous behavior of `object::burn()` + /// + /// Forcefully transfer an unwanted object to BURN_ADDRESS, ignoring whether ungated_transfer is allowed. + /// This only works for objects directly owned and for simplicity does not apply to indirectly owned objects. + /// Original owners can reclaim burnt objects any time in the future by calling unburn. 
+ public fun burn_object(owner: &signer, object: Object) acquires ObjectCore { + let original_owner = signer::address_of(owner); + assert!(is_owner(object, original_owner), error::permission_denied(ENOT_OBJECT_OWNER)); + let object_addr = object.inner; + move_to(&create_signer(object_addr), TombStone { original_owner }); + transfer_raw_inner(object_addr, BURN_ADDRESS); + } + #[test_only] struct HeroEquipEvent has drop, store { weapon_id: Option>, @@ -820,7 +833,7 @@ module supra_framework::object { #[expected_failure(abort_code = 0x10008, location = Self)] fun test_cannot_unburn_after_transfer_with_ref(creator: &signer) acquires ObjectCore, TombStone { let (hero_constructor, hero) = create_hero(creator); - burn(creator, hero); + burn_object(creator, hero); let transfer_ref = generate_transfer_ref(&hero_constructor); transfer_with_ref(generate_linear_transfer_ref(&transfer_ref), @0x456); unburn(creator, hero); @@ -876,7 +889,7 @@ module supra_framework::object { disable_ungated_transfer(&transfer_ref); // Owner should be able to burn, despite ungated transfer disallowed. - burn(creator, hero); + burn_object(creator, hero); assert!(owner(hero) == BURN_ADDRESS, 0); assert!(!ungated_transfer_allowed(hero), 0); @@ -897,7 +910,7 @@ module supra_framework::object { // Owner should be not be able to burn weapon directly. 
assert!(owner(weapon) == object_address(&hero), 0); assert!(owns(weapon, signer::address_of(creator)), 0); - burn(creator, weapon); + burn_object(creator, weapon); } #[test(creator = @0x123)] @@ -907,6 +920,13 @@ module supra_framework::object { unburn(creator, hero); } + #[test(creator = @0x123)] + #[expected_failure(abort_code = 0x5000A, location = Self)] + fun test_burn_should_fail(creator: &signer) acquires ObjectCore { + let (_, hero) = create_hero(creator); + burn(creator, hero); + } + #[test_only] fun create_simple_object(creator: &signer, seed: vector): Object { object_from_constructor_ref(&create_named_object(creator, seed)) diff --git a/aptos-move/framework/supra-framework/sources/object.spec.move b/aptos-move/framework/supra-framework/sources/object.spec.move index 12cad2fec0936..8d5c88819249a 100644 --- a/aptos-move/framework/supra-framework/sources/object.spec.move +++ b/aptos-move/framework/supra-framework/sources/object.spec.move @@ -475,7 +475,11 @@ spec supra_framework::object { aborts_if !global(object_address).allow_ungated_transfer; } - spec burn(owner: &signer, object: Object) { + spec burn(_owner: &signer, _object: Object) { + aborts_if true; + } + + spec burn_object(owner: &signer, object: Object) { pragma aborts_if_is_partial; let object_address = object.inner; aborts_if !exists(object_address); diff --git a/aptos-move/framework/supra-framework/sources/primary_fungible_store.move b/aptos-move/framework/supra-framework/sources/primary_fungible_store.move index 13a81a91ee45c..e407c4c442a13 100644 --- a/aptos-move/framework/supra-framework/sources/primary_fungible_store.move +++ b/aptos-move/framework/supra-framework/sources/primary_fungible_store.move @@ -372,7 +372,7 @@ module supra_framework::primary_fungible_store { // User 2 burns their primary store but should still be able to transfer afterward. 
let user_2_primary_store = primary_store(user_2_address, metadata); - object::burn(user_2, user_2_primary_store); + object::burn_object(user_2, user_2_primary_store); assert!(object::is_burnt(user_2_primary_store), 0); // Balance still works assert!(balance(user_2_address, metadata) == 80, 0); @@ -396,7 +396,7 @@ module supra_framework::primary_fungible_store { // User 2 burns their primary store but should still be able to withdraw afterward. let user_2_primary_store = primary_store(user_2_address, metadata); - object::burn(user_2, user_2_primary_store); + object::burn_object(user_2, user_2_primary_store); assert!(object::is_burnt(user_2_primary_store), 0); let coins = withdraw(user_2, metadata, 70); assert!(balance(user_2_address, metadata) == 10, 0); diff --git a/aptos-move/framework/supra-framework/sources/reconfiguration_state.spec.move b/aptos-move/framework/supra-framework/sources/reconfiguration_state.spec.move index 8b6d175e2c23f..494e929cd463e 100644 --- a/aptos-move/framework/supra-framework/sources/reconfiguration_state.spec.move +++ b/aptos-move/framework/supra-framework/sources/reconfiguration_state.spec.move @@ -100,7 +100,7 @@ spec supra_framework::reconfiguration_state { include copyable_any::type_name(global(@supra_framework).variant).bytes == b"0x1::reconfiguration_state::StateActive" ==> copyable_any::UnpackAbortsIf { - x: global(@supra_framework).variant + self: global(@supra_framework).variant }; aborts_if copyable_any::type_name(global(@supra_framework).variant).bytes != b"0x1::reconfiguration_state::StateActive"; diff --git a/aptos-move/framework/supra-framework/sources/resource_account.spec.move b/aptos-move/framework/supra-framework/sources/resource_account.spec.move index bdd2dca1c0b24..8528a64fb0d00 100644 --- a/aptos-move/framework/supra-framework/sources/resource_account.spec.move +++ b/aptos-move/framework/supra-framework/sources/resource_account.spec.move @@ -155,8 +155,6 @@ spec supra_framework::resource_account { let get = 
len(optional_auth_key) == 0; let account = global(source_addr); - requires source_addr != resource_addr; - aborts_if len(ZERO_AUTH_KEY) != 32; include account::exists_at(resource_addr) ==> account::CreateResourceAccountAbortsIf; include !account::exists_at(resource_addr) ==> account::CreateAccountAbortsIf {addr: resource_addr}; diff --git a/aptos-move/framework/supra-framework/sources/staking_contract.move b/aptos-move/framework/supra-framework/sources/staking_contract.move index ec25f5c000bf0..19a629c489fc1 100644 --- a/aptos-move/framework/supra-framework/sources/staking_contract.move +++ b/aptos-move/framework/supra-framework/sources/staking_contract.move @@ -60,7 +60,7 @@ module supra_framework::staking_contract { const EINSUFFICIENT_ACTIVE_STAKE_TO_WITHDRAW: u64 = 7; /// Caller must be either the staker, operator, or beneficiary. const ENOT_STAKER_OR_OPERATOR_OR_BENEFICIARY: u64 = 8; - /// Chaning beneficiaries for operators is not supported. + /// Changing beneficiaries for operators is not supported. const EOPERATOR_BENEFICIARY_CHANGE_NOT_SUPPORTED: u64 = 9; /// Maximum number of distributions a stake pool can support. 
diff --git a/aptos-move/framework/supra-framework/sources/staking_contract.spec.move b/aptos-move/framework/supra-framework/sources/staking_contract.spec.move index 4cac10820d783..6a02d74345b7f 100644 --- a/aptos-move/framework/supra-framework/sources/staking_contract.spec.move +++ b/aptos-move/framework/supra-framework/sources/staking_contract.spec.move @@ -72,6 +72,10 @@ spec supra_framework::staking_contract { pragma aborts_if_is_strict; } + spec StakingContract { + invariant commission_percentage >= 0 && commission_percentage <= 100; + } + spec stake_pool_address(staker: address, operator: address): address { include ContractExistsAbortsIf; let staking_contracts = global(staker).staking_contracts; @@ -97,7 +101,6 @@ spec supra_framework::staking_contract { spec staking_contract_amounts(staker: address, operator: address): (u64, u64, u64) { // TODO: set because of timeout (property proved). pragma verify_duration_estimate = 120; - requires staking_contract.commission_percentage >= 0 && staking_contract.commission_percentage <= 100; let staking_contracts = global(staker).staking_contracts; let staking_contract = simple_map::spec_get(staking_contracts, operator); @@ -228,6 +231,7 @@ spec supra_framework::staking_contract { let post new_delegated_voter = global(pool_address).delegated_voter; // property 4: The staker may update the voter of a staking contract, enabling them // to modify the assigned voter address and ensure it accurately reflects their desired choice. + /// [high-level-req-4] ensures new_delegated_voter == new_voter; } @@ -275,8 +279,6 @@ spec supra_framework::staking_contract { // TODO: Call `update_distribution_pool` and could not verify `update_distribution_pool`. // TODO: Set because of timeout (estimate unknown). 
pragma verify = false; - /// [high-level-req-4] - requires staking_contract.commission_percentage >= 0 && staking_contract.commission_percentage <= 100; let staker_address = signer::address_of(staker); let staking_contracts = global(staker_address).staking_contracts; let staking_contract = simple_map::spec_get(staking_contracts, operator); @@ -287,7 +289,6 @@ spec supra_framework::staking_contract { // TODO: Call `update_distribution_pool` and could not verify `update_distribution_pool`. // TODO: Set because of timeout (estimate unknown). pragma verify = false; - requires amount > 0; let staker_address = signer::address_of(staker); include ContractExistsAbortsIf { staker: staker_address }; } @@ -441,7 +442,7 @@ spec supra_framework::staking_contract { } /// The Account exists under the staker. - /// The guid_creation_num of the ccount resource is up to MAX_U64. + /// The guid_creation_num of the account resource is up to MAX_U64. spec new_staking_contracts_holder(staker: &signer): Store { include NewStakingContractsHolderAbortsIf; } diff --git a/aptos-move/framework/supra-framework/sources/supra_account.move b/aptos-move/framework/supra-framework/sources/supra_account.move index a5f475d4e9ab2..76a975ac5bb6c 100644 --- a/aptos-move/framework/supra-framework/sources/supra_account.move +++ b/aptos-move/framework/supra-framework/sources/supra_account.move @@ -198,7 +198,7 @@ module supra_framework::supra_account { /// This would create the recipient SUPRA PFS first, which also registers it to receive SUPRA, before transferring. 
/// TODO: once migration is complete, rename to just "transfer_only" and make it an entry function (for cheapest way /// to transfer SUPRA) - if we want to allow SUPRA PFS without account itself - fun fungible_transfer_only( + public(friend) entry fun fungible_transfer_only( source: &signer, to: address, amount: u64 ) { let sender_store = ensure_primary_fungible_store_exists(signer::address_of(source)); diff --git a/aptos-move/framework/supra-framework/sources/supra_account.spec.move b/aptos-move/framework/supra-framework/sources/supra_account.spec.move index 84e2e0a99823b..8c4473e9fa7a4 100644 --- a/aptos-move/framework/supra-framework/sources/supra_account.spec.move +++ b/aptos-move/framework/supra-framework/sources/supra_account.spec.move @@ -90,9 +90,6 @@ spec supra_framework::supra_account { pragma verify = false; let account_addr_source = signer::address_of(source); - // The 'from' addr is implictly not equal to 'to' addr - requires account_addr_source != to; - include CreateAccountTransferAbortsIf; include GuidAbortsIf; include WithdrawAbortsIf{from: source}; @@ -131,10 +128,10 @@ spec supra_framework::supra_account { let coin_store_source = global>(account_addr_source); let balance_source = coin_store_source.coin.value; - requires forall i in 0..len(recipients): - recipients[i] != account_addr_source; - requires exists i in 0..len(recipients): - amounts[i] > 0; + // requires forall i in 0..len(recipients): + // recipients[i] != account_addr_source; + // requires exists i in 0..len(recipients): + // amounts[i] > 0; // create account properties aborts_if len(recipients) != len(amounts); @@ -182,11 +179,11 @@ spec supra_framework::supra_account { let coin_store_source = global>(account_addr_source); let balance_source = coin_store_source.coin.value; - requires forall i in 0..len(recipients): - recipients[i] != account_addr_source; - - requires exists i in 0..len(recipients): - amounts[i] > 0; + // requires forall i in 0..len(recipients): + // recipients[i] != 
account_addr_source; + // + // requires exists i in 0..len(recipients): + // amounts[i] > 0; /// [high-level-req-7] aborts_if len(recipients) != len(amounts); @@ -246,8 +243,6 @@ spec supra_framework::supra_account { pragma verify = false; let account_addr_source = signer::address_of(from); - //The 'from' addr is implictly not equal to 'to' addr - requires account_addr_source != to; include CreateAccountTransferAbortsIf; include WithdrawAbortsIf; diff --git a/aptos-move/framework/supra-framework/sources/transaction_validation.move b/aptos-move/framework/supra-framework/sources/transaction_validation.move index a8aa5efc1a01b..cfd6a7ee30d0f 100644 --- a/aptos-move/framework/supra-framework/sources/transaction_validation.move +++ b/aptos-move/framework/supra-framework/sources/transaction_validation.move @@ -79,6 +79,7 @@ module supra_framework::transaction_validation { txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, + is_simulation: bool, ) { assert!( timestamp::now_seconds() < txn_expiration_time, @@ -95,10 +96,13 @@ module supra_framework::transaction_validation { || txn_sequence_number > 0 ) { assert!(account::exists_at(transaction_sender), error::invalid_argument(PROLOGUE_EACCOUNT_DOES_NOT_EXIST)); - assert!( - txn_authentication_key == account::get_authentication_key(transaction_sender), - error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY), - ); + if (!features::transaction_simulation_enhancement_enabled() || + !skip_auth_key_check(is_simulation, &txn_authentication_key)) { + assert!( + txn_authentication_key == account::get_authentication_key(transaction_sender), + error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY), + ) + }; let account_sequence_number = account::get_sequence_number(transaction_sender); assert!( @@ -123,24 +127,29 @@ module supra_framework::transaction_validation { error::invalid_argument(PROLOGUE_ESEQUENCE_NUMBER_TOO_NEW) ); - assert!( - txn_authentication_key == bcs::to_bytes(&transaction_sender), - 
error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY), - ); + if (!features::transaction_simulation_enhancement_enabled() || + !skip_auth_key_check(is_simulation, &txn_authentication_key)) { + assert!( + txn_authentication_key == bcs::to_bytes(&transaction_sender), + error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY), + ); + } }; let max_transaction_fee = txn_gas_price * txn_max_gas_units; - if (features::operations_default_to_fa_supra_store_enabled()) { - assert!( - supra_account::is_fungible_balance_at_least(gas_payer, max_transaction_fee), - error::invalid_argument(PROLOGUE_ECANT_PAY_GAS_DEPOSIT) - ); - } else { - assert!( - coin::is_balance_at_least(gas_payer, max_transaction_fee), - error::invalid_argument(PROLOGUE_ECANT_PAY_GAS_DEPOSIT) - ); + if (!features::transaction_simulation_enhancement_enabled() || !skip_gas_payment(is_simulation, gas_payer)) { + if (features::operations_default_to_fa_supra_store_enabled()) { + assert!( + supra_account::is_fungible_balance_at_least(gas_payer, max_transaction_fee), + error::invalid_argument(PROLOGUE_ECANT_PAY_GAS_DEPOSIT) + ); + } else { + assert!( + coin::is_balance_at_least(gas_payer, max_transaction_fee), + error::invalid_argument(PROLOGUE_ECANT_PAY_GAS_DEPOSIT) + ); + } } } @@ -155,6 +164,7 @@ module supra_framework::transaction_validation { _script_hash: vector, ) { let gas_payer = signer::address_of(&sender); + // prologue_common with is_simulation set to false behaves identically to the original script_prologue function. prologue_common( sender, gas_payer, @@ -163,7 +173,36 @@ module supra_framework::transaction_validation { txn_gas_price, txn_max_gas_units, txn_expiration_time, - chain_id + chain_id, + false, + ) + } + + // This function extends the script_prologue by adding a parameter to indicate simulation mode. + // Once the transaction_simulation_enhancement feature is enabled, the Aptos VM will invoke this function instead. 
+ // Eventually, this function will be consolidated with the original function once the feature is fully enabled. + fun script_prologue_extended( + sender: signer, + txn_sequence_number: u64, + txn_public_key: vector, + txn_gas_price: u64, + txn_max_gas_units: u64, + txn_expiration_time: u64, + chain_id: u8, + _script_hash: vector, + is_simulation: bool, + ) { + let gas_payer = signer::address_of(&sender); + prologue_common( + sender, + gas_payer, + txn_sequence_number, + txn_public_key, + txn_gas_price, + txn_max_gas_units, + txn_expiration_time, + chain_id, + is_simulation, ) } @@ -179,6 +218,8 @@ module supra_framework::transaction_validation { chain_id: u8, ) { let sender_addr = signer::address_of(&sender); + // prologue_common and multi_agent_common_prologue with is_simulation set to false behaves identically to the + // original multi_agent_script_prologue function. prologue_common( sender, sender_addr, @@ -188,13 +229,45 @@ module supra_framework::transaction_validation { txn_max_gas_units, txn_expiration_time, chain_id, + false, ); - multi_agent_common_prologue(secondary_signer_addresses, secondary_signer_public_key_hashes); + multi_agent_common_prologue(secondary_signer_addresses, secondary_signer_public_key_hashes, false); + } + + // This function extends the multi_agent_script_prologue by adding a parameter to indicate simulation mode. + // Once the transaction_simulation_enhancement feature is enabled, the Aptos VM will invoke this function instead. + // Eventually, this function will be consolidated with the original function once the feature is fully enabled. + fun multi_agent_script_prologue_extended( + sender: signer, + txn_sequence_number: u64, + txn_sender_public_key: vector, + secondary_signer_addresses: vector
, + secondary_signer_public_key_hashes: vector>, + txn_gas_price: u64, + txn_max_gas_units: u64, + txn_expiration_time: u64, + chain_id: u8, + is_simulation: bool, + ) { + let sender_addr = signer::address_of(&sender); + prologue_common( + sender, + sender_addr, + txn_sequence_number, + txn_sender_public_key, + txn_gas_price, + txn_max_gas_units, + txn_expiration_time, + chain_id, + is_simulation, + ); + multi_agent_common_prologue(secondary_signer_addresses, secondary_signer_public_key_hashes, is_simulation); } fun multi_agent_common_prologue( secondary_signer_addresses: vector
, secondary_signer_public_key_hashes: vector>, + is_simulation: bool, ) { let num_secondary_signers = vector::length(&secondary_signer_addresses); assert!( @@ -207,9 +280,10 @@ module supra_framework::transaction_validation { spec { invariant i <= num_secondary_signers; invariant forall j in 0..i: - account::exists_at(secondary_signer_addresses[j]) - && secondary_signer_public_key_hashes[j] - == account::get_authentication_key(secondary_signer_addresses[j]); + account::exists_at(secondary_signer_addresses[j]); + invariant forall j in 0..i: + secondary_signer_public_key_hashes[j] == account::get_authentication_key(secondary_signer_addresses[j]) || + (features::spec_simulation_enhancement_enabled() && is_simulation && vector::is_empty(secondary_signer_public_key_hashes[j])); }; (i < num_secondary_signers) }) { @@ -217,10 +291,13 @@ module supra_framework::transaction_validation { assert!(account::exists_at(secondary_address), error::invalid_argument(PROLOGUE_EACCOUNT_DOES_NOT_EXIST)); let signer_public_key_hash = *vector::borrow(&secondary_signer_public_key_hashes, i); - assert!( - signer_public_key_hash == account::get_authentication_key(secondary_address), - error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY), - ); + if (!features::transaction_simulation_enhancement_enabled() || + !skip_auth_key_check(is_simulation, &signer_public_key_hash)) { + assert!( + signer_public_key_hash == account::get_authentication_key(secondary_address), + error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY), + ) + }; i = i + 1; } } @@ -239,6 +316,8 @@ module supra_framework::transaction_validation { chain_id: u8, ) { assert!(features::fee_payer_enabled(), error::invalid_state(PROLOGUE_EFEE_PAYER_NOT_ENABLED)); + // prologue_common and multi_agent_common_prologue with is_simulation set to false behaves identically to the + // original fee_payer_script_prologue function. 
prologue_common( sender, fee_payer_address, @@ -248,14 +327,54 @@ module supra_framework::transaction_validation { txn_max_gas_units, txn_expiration_time, chain_id, + false, ); - multi_agent_common_prologue(secondary_signer_addresses, secondary_signer_public_key_hashes); + multi_agent_common_prologue(secondary_signer_addresses, secondary_signer_public_key_hashes, false); assert!( fee_payer_public_key_hash == account::get_authentication_key(fee_payer_address), error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY), ); } + // This function extends the fee_payer_script_prologue by adding a parameter to indicate simulation mode. + // Once the transaction_simulation_enhancement feature is enabled, the Aptos VM will invoke this function instead. + // Eventually, this function will be consolidated with the original function once the feature is fully enabled. + fun fee_payer_script_prologue_extended( + sender: signer, + txn_sequence_number: u64, + txn_sender_public_key: vector, + secondary_signer_addresses: vector
, + secondary_signer_public_key_hashes: vector>, + fee_payer_address: address, + fee_payer_public_key_hash: vector, + txn_gas_price: u64, + txn_max_gas_units: u64, + txn_expiration_time: u64, + chain_id: u8, + is_simulation: bool, + ) { + assert!(features::fee_payer_enabled(), error::invalid_state(PROLOGUE_EFEE_PAYER_NOT_ENABLED)); + prologue_common( + sender, + fee_payer_address, + txn_sequence_number, + txn_sender_public_key, + txn_gas_price, + txn_max_gas_units, + txn_expiration_time, + chain_id, + is_simulation, + ); + multi_agent_common_prologue(secondary_signer_addresses, secondary_signer_public_key_hashes, is_simulation); + if (!features::transaction_simulation_enhancement_enabled() || + !skip_auth_key_check(is_simulation, &fee_payer_public_key_hash)) { + assert!( + fee_payer_public_key_hash == account::get_authentication_key(fee_payer_address), + error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY), + ) + } + } + /// Epilogue function is run after a transaction is successfully executed. /// Called by the Adapter fun epilogue( @@ -263,12 +382,27 @@ module supra_framework::transaction_validation { storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, - gas_units_remaining: u64 + gas_units_remaining: u64, ) { let addr = signer::address_of(&account); epilogue_gas_payer(account, addr, storage_fee_refunded, txn_gas_price, txn_max_gas_units, gas_units_remaining); } + // This function extends the epilogue by adding a parameter to indicate simulation mode. + // Once the transaction_simulation_enhancement feature is enabled, the Aptos VM will invoke this function instead. + // Eventually, this function will be consolidated with the original function once the feature is fully enabled. 
+ fun epilogue_extended( + account: signer, + storage_fee_refunded: u64, + txn_gas_price: u64, + txn_max_gas_units: u64, + gas_units_remaining: u64, + is_simulation: bool, + ) { + let addr = signer::address_of(&account); + epilogue_gas_payer_extended(account, addr, storage_fee_refunded, txn_gas_price, txn_max_gas_units, gas_units_remaining, is_simulation); + } + /// Epilogue function with explicit gas payer specified, is run after a transaction is successfully executed. /// Called by the Adapter fun epilogue_gas_payer( @@ -277,7 +411,32 @@ module supra_framework::transaction_validation { storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, - gas_units_remaining: u64 + gas_units_remaining: u64, + ) { + // epilogue_gas_payer_extended with is_simulation set to false behaves identically to the original + // epilogue_gas_payer function. + epilogue_gas_payer_extended( + account, + gas_payer, + storage_fee_refunded, + txn_gas_price, + txn_max_gas_units, + gas_units_remaining, + false, + ); + } + + // This function extends the epilogue_gas_payer by adding a parameter to indicate simulation mode. + // Once the transaction_simulation_enhancement feature is enabled, the Aptos VM will invoke this function instead. + // Eventually, this function will be consolidated with the original function once the feature is fully enabled. + fun epilogue_gas_payer_extended( + account: signer, + gas_payer: address, + storage_fee_refunded: u64, + txn_gas_price: u64, + txn_max_gas_units: u64, + gas_units_remaining: u64, + is_simulation: bool, ) { assert!(txn_max_gas_units >= gas_units_remaining, error::invalid_argument(EOUT_OF_GAS)); let gas_used = txn_max_gas_units - gas_units_remaining; @@ -290,43 +449,53 @@ module supra_framework::transaction_validation { // it's important to maintain the error code consistent with vm // to do failed transaction cleanup. 
- if (features::operations_default_to_fa_supra_store_enabled()) { - assert!( - supra_account::is_fungible_balance_at_least(gas_payer, transaction_fee_amount), - error::out_of_range(PROLOGUE_ECANT_PAY_GAS_DEPOSIT), - ); - } else { - assert!( - coin::is_balance_at_least(gas_payer, transaction_fee_amount), - error::out_of_range(PROLOGUE_ECANT_PAY_GAS_DEPOSIT), - ); - }; + if (!features::transaction_simulation_enhancement_enabled() || !skip_gas_payment(is_simulation, gas_payer)) { + if (features::operations_default_to_fa_supra_store_enabled()) { + assert!( + supra_account::is_fungible_balance_at_least(gas_payer, transaction_fee_amount), + error::out_of_range(PROLOGUE_ECANT_PAY_GAS_DEPOSIT), + ); + } else { + assert!( + coin::is_balance_at_least(gas_payer, transaction_fee_amount), + error::out_of_range(PROLOGUE_ECANT_PAY_GAS_DEPOSIT), + ); + }; - let amount_to_burn = if (features::collect_and_distribute_gas_fees()) { - // TODO(gas): We might want to distinguish the refundable part of the charge and burn it or track - // it separately, so that we don't increase the total supply by refunding. + let amount_to_burn = if (features::collect_and_distribute_gas_fees()) { + // TODO(gas): We might want to distinguish the refundable part of the charge and burn it or track + // it separately, so that we don't increase the total supply by refunding. - // If transaction fees are redistributed to validators, collect them here for - // later redistribution. - transaction_fee::collect_fee(gas_payer, transaction_fee_amount); - 0 - } else { - // Otherwise, just burn the fee. - // TODO: this branch should be removed completely when transaction fee collection - // is tested and is fully proven to work well. - transaction_fee_amount - }; + // If transaction fees are redistributed to validators, collect them here for + // later redistribution. + transaction_fee::collect_fee(gas_payer, transaction_fee_amount); + 0 + } else { + // Otherwise, just burn the fee. 
+ // TODO: this branch should be removed completely when transaction fee collection + // is tested and is fully proven to work well. + transaction_fee_amount + }; - if (amount_to_burn > storage_fee_refunded) { - let burn_amount = amount_to_burn - storage_fee_refunded; - transaction_fee::burn_fee(gas_payer, burn_amount); - } else if (amount_to_burn < storage_fee_refunded) { - let mint_amount = storage_fee_refunded - amount_to_burn; - transaction_fee::mint_and_refund(gas_payer, mint_amount) + if (amount_to_burn > storage_fee_refunded) { + let burn_amount = amount_to_burn - storage_fee_refunded; + transaction_fee::burn_fee(gas_payer, burn_amount); + } else if (amount_to_burn < storage_fee_refunded) { + let mint_amount = storage_fee_refunded - amount_to_burn; + transaction_fee::mint_and_refund(gas_payer, mint_amount) + }; }; // Increment sequence number let addr = signer::address_of(&account); account::increment_sequence_number(addr); } + + inline fun skip_auth_key_check(is_simulation: bool, auth_key: &vector): bool { + is_simulation && vector::is_empty(auth_key) + } + + inline fun skip_gas_payment(is_simulation: bool, gas_payer: address): bool { + is_simulation && gas_payer == @0x0 + } } diff --git a/aptos-move/framework/supra-framework/sources/transaction_validation.spec.move b/aptos-move/framework/supra-framework/sources/transaction_validation.spec.move index 03e23c6c06dc7..991e3140df68d 100644 --- a/aptos-move/framework/supra-framework/sources/transaction_validation.spec.move +++ b/aptos-move/framework/supra-framework/sources/transaction_validation.spec.move @@ -106,13 +106,14 @@ spec supra_framework::transaction_validation { txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, + is_simulation: bool, ) { // TODO(fa_migration) pragma verify = false; include PrologueCommonAbortsIf; } - spec script_prologue( + spec script_prologue_extended( sender: signer, txn_sequence_number: u64, txn_public_key: vector, @@ -121,6 +122,7 @@ spec 
supra_framework::transaction_validation { txn_expiration_time: u64, chain_id: u8, _script_hash: vector, + is_simulation: bool, ) { // TODO(fa_migration) pragma verify = false; @@ -130,9 +132,24 @@ spec supra_framework::transaction_validation { }; } + spec script_prologue( + sender: signer, + txn_sequence_number: u64, + txn_public_key: vector, + txn_gas_price: u64, + txn_max_gas_units: u64, + txn_expiration_time: u64, + chain_id: u8, + _script_hash: vector, + ) { + // TODO: temporary mockup + pragma verify = false; + } + spec schema MultiAgentPrologueCommonAbortsIf { secondary_signer_addresses: vector
; secondary_signer_public_key_hashes: vector>; + is_simulation: bool; // Vectors to be `zipped with` should be of equal length. let num_secondary_signers = len(secondary_signer_addresses); @@ -142,30 +159,38 @@ spec supra_framework::transaction_validation { // property 2: All secondary signer addresses are verified to be authentic through a validation process. /// [high-level-req-2] aborts_if exists i in 0..num_secondary_signers: - !account::exists_at(secondary_signer_addresses[i]) - || secondary_signer_public_key_hashes[i] != - account::get_authentication_key(secondary_signer_addresses[i]); - + !account::exists_at(secondary_signer_addresses[i]); + aborts_if exists i in 0..num_secondary_signers: + !can_skip(features::spec_simulation_enhancement_enabled(), is_simulation, secondary_signer_public_key_hashes[i]) && + secondary_signer_public_key_hashes[i] != + account::get_authentication_key(secondary_signer_addresses[i]); // By the end, all secondary signers account should exist and public key hash should match. ensures forall i in 0..num_secondary_signers: - account::exists_at(secondary_signer_addresses[i]) - && secondary_signer_public_key_hashes[i] == - account::get_authentication_key(secondary_signer_addresses[i]); + account::exists_at(secondary_signer_addresses[i]); + ensures forall i in 0..num_secondary_signers: + secondary_signer_public_key_hashes[i] == account::get_authentication_key(secondary_signer_addresses[i]) + || can_skip(features::spec_simulation_enhancement_enabled(), is_simulation, secondary_signer_public_key_hashes[i]); + } + + spec fun can_skip(feature_flag: bool, is_simulation: bool, auth_key: vector): bool { + features::spec_simulation_enhancement_enabled() && is_simulation && vector::is_empty(auth_key) } spec multi_agent_common_prologue( secondary_signer_addresses: vector
, secondary_signer_public_key_hashes: vector>, + is_simulation: bool, ) { include MultiAgentPrologueCommonAbortsIf { secondary_signer_addresses, secondary_signer_public_key_hashes, + is_simulation, }; } /// Aborts if length of public key hashed vector /// not equal the number of singers. - spec multi_agent_script_prologue ( + spec multi_agent_script_prologue_extended( sender: signer, txn_sequence_number: u64, txn_sender_public_key: vector, @@ -175,6 +200,7 @@ spec supra_framework::transaction_validation { txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, + is_simulation: bool, ) { pragma verify_duration_estimate = 120; let gas_payer = signer::address_of(sender); @@ -188,10 +214,26 @@ spec supra_framework::transaction_validation { include MultiAgentPrologueCommonAbortsIf { secondary_signer_addresses, secondary_signer_public_key_hashes, + is_simulation, }; } - spec fee_payer_script_prologue( + spec multi_agent_script_prologue( + sender: signer, + txn_sequence_number: u64, + txn_sender_public_key: vector, + secondary_signer_addresses: vector
, + secondary_signer_public_key_hashes: vector>, + txn_gas_price: u64, + txn_max_gas_units: u64, + txn_expiration_time: u64, + chain_id: u8, + ) { + // TODO: temporary mockup + pragma verify = false; + } + + spec fee_payer_script_prologue_extended( sender: signer, txn_sequence_number: u64, txn_sender_public_key: vector, @@ -203,6 +245,7 @@ spec supra_framework::transaction_validation { txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, + is_simulation: bool, ) { pragma verify_duration_estimate = 120; @@ -216,6 +259,7 @@ spec supra_framework::transaction_validation { include MultiAgentPrologueCommonAbortsIf { secondary_signer_addresses, secondary_signer_public_key_hashes, + is_simulation, }; aborts_if !account::exists_at(gas_payer); @@ -223,37 +267,79 @@ spec supra_framework::transaction_validation { aborts_if !features::spec_fee_payer_enabled(); } + spec fee_payer_script_prologue( + sender: signer, + txn_sequence_number: u64, + txn_sender_public_key: vector, + secondary_signer_addresses: vector
, + secondary_signer_public_key_hashes: vector>, + fee_payer_address: address, + fee_payer_public_key_hash: vector, + txn_gas_price: u64, + txn_max_gas_units: u64, + txn_expiration_time: u64, + chain_id: u8, + ) { + // TODO: temporary mockup + pragma verify = false; + } + /// Abort according to the conditions. /// `SupraCoinCapabilities` and `CoinInfo` should exists. /// Skip transaction_fee::burn_fee verification. - spec epilogue( + spec epilogue_extended( account: signer, storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, - gas_units_remaining: u64 + gas_units_remaining: u64, + is_simulation: bool, ) { // TODO(fa_migration) pragma verify = false; include EpilogueGasPayerAbortsIf { gas_payer: signer::address_of(account) }; } + spec epilogue( + account: signer, + storage_fee_refunded: u64, + txn_gas_price: u64, + txn_max_gas_units: u64, + gas_units_remaining: u64, + ) { + // TODO: temporary mockup + pragma verify = false; + } + /// Abort according to the conditions. /// `SupraCoinCapabilities` and `CoinInfo` should exist. /// Skip transaction_fee::burn_fee verification. 
- spec epilogue_gas_payer( + spec epilogue_gas_payer_extended( account: signer, gas_payer: address, storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, - gas_units_remaining: u64 + gas_units_remaining: u64, + is_simulation: bool, ) { // TODO(fa_migration) pragma verify = false; include EpilogueGasPayerAbortsIf; } + spec epilogue_gas_payer( + account: signer, + gas_payer: address, + storage_fee_refunded: u64, + txn_gas_price: u64, + txn_max_gas_units: u64, + gas_units_remaining: u64, + ) { + // TODO: temporary mockup + pragma verify = false; + } + spec schema EpilogueGasPayerAbortsIf { use std::option; use aptos_std::type_info; diff --git a/aptos-move/framework/supra-framework/sources/vesting.spec.move b/aptos-move/framework/supra-framework/sources/vesting.spec.move index 0de9e9066db0b..fbf7822e5c791 100644 --- a/aptos-move/framework/supra-framework/sources/vesting.spec.move +++ b/aptos-move/framework/supra-framework/sources/vesting.spec.move @@ -163,10 +163,6 @@ spec supra_framework::vesting { spec schema TotalAccumulatedRewardsAbortsIf { vesting_contract_address: address; - // Note: commission percentage should not be under 0 or higher than 100, cause it's a percentage number - // This requirement will solve the timeout issue of total_accumulated_rewards - // However, accumulated_rewards is still timeout - requires staking_contract.commission_percentage >= 0 && staking_contract.commission_percentage <= 100; include ActiveVestingContractAbortsIf{contract_address: vesting_contract_address}; let vesting_contract = global(vesting_contract_address); @@ -294,7 +290,6 @@ spec supra_framework::vesting { // TODO: Calls `unlock_rewards` in loop. pragma verify = false; aborts_if len(contract_addresses) == 0; - include PreconditionAbortsIf; } spec vest(contract_address: address) { @@ -307,14 +302,6 @@ spec supra_framework::vesting { // TODO: Calls `vest` in loop. 
pragma verify = false; aborts_if len(contract_addresses) == 0; - include PreconditionAbortsIf; - } - - spec schema PreconditionAbortsIf { - contract_addresses: vector
; - - requires forall i in 0..len(contract_addresses): simple_map::spec_get(global(contract_addresses[i]).staking_contracts, global(contract_addresses[i]).staking.operator).commission_percentage >= 0 - && simple_map::spec_get(global(contract_addresses[i]).staking_contracts, global(contract_addresses[i]).staking.operator).commission_percentage <= 100; } spec distribute(contract_address: address) { diff --git a/aptos-move/framework/supra-framework/sources/vesting_without_staking.spec.move b/aptos-move/framework/supra-framework/sources/vesting_without_staking.spec.move index 3dd2c83d54f8c..966c2ceb707fd 100644 --- a/aptos-move/framework/supra-framework/sources/vesting_without_staking.spec.move +++ b/aptos-move/framework/supra-framework/sources/vesting_without_staking.spec.move @@ -107,28 +107,36 @@ spec supra_framework::vesting_without_staking { } spec vest_transfer { - pragma verify = true; + // TODO(fa_migration) + pragma verify = false; let amount = min(vesting_record.left_amount, fixed_point32::spec_multiply_u64(vesting_record.init_amount, vesting_fraction)); // Ensure that the amount is substracted from the left_amount ensures vesting_record.left_amount == old(vesting_record.left_amount) - amount; let address_from = signer_cap.account; + let coin_store_from = global>(address_from); + let post coin_store_post_from = global>(address_from); + let coin_store_to = global>(beneficiary); + let post coin_store_post_to = global>(beneficiary); // Ensure that the amount is transferred from the address_from to the beneficiary - ensures beneficiary != address_from ==> - (coin::balance(beneficiary) == old(coin::balance(beneficiary)) + amount - && coin::balance(address_from) == old(coin::balance(address_from)) - amount); + ensures beneficiary != address_from ==> coin_store_post_from.coin.value == + coin_store_from.coin.value - amount; + ensures beneficiary != address_from ==> coin_store_post_to.coin.value == coin_store_to.coin.value + amount; + ensures beneficiary == 
address_from ==> coin_store_post_from.coin.value == coin_store_from.coin.value; } spec remove_shareholder { - pragma verify = true; + // TODO(fa_migration) + pragma verify = false; pragma aborts_if_is_partial = true; include AdminAborts; let vesting_contract = global(contract_address); let post vesting_contract_post = global(contract_address); - let balance_pre = coin::balance(vesting_contract.withdrawal_address); - let post balance_post = coin::balance(vesting_contract_post.withdrawal_address); + let balance_pre = global>(vesting_contract.withdrawal_address).coin.value; + let post balance_post = global>(vesting_contract.withdrawal_address).coin.value; let shareholder_amount = simple_map::spec_get(vesting_contract.shareholders, shareholder_address).left_amount; // ensure that `withdrawal address` receives the `shareholder_amount` - ensures vesting_contract_post.withdrawal_address != vesting_contract.signer_cap.account ==> balance_post == balance_pre + shareholder_amount; + ensures vesting_contract_post.withdrawal_address != vesting_contract.signer_cap.account; + // ==> balance_post == balance_pre + shareholder_amount; // ensure that `shareholder_address` is indeed removed from the contract ensures !simple_map::spec_contains_key(vesting_contract_post.shareholders, shareholder_address); // ensure that beneficiary doesn't exist for the corresponding shareholder @@ -145,9 +153,9 @@ spec supra_framework::vesting_without_staking { pragma verify = true; pragma aborts_if_is_partial = true; let vesting_contract = global(contract_address); - let balance_pre = coin::balance(vesting_contract.withdrawal_address); - let post balance_post = coin::balance(vesting_contract.withdrawal_address); - let post balance_contract = coin::balance(contract_address); + let balance_pre = global>(vesting_contract.withdrawal_address).coin.value; + let post balance_post = global>(vesting_contract.withdrawal_address).coin.value; + let post balance_contract = global>(contract_address).coin.value; 
aborts_if !(global(contract_address).state == VESTING_POOL_TERMINATED); // // ensure that the `withdrawal_address` receives the remaining balance // ensures (vesting_contract.signer_cap.account != vesting_contract.withdrawal_address) ==> balance_post == balance_pre + coin::balance(contract_address); diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/account.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/account.md new file mode 100644 index 0000000000000..76710e0da3f69 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/account.md @@ -0,0 +1,3372 @@ + + + +# Module `0x1::account` + + + +- [Struct `KeyRotation`](#0x1_account_KeyRotation) +- [Resource `Account`](#0x1_account_Account) +- [Struct `KeyRotationEvent`](#0x1_account_KeyRotationEvent) +- [Struct `CoinRegisterEvent`](#0x1_account_CoinRegisterEvent) +- [Struct `CapabilityOffer`](#0x1_account_CapabilityOffer) +- [Struct `RotationCapability`](#0x1_account_RotationCapability) +- [Struct `SignerCapability`](#0x1_account_SignerCapability) +- [Resource `OriginatingAddress`](#0x1_account_OriginatingAddress) +- [Struct `RotationProofChallenge`](#0x1_account_RotationProofChallenge) +- [Struct `RotationCapabilityOfferProofChallenge`](#0x1_account_RotationCapabilityOfferProofChallenge) +- [Struct `SignerCapabilityOfferProofChallenge`](#0x1_account_SignerCapabilityOfferProofChallenge) +- [Struct `RotationCapabilityOfferProofChallengeV2`](#0x1_account_RotationCapabilityOfferProofChallengeV2) +- [Struct `SignerCapabilityOfferProofChallengeV2`](#0x1_account_SignerCapabilityOfferProofChallengeV2) +- [Constants](#@Constants_0) +- [Function `initialize`](#0x1_account_initialize) +- [Function `create_account_if_does_not_exist`](#0x1_account_create_account_if_does_not_exist) +- [Function `create_account`](#0x1_account_create_account) +- [Function `create_account_unchecked`](#0x1_account_create_account_unchecked) +- [Function `exists_at`](#0x1_account_exists_at) +- 
[Function `get_guid_next_creation_num`](#0x1_account_get_guid_next_creation_num) +- [Function `get_sequence_number`](#0x1_account_get_sequence_number) +- [Function `increment_sequence_number`](#0x1_account_increment_sequence_number) +- [Function `get_authentication_key`](#0x1_account_get_authentication_key) +- [Function `rotate_authentication_key_internal`](#0x1_account_rotate_authentication_key_internal) +- [Function `rotate_authentication_key_call`](#0x1_account_rotate_authentication_key_call) +- [Function `rotate_authentication_key`](#0x1_account_rotate_authentication_key) +- [Function `rotate_authentication_key_with_rotation_capability`](#0x1_account_rotate_authentication_key_with_rotation_capability) +- [Function `offer_rotation_capability`](#0x1_account_offer_rotation_capability) +- [Function `is_rotation_capability_offered`](#0x1_account_is_rotation_capability_offered) +- [Function `get_rotation_capability_offer_for`](#0x1_account_get_rotation_capability_offer_for) +- [Function `revoke_rotation_capability`](#0x1_account_revoke_rotation_capability) +- [Function `revoke_any_rotation_capability`](#0x1_account_revoke_any_rotation_capability) +- [Function `offer_signer_capability`](#0x1_account_offer_signer_capability) +- [Function `is_signer_capability_offered`](#0x1_account_is_signer_capability_offered) +- [Function `get_signer_capability_offer_for`](#0x1_account_get_signer_capability_offer_for) +- [Function `revoke_signer_capability`](#0x1_account_revoke_signer_capability) +- [Function `revoke_any_signer_capability`](#0x1_account_revoke_any_signer_capability) +- [Function `create_authorized_signer`](#0x1_account_create_authorized_signer) +- [Function `assert_valid_rotation_proof_signature_and_get_auth_key`](#0x1_account_assert_valid_rotation_proof_signature_and_get_auth_key) +- [Function `update_auth_key_and_originating_address_table`](#0x1_account_update_auth_key_and_originating_address_table) +- [Function 
`create_resource_address`](#0x1_account_create_resource_address) +- [Function `create_resource_account`](#0x1_account_create_resource_account) +- [Function `create_framework_reserved_account`](#0x1_account_create_framework_reserved_account) +- [Function `create_guid`](#0x1_account_create_guid) +- [Function `new_event_handle`](#0x1_account_new_event_handle) +- [Function `register_coin`](#0x1_account_register_coin) +- [Function `create_signer_with_capability`](#0x1_account_create_signer_with_capability) +- [Function `get_signer_capability_address`](#0x1_account_get_signer_capability_address) +- [Function `verify_signed_message`](#0x1_account_verify_signed_message) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `initialize`](#@Specification_1_initialize) + - [Function `create_account_if_does_not_exist`](#@Specification_1_create_account_if_does_not_exist) + - [Function `create_account`](#@Specification_1_create_account) + - [Function `create_account_unchecked`](#@Specification_1_create_account_unchecked) + - [Function `exists_at`](#@Specification_1_exists_at) + - [Function `get_guid_next_creation_num`](#@Specification_1_get_guid_next_creation_num) + - [Function `get_sequence_number`](#@Specification_1_get_sequence_number) + - [Function `increment_sequence_number`](#@Specification_1_increment_sequence_number) + - [Function `get_authentication_key`](#@Specification_1_get_authentication_key) + - [Function `rotate_authentication_key_internal`](#@Specification_1_rotate_authentication_key_internal) + - [Function `rotate_authentication_key_call`](#@Specification_1_rotate_authentication_key_call) + - [Function `rotate_authentication_key`](#@Specification_1_rotate_authentication_key) + - [Function `rotate_authentication_key_with_rotation_capability`](#@Specification_1_rotate_authentication_key_with_rotation_capability) + - [Function 
`offer_rotation_capability`](#@Specification_1_offer_rotation_capability) + - [Function `is_rotation_capability_offered`](#@Specification_1_is_rotation_capability_offered) + - [Function `get_rotation_capability_offer_for`](#@Specification_1_get_rotation_capability_offer_for) + - [Function `revoke_rotation_capability`](#@Specification_1_revoke_rotation_capability) + - [Function `revoke_any_rotation_capability`](#@Specification_1_revoke_any_rotation_capability) + - [Function `offer_signer_capability`](#@Specification_1_offer_signer_capability) + - [Function `is_signer_capability_offered`](#@Specification_1_is_signer_capability_offered) + - [Function `get_signer_capability_offer_for`](#@Specification_1_get_signer_capability_offer_for) + - [Function `revoke_signer_capability`](#@Specification_1_revoke_signer_capability) + - [Function `revoke_any_signer_capability`](#@Specification_1_revoke_any_signer_capability) + - [Function `create_authorized_signer`](#@Specification_1_create_authorized_signer) + - [Function `assert_valid_rotation_proof_signature_and_get_auth_key`](#@Specification_1_assert_valid_rotation_proof_signature_and_get_auth_key) + - [Function `update_auth_key_and_originating_address_table`](#@Specification_1_update_auth_key_and_originating_address_table) + - [Function `create_resource_address`](#@Specification_1_create_resource_address) + - [Function `create_resource_account`](#@Specification_1_create_resource_account) + - [Function `create_framework_reserved_account`](#@Specification_1_create_framework_reserved_account) + - [Function `create_guid`](#@Specification_1_create_guid) + - [Function `new_event_handle`](#@Specification_1_new_event_handle) + - [Function `register_coin`](#@Specification_1_register_coin) + - [Function `create_signer_with_capability`](#@Specification_1_create_signer_with_capability) + - [Function `verify_signed_message`](#@Specification_1_verify_signed_message) + + +
use 0x1::bcs;
+use 0x1::chain_id;
+use 0x1::create_signer;
+use 0x1::ed25519;
+use 0x1::error;
+use 0x1::event;
+use 0x1::features;
+use 0x1::from_bcs;
+use 0x1::guid;
+use 0x1::hash;
+use 0x1::multi_ed25519;
+use 0x1::option;
+use 0x1::signer;
+use 0x1::system_addresses;
+use 0x1::table;
+use 0x1::type_info;
+use 0x1::vector;
+
+ + + + + +## Struct `KeyRotation` + + + +
#[event]
+struct KeyRotation has drop, store
+
+ + + +
+Fields + + +
+
+account: address +
+
+ +
+
+old_authentication_key: vector<u8> +
+
+ +
+
+new_authentication_key: vector<u8> +
+
+ +
+
+ + +
+ + + +## Resource `Account` + +Resource representing an account. + + +
struct Account has store, key
+
+ + + +
+Fields + + +
+
+authentication_key: vector<u8> +
+
+ +
+
+sequence_number: u64 +
+
+ +
+
+guid_creation_num: u64 +
+
+ +
+
+coin_register_events: event::EventHandle<account::CoinRegisterEvent> +
+
+ +
+
+key_rotation_events: event::EventHandle<account::KeyRotationEvent> +
+
+ +
+
+rotation_capability_offer: account::CapabilityOffer<account::RotationCapability> +
+
+ +
+
+signer_capability_offer: account::CapabilityOffer<account::SignerCapability> +
+
+ +
+
+ + +
+ + + +## Struct `KeyRotationEvent` + + + +
struct KeyRotationEvent has drop, store
+
+ + + +
+Fields + + +
+
+old_authentication_key: vector<u8> +
+
+ +
+
+new_authentication_key: vector<u8> +
+
+ +
+
+ + +
+ + + +## Struct `CoinRegisterEvent` + + + +
struct CoinRegisterEvent has drop, store
+
+ + + +
+Fields + + +
+
+type_info: type_info::TypeInfo +
+
+ +
+
+ + +
+ + + +## Struct `CapabilityOffer` + + + +
struct CapabilityOffer<T> has store
+
+ + + +
+Fields + + +
+
+for: option::Option<address> +
+
+ +
+
+ + +
+ + + +## Struct `RotationCapability` + + + +
struct RotationCapability has drop, store
+
+ + + +
+Fields + + +
+
+account: address +
+
+ +
+
+ + +
+ + + +## Struct `SignerCapability` + + + +
struct SignerCapability has drop, store
+
+ + + +
+Fields + + +
+
+account: address +
+
+ +
+
+ + +
+ + + +## Resource `OriginatingAddress` + +It is easy to fetch the authentication key of an address by simply reading it from the Account struct at that address. +The table in this struct makes it possible to do a reverse lookup: it maps an authentication key, to the address of the account which has that authentication key set. + +This mapping is needed when recovering wallets for accounts whose authentication key has been rotated. + +For example, imagine a freshly-created wallet with address a and thus also with authentication key a, derived from a PK pk_a with corresponding SK sk_a. +It is easy to recover such a wallet given just the secret key sk_a, since the PK can be derived from the SK, the authentication key can then be derived from the PK, and the address equals the authentication key (since there was no key rotation). + +However, if such a wallet rotates its authentication key to b derived from a different PK pk_b with SK sk_b, how would account recovery work? +The recovered address would no longer be 'a'; it would be b, which is incorrect. +This struct solves this problem by mapping the new authentication key b to the original address a and thus helping the wallet software during recovery find the correct address. + + +
struct OriginatingAddress has key
+
+ + + +
+Fields + + +
+
+address_map: table::Table<address, address> +
+
+ +
+
+ + +
+ + + +## Struct `RotationProofChallenge` + +This structs stores the challenge message that should be signed during key rotation. First, this struct is +signed by the account owner's current public key, which proves possession of a capability to rotate the key. +Second, this struct is signed by the new public key that the account owner wants to rotate to, which proves +knowledge of this new public key's associated secret key. These two signatures cannot be replayed in another +context because they include the TXN's unique sequence number. + + +
struct RotationProofChallenge has copy, drop
+
+ + + +
+Fields + + +
+
+sequence_number: u64 +
+
+ +
+
+originator: address +
+
+ +
+
+current_auth_key: address +
+
+ +
+
+new_public_key: vector<u8> +
+
+ +
+
+ + +
+ + + +## Struct `RotationCapabilityOfferProofChallenge` + +Deprecated struct - newest version is RotationCapabilityOfferProofChallengeV2 + + +
struct RotationCapabilityOfferProofChallenge has drop
+
+ + + +
+Fields + + +
+
+sequence_number: u64 +
+
+ +
+
+recipient_address: address +
+
+ +
+
+ + +
+ + + +## Struct `SignerCapabilityOfferProofChallenge` + +Deprecated struct - newest version is SignerCapabilityOfferProofChallengeV2 + + +
struct SignerCapabilityOfferProofChallenge has drop
+
+ + + +
+Fields + + +
+
+sequence_number: u64 +
+
+ +
+
+recipient_address: address +
+
+ +
+
+ + +
+ + + +## Struct `RotationCapabilityOfferProofChallengeV2` + +This struct stores the challenge message that should be signed by the source account, when the source account +is delegating its rotation capability to the recipient_address. +This V2 struct adds the chain_id and source_address to the challenge message, which prevents replaying the challenge message. + + +
struct RotationCapabilityOfferProofChallengeV2 has drop
+
+ + + +
+Fields + + +
+
+chain_id: u8 +
+
+ +
+
+sequence_number: u64 +
+
+ +
+
+source_address: address +
+
+ +
+
+recipient_address: address +
+
+ +
+
+ + +
+ + + +## Struct `SignerCapabilityOfferProofChallengeV2` + + + +
struct SignerCapabilityOfferProofChallengeV2 has copy, drop
+
+ + + +
+Fields + + +
+
+sequence_number: u64 +
+
+ +
+
+source_address: address +
+
+ +
+
+recipient_address: address +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const MAX_U64: u128 = 18446744073709551615;
+
+ + + + + +Scheme identifier used when hashing an account's address together with a seed to derive the address (not the +authentication key) of a resource account. This is an abuse of the notion of a scheme identifier which, for now, +serves to domain separate hashes used to derive resource account addresses from hashes used to derive +authentication keys. Without such separation, an adversary could create (and get a signer for) a resource account +whose address matches an existing address of a MultiEd25519 wallet. + + +
const DERIVE_RESOURCE_ACCOUNT_SCHEME: u8 = 255;
+
+ + + + + +Account already exists + + +
const EACCOUNT_ALREADY_EXISTS: u64 = 1;
+
+ + + + + +An attempt to create a resource account on an account that has a committed transaction + + +
const EACCOUNT_ALREADY_USED: u64 = 16;
+
+ + + + + +Account does not exist + + +
const EACCOUNT_DOES_NOT_EXIST: u64 = 2;
+
+ + + + + +Cannot create account because address is reserved + + +
const ECANNOT_RESERVED_ADDRESS: u64 = 5;
+
+ + + + + +Scheme identifier for Ed25519 signatures used to derive authentication keys for Ed25519 public keys. + + +
const ED25519_SCHEME: u8 = 0;
+
+ + + + + + + +
const EEXCEEDED_MAX_GUID_CREATION_NUM: u64 = 20;
+
+ + + + + +The caller does not have a valid rotation capability offer from the other account + + +
const EINVALID_ACCEPT_ROTATION_CAPABILITY: u64 = 10;
+
+ + + + + +Abort the transaction if the expected originating address is different from the originating address on-chain + + +
const EINVALID_ORIGINATING_ADDRESS: u64 = 13;
+
+ + + + + +Specified proof of knowledge required to prove ownership of a public key is invalid + + +
const EINVALID_PROOF_OF_KNOWLEDGE: u64 = 8;
+
+ + + + + +Specified scheme required to proceed with the smart contract operation - can only be ED25519_SCHEME(0) OR MULTI_ED25519_SCHEME(1) + + +
const EINVALID_SCHEME: u64 = 12;
+
+ + + + + +The provided authentication key has an invalid length + + +
const EMALFORMED_AUTHENTICATION_KEY: u64 = 4;
+
+ + + + + +The caller does not have a digital-signature-based capability to call this function + + +
const ENO_CAPABILITY: u64 = 9;
+
+ + + + + + + +
const ENO_SIGNER_CAPABILITY_OFFERED: u64 = 19;
+
+ + + + + +The specified rotation capability offer does not exist at the specified offerer address + + +
const ENO_SUCH_ROTATION_CAPABILITY_OFFER: u64 = 18;
+
+ + + + + +The signer capability offer doesn't exist at the given address + + +
const ENO_SUCH_SIGNER_CAPABILITY: u64 = 14;
+
+ + + + + +Address to create is not a valid reserved address for Aptos framework + + +
const ENO_VALID_FRAMEWORK_RESERVED_ADDRESS: u64 = 11;
+
+ + + + + +Offerer address doesn't exist + + +
const EOFFERER_ADDRESS_DOES_NOT_EXIST: u64 = 17;
+
+ + + + + +Transaction exceeded its allocated max gas + + +
const EOUT_OF_GAS: u64 = 6;
+
+ + + + + +An attempt to create a resource account on a claimed account + + +
const ERESOURCE_ACCCOUNT_EXISTS: u64 = 15;
+
+ + + + + +Sequence number exceeds the maximum value for a u64 + + +
const ESEQUENCE_NUMBER_TOO_BIG: u64 = 3;
+
+ + + + + +Specified current public key is not correct + + +
const EWRONG_CURRENT_PUBLIC_KEY: u64 = 7;
+
+ + + + + +Explicitly separate the GUID space between Object and Account to prevent accidental overlap. + + +
const MAX_GUID_CREATION_NUM: u64 = 1125899906842624;
+
+ + + + + +Scheme identifier for MultiEd25519 signatures used to derive authentication keys for MultiEd25519 public keys. + + +
const MULTI_ED25519_SCHEME: u8 = 1;
+
+ + + + + + + +
const ZERO_AUTH_KEY: vector<u8> = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
+
+ + + + + +## Function `initialize` + +Only called during genesis to initialize system resources for this module. + + +
public(friend) fun initialize(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
public(friend) fun initialize(aptos_framework: &signer) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    move_to(aptos_framework, OriginatingAddress {
+        address_map: table::new(),
+    });
+}
+
+ + + +
+ + + +## Function `create_account_if_does_not_exist` + + + +
public fun create_account_if_does_not_exist(account_address: address)
+
+ + + +
+Implementation + + +
public fun create_account_if_does_not_exist(account_address: address) {
+    if (!exists<Account>(account_address)) {
+        create_account(account_address);
+    }
+}
+
+ + + +
+ + + +## Function `create_account` + +Publishes a new Account resource under new_address. A signer representing new_address +is returned. This way, the caller of this function can publish additional resources under +new_address. + + +
public(friend) fun create_account(new_address: address): signer
+
+ + + +
+Implementation + + +
public(friend) fun create_account(new_address: address): signer {
+    // there cannot be an Account resource under new_addr already.
+    assert!(!exists<Account>(new_address), error::already_exists(EACCOUNT_ALREADY_EXISTS));
+
+    // NOTE: @core_resources gets created via a `create_account` call, so we do not include it below.
+    assert!(
+        new_address != @vm_reserved && new_address != @aptos_framework && new_address != @aptos_token,
+        error::invalid_argument(ECANNOT_RESERVED_ADDRESS)
+    );
+
+    create_account_unchecked(new_address)
+}
+
+ + + +
+ + + +## Function `create_account_unchecked` + + + +
fun create_account_unchecked(new_address: address): signer
+
+ + + +
+Implementation + + +
fun create_account_unchecked(new_address: address): signer {
+    let new_account = create_signer(new_address);
+    let authentication_key = bcs::to_bytes(&new_address);
+    assert!(
+        vector::length(&authentication_key) == 32,
+        error::invalid_argument(EMALFORMED_AUTHENTICATION_KEY)
+    );
+
+    let guid_creation_num = 0;
+
+    let guid_for_coin = guid::create(new_address, &mut guid_creation_num);
+    let coin_register_events = event::new_event_handle<CoinRegisterEvent>(guid_for_coin);
+
+    let guid_for_rotation = guid::create(new_address, &mut guid_creation_num);
+    let key_rotation_events = event::new_event_handle<KeyRotationEvent>(guid_for_rotation);
+
+    move_to(
+        &new_account,
+        Account {
+            authentication_key,
+            sequence_number: 0,
+            guid_creation_num,
+            coin_register_events,
+            key_rotation_events,
+            rotation_capability_offer: CapabilityOffer { for: option::none() },
+            signer_capability_offer: CapabilityOffer { for: option::none() },
+        }
+    );
+
+    new_account
+}
+
+ + + +
+ + + +## Function `exists_at` + + + +
#[view]
+public fun exists_at(addr: address): bool
+
+ + + +
+Implementation + + +
public fun exists_at(addr: address): bool {
+    exists<Account>(addr)
+}
+
+ + + +
+ + + +## Function `get_guid_next_creation_num` + + + +
#[view]
+public fun get_guid_next_creation_num(addr: address): u64
+
+ + + +
+Implementation + + +
public fun get_guid_next_creation_num(addr: address): u64 acquires Account {
+    borrow_global<Account>(addr).guid_creation_num
+}
+
+ + + +
+ + + +## Function `get_sequence_number` + + + +
#[view]
+public fun get_sequence_number(addr: address): u64
+
+ + + +
+Implementation + + +
public fun get_sequence_number(addr: address): u64 acquires Account {
+    borrow_global<Account>(addr).sequence_number
+}
+
+ + + +
+ + + +## Function `increment_sequence_number` + + + +
public(friend) fun increment_sequence_number(addr: address)
+
+ + + +
+Implementation + + +
public(friend) fun increment_sequence_number(addr: address) acquires Account {
+    let sequence_number = &mut borrow_global_mut<Account>(addr).sequence_number;
+
+    assert!(
+        (*sequence_number as u128) < MAX_U64,
+        error::out_of_range(ESEQUENCE_NUMBER_TOO_BIG)
+    );
+
+    *sequence_number = *sequence_number + 1;
+}
+
+ + + +
+ + + +## Function `get_authentication_key` + + + +
#[view]
+public fun get_authentication_key(addr: address): vector<u8>
+
+ + + +
+Implementation + + +
public fun get_authentication_key(addr: address): vector<u8> acquires Account {
+    borrow_global<Account>(addr).authentication_key
+}
+
+ + + +
+ + + +## Function `rotate_authentication_key_internal` + +This function is used to rotate a resource account's authentication key to new_auth_key. This is done in +many contexts: +1. During normal key rotation via rotate_authentication_key or rotate_authentication_key_call +2. During resource account initialization so that no private key can control the resource account +3. During multisig_v2 account creation + + +
public(friend) fun rotate_authentication_key_internal(account: &signer, new_auth_key: vector<u8>)
+
+ + + +
+Implementation + + +
public(friend) fun rotate_authentication_key_internal(account: &signer, new_auth_key: vector<u8>) acquires Account {
+    let addr = signer::address_of(account);
+    assert!(exists_at(addr), error::not_found(EACCOUNT_DOES_NOT_EXIST));
+    assert!(
+        vector::length(&new_auth_key) == 32,
+        error::invalid_argument(EMALFORMED_AUTHENTICATION_KEY)
+    );
+    let account_resource = borrow_global_mut<Account>(addr);
+    account_resource.authentication_key = new_auth_key;
+}
+
+ + + +
+ + + +## Function `rotate_authentication_key_call` + +Private entry function for key rotation that allows the signer to update their authentication key. +Note that this does not update the OriginatingAddress table because the new_auth_key is not "verified": it +does not come with a proof-of-knowledge of the underlying SK. Nonetheless, we need this functionality due to +the introduction of non-standard key algorithms, such as passkeys, which cannot produce proofs-of-knowledge in +the format expected in rotate_authentication_key. + + +
entry fun rotate_authentication_key_call(account: &signer, new_auth_key: vector<u8>)
+
+ + + +
+Implementation + + +
entry fun rotate_authentication_key_call(account: &signer, new_auth_key: vector<u8>) acquires Account {
+    rotate_authentication_key_internal(account, new_auth_key);
+}
+
+ + + +
+ + + +## Function `rotate_authentication_key` + +Generic authentication key rotation function that allows the user to rotate their authentication key from any scheme to any scheme. +To authorize the rotation, we need two signatures: +- the first signature cap_rotate_key refers to the signature by the account owner's current key on a valid RotationProofChallenge, +demonstrating that the user intends to and has the capability to rotate the authentication key of this account; +- the second signature cap_update_table refers to the signature by the new key (that the account owner wants to rotate to) on a +valid RotationProofChallenge, demonstrating that the user owns the new private key, and has the authority to update the +OriginatingAddress map with the new address mapping <new_address, originating_address>. +To verify these two signatures, we need their corresponding public key and public key scheme: we use from_scheme and from_public_key_bytes +to verify cap_rotate_key, and to_scheme and to_public_key_bytes to verify cap_update_table. +A scheme of 0 refers to an Ed25519 key and a scheme of 1 refers to Multi-Ed25519 keys. +originating address refers to an account's original/first address. + +Here is an example attack if we don't ask for the second signature cap_update_table: +Alice has rotated her account addr_a to new_addr_a. As a result, the following entry is created, to help Alice when recovering her wallet: +OriginatingAddress[new_addr_a] -> addr_a +Alice has had a bad day: her laptop blew up and she needs to reset her account on a new one. +(Fortunately, she still has her secret key new_sk_a associated with her new address new_addr_a, so she can do this.) + +But Bob likes to mess with Alice. +Bob creates an account addr_b and maliciously rotates it to Alice's new address new_addr_a. Since we are no longer checking a PoK, +Bob can easily do this. 
+ +Now, the table will be updated to make Alice's new address point to Bob's address: OriginatingAddress[new_addr_a] -> addr_b. +When Alice recovers her account, her wallet will display the attacker's address (Bob's) addr_b as her address. +Now Alice will give addr_b to everyone to pay her, but the money will go to Bob. + +Because we ask for a valid cap_update_table, this kind of attack is not possible. Bob would not have the secret key of Alice's address +to rotate his address to Alice's address in the first place. + + +
public entry fun rotate_authentication_key(account: &signer, from_scheme: u8, from_public_key_bytes: vector<u8>, to_scheme: u8, to_public_key_bytes: vector<u8>, cap_rotate_key: vector<u8>, cap_update_table: vector<u8>)
+
+ + + +
+Implementation + + +
public entry fun rotate_authentication_key(
+    account: &signer,
+    from_scheme: u8,
+    from_public_key_bytes: vector<u8>,
+    to_scheme: u8,
+    to_public_key_bytes: vector<u8>,
+    cap_rotate_key: vector<u8>,
+    cap_update_table: vector<u8>,
+) acquires Account, OriginatingAddress {
+    let addr = signer::address_of(account);
+    assert!(exists_at(addr), error::not_found(EACCOUNT_DOES_NOT_EXIST));
+    let account_resource = borrow_global_mut<Account>(addr);
+
+    // Verify the given `from_public_key_bytes` matches this account's current authentication key.
+    if (from_scheme == ED25519_SCHEME) {
+        let from_pk = ed25519::new_unvalidated_public_key_from_bytes(from_public_key_bytes);
+        let from_auth_key = ed25519::unvalidated_public_key_to_authentication_key(&from_pk);
+        assert!(
+            account_resource.authentication_key == from_auth_key,
+            error::unauthenticated(EWRONG_CURRENT_PUBLIC_KEY)
+        );
+    } else if (from_scheme == MULTI_ED25519_SCHEME) {
+        let from_pk = multi_ed25519::new_unvalidated_public_key_from_bytes(from_public_key_bytes);
+        let from_auth_key = multi_ed25519::unvalidated_public_key_to_authentication_key(&from_pk);
+        assert!(
+            account_resource.authentication_key == from_auth_key,
+            error::unauthenticated(EWRONG_CURRENT_PUBLIC_KEY)
+        );
+    } else {
+        abort error::invalid_argument(EINVALID_SCHEME)
+    };
+
+    // Construct a valid `RotationProofChallenge` that `cap_rotate_key` and `cap_update_table` will validate against.
+    let curr_auth_key_as_address = from_bcs::to_address(account_resource.authentication_key);
+    let challenge = RotationProofChallenge {
+        sequence_number: account_resource.sequence_number,
+        originator: addr,
+        current_auth_key: curr_auth_key_as_address,
+        new_public_key: to_public_key_bytes,
+    };
+
+    // Assert the challenges signed by the current and new keys are valid
+    assert_valid_rotation_proof_signature_and_get_auth_key(
+        from_scheme,
+        from_public_key_bytes,
+        cap_rotate_key,
+        &challenge
+    );
+    let new_auth_key = assert_valid_rotation_proof_signature_and_get_auth_key(
+        to_scheme,
+        to_public_key_bytes,
+        cap_update_table,
+        &challenge
+    );
+
+    // Update the `OriginatingAddress` table.
+    update_auth_key_and_originating_address_table(addr, account_resource, new_auth_key);
+}
+
+ + + +
+ + + +## Function `rotate_authentication_key_with_rotation_capability` + + + +
public entry fun rotate_authentication_key_with_rotation_capability(delegate_signer: &signer, rotation_cap_offerer_address: address, new_scheme: u8, new_public_key_bytes: vector<u8>, cap_update_table: vector<u8>)
+
+ + + +
+Implementation + + +
public entry fun rotate_authentication_key_with_rotation_capability(
+    delegate_signer: &signer,
+    rotation_cap_offerer_address: address,
+    new_scheme: u8,
+    new_public_key_bytes: vector<u8>,
+    cap_update_table: vector<u8>
+) acquires Account, OriginatingAddress {
+    assert!(exists_at(rotation_cap_offerer_address), error::not_found(EOFFERER_ADDRESS_DOES_NOT_EXIST));
+
+    // Check that there exists a rotation capability offer at the offerer's account resource for the delegate.
+    let delegate_address = signer::address_of(delegate_signer);
+    let offerer_account_resource = borrow_global<Account>(rotation_cap_offerer_address);
+    assert!(
+        option::contains(&offerer_account_resource.rotation_capability_offer.for, &delegate_address),
+        error::not_found(ENO_SUCH_ROTATION_CAPABILITY_OFFER)
+    );
+
+    let curr_auth_key = from_bcs::to_address(offerer_account_resource.authentication_key);
+    let challenge = RotationProofChallenge {
+        sequence_number: get_sequence_number(delegate_address),
+        originator: rotation_cap_offerer_address,
+        current_auth_key: curr_auth_key,
+        new_public_key: new_public_key_bytes,
+    };
+
+    // Verifies that the `RotationProofChallenge` from above is signed under the new public key that we are rotating to.        l
+    let new_auth_key = assert_valid_rotation_proof_signature_and_get_auth_key(
+        new_scheme,
+        new_public_key_bytes,
+        cap_update_table,
+        &challenge
+    );
+
+    // Update the `OriginatingAddress` table, so we can find the originating address using the new address.
+    let offerer_account_resource = borrow_global_mut<Account>(rotation_cap_offerer_address);
+    update_auth_key_and_originating_address_table(
+        rotation_cap_offerer_address,
+        offerer_account_resource,
+        new_auth_key
+    );
+}
+
+ + + +
+ + + +## Function `offer_rotation_capability` + +Offers rotation capability on behalf of account to the account at address recipient_address. +An account can delegate its rotation capability to only one other address at one time. If the account +has an existing rotation capability offer, calling this function will update the rotation capability offer with +the new recipient_address. +Here, rotation_capability_sig_bytes signature indicates that this key rotation is authorized by the account owner, +and prevents the classic "time-of-check time-of-use" attack. +For example, users usually rely on what the wallet displays to them as the transaction's outcome. Consider a contract that with 50% probability +(based on the current timestamp in Move), rotates somebody's key. The wallet might be unlucky and get an outcome where nothing is rotated, +incorrectly telling the user nothing bad will happen. But when the transaction actually gets executed, the attacker gets lucky and +the execution path triggers the account key rotation. +We prevent such attacks by asking for this extra signature authorizing the key rotation. + +@param rotation_capability_sig_bytes is the signature by the account owner's key on RotationCapabilityOfferProofChallengeV2. +@param account_scheme is the scheme of the account (ed25519 or multi_ed25519). +@param account_public_key_bytes is the public key of the account owner. +@param recipient_address is the address of the recipient of the rotation capability - note that if there's an existing rotation capability +offer, calling this function will replace the previous recipient_address upon successful verification. + + +
public entry fun offer_rotation_capability(account: &signer, rotation_capability_sig_bytes: vector<u8>, account_scheme: u8, account_public_key_bytes: vector<u8>, recipient_address: address)
+
+ + + +
+Implementation + + +
public entry fun offer_rotation_capability(
+    account: &signer,
+    rotation_capability_sig_bytes: vector<u8>,
+    account_scheme: u8,
+    account_public_key_bytes: vector<u8>,
+    recipient_address: address,
+) acquires Account {
+    let addr = signer::address_of(account);
+    // The recipient account must already exist on chain before a capability can be offered to it.
+    assert!(exists_at(recipient_address), error::not_found(EACCOUNT_DOES_NOT_EXIST));
+
+    // proof that this account intends to delegate its rotation capability to another account
+    // The challenge embeds the chain id and the offerer's current sequence number, so a signed
+    // offer cannot be replayed on another chain or reused after further transactions.
+    let account_resource = borrow_global_mut<Account>(addr);
+    let proof_challenge = RotationCapabilityOfferProofChallengeV2 {
+        chain_id: chain_id::get(),
+        sequence_number: account_resource.sequence_number,
+        source_address: addr,
+        recipient_address,
+    };
+
+    // verify the signature on `RotationCapabilityOfferProofChallengeV2` by the account owner
+    if (account_scheme == ED25519_SCHEME) {
+        // The supplied public key must hash to the account's current authentication key.
+        let pubkey = ed25519::new_unvalidated_public_key_from_bytes(account_public_key_bytes);
+        let expected_auth_key = ed25519::unvalidated_public_key_to_authentication_key(&pubkey);
+        assert!(
+            account_resource.authentication_key == expected_auth_key,
+            error::invalid_argument(EWRONG_CURRENT_PUBLIC_KEY)
+        );
+
+        let rotation_capability_sig = ed25519::new_signature_from_bytes(rotation_capability_sig_bytes);
+        assert!(
+            ed25519::signature_verify_strict_t(&rotation_capability_sig, &pubkey, proof_challenge),
+            error::invalid_argument(EINVALID_PROOF_OF_KNOWLEDGE)
+        );
+    } else if (account_scheme == MULTI_ED25519_SCHEME) {
+        // Same checks as the ed25519 branch, for the multi-ed25519 scheme.
+        let pubkey = multi_ed25519::new_unvalidated_public_key_from_bytes(account_public_key_bytes);
+        let expected_auth_key = multi_ed25519::unvalidated_public_key_to_authentication_key(&pubkey);
+        assert!(
+            account_resource.authentication_key == expected_auth_key,
+            error::invalid_argument(EWRONG_CURRENT_PUBLIC_KEY)
+        );
+
+        let rotation_capability_sig = multi_ed25519::new_signature_from_bytes(rotation_capability_sig_bytes);
+        assert!(
+            multi_ed25519::signature_verify_strict_t(&rotation_capability_sig, &pubkey, proof_challenge),
+            error::invalid_argument(EINVALID_PROOF_OF_KNOWLEDGE)
+        );
+    } else {
+        // Only the ed25519 and multi-ed25519 schemes are supported.
+        abort error::invalid_argument(EINVALID_SCHEME)
+    };
+
+    // update the existing rotation capability offer or put in a new rotation capability offer for the current account
+    option::swap_or_fill(&mut account_resource.rotation_capability_offer.for, recipient_address);
+}
+
+ + + +
+ + + +## Function `is_rotation_capability_offered` + +Returns true if the account at account_addr has a rotation capability offer. + + +
#[view]
+public fun is_rotation_capability_offered(account_addr: address): bool
+
+ + + +
+Implementation + + +
public fun is_rotation_capability_offered(account_addr: address): bool acquires Account {
+    // Aborts if no Account resource exists at `account_addr` (borrow_global on a missing resource aborts).
+    let account_resource = borrow_global<Account>(account_addr);
+    option::is_some(&account_resource.rotation_capability_offer.for)
+}
+
+ + + +
+ + + +## Function `get_rotation_capability_offer_for` + +Returns the address of the account that has a rotation capability offer from the account at account_addr. + + +
#[view]
+public fun get_rotation_capability_offer_for(account_addr: address): address
+
+ + + +
+Implementation + + +
public fun get_rotation_capability_offer_for(account_addr: address): address acquires Account {
+    let account_resource = borrow_global<Account>(account_addr);
+    // NOTE(review): this aborts with ENO_SIGNER_CAPABILITY_OFFERED even though it concerns the
+    // rotation capability offer — confirm the error-code reuse is intentional.
+    assert!(
+        option::is_some(&account_resource.rotation_capability_offer.for),
+        error::not_found(ENO_SIGNER_CAPABILITY_OFFERED),
+    );
+    *option::borrow(&account_resource.rotation_capability_offer.for)
+}
+
+ + + +
+ + + +## Function `revoke_rotation_capability` + +Revoke the rotation capability offer given to to_be_revoked_address from account + + +
public entry fun revoke_rotation_capability(account: &signer, to_be_revoked_address: address)
+
+ + + +
+Implementation + + +
public entry fun revoke_rotation_capability(account: &signer, to_be_revoked_address: address) acquires Account {
+    assert!(exists_at(to_be_revoked_address), error::not_found(EACCOUNT_DOES_NOT_EXIST));
+    let addr = signer::address_of(account);
+    let account_resource = borrow_global_mut<Account>(addr);
+    // Only revoke when the outstanding offer is exactly for `to_be_revoked_address`.
+    assert!(
+        option::contains(&account_resource.rotation_capability_offer.for, &to_be_revoked_address),
+        error::not_found(ENO_SUCH_ROTATION_CAPABILITY_OFFER)
+    );
+    // Delegate the actual clearing of the offer to the unconditional variant.
+    revoke_any_rotation_capability(account);
+}
+
+ + + +
+ + + +## Function `revoke_any_rotation_capability` + +Revoke any rotation capability offer in the specified account. + + +
public entry fun revoke_any_rotation_capability(account: &signer)
+
+ + + +
+Implementation + + +
public entry fun revoke_any_rotation_capability(account: &signer) acquires Account {
+    let account_resource = borrow_global_mut<Account>(signer::address_of(account));
+    // option::extract aborts if there is no outstanding rotation capability offer.
+    option::extract(&mut account_resource.rotation_capability_offer.for);
+}
+
+ + + +
+ + + +## Function `offer_signer_capability` + +Offers signer capability on behalf of account to the account at address recipient_address. +An account can delegate its signer capability to only one other address at one time. +signer_capability_sig_bytes is the SignerCapabilityOfferProofChallengeV2 signed by the account owner's key +account_scheme is the scheme of the account (ed25519 or multi_ed25519). +account_public_key_bytes is the public key of the account owner. +recipient_address is the address of the recipient of the signer capability - note that if there's an existing +recipient_address in the account owner's SignerCapabilityOffer, this will replace the +previous recipient_address upon successful verification (the previous recipient will no longer have access +to the account owner's signer capability). + + +
public entry fun offer_signer_capability(account: &signer, signer_capability_sig_bytes: vector<u8>, account_scheme: u8, account_public_key_bytes: vector<u8>, recipient_address: address)
+
+ + + +
+Implementation + + +
public entry fun offer_signer_capability(
+    account: &signer,
+    signer_capability_sig_bytes: vector<u8>,
+    account_scheme: u8,
+    account_public_key_bytes: vector<u8>,
+    recipient_address: address
+) acquires Account {
+    let source_address = signer::address_of(account);
+    // The recipient account must already exist on chain.
+    assert!(exists_at(recipient_address), error::not_found(EACCOUNT_DOES_NOT_EXIST));
+
+    // Proof that this account intends to delegate its signer capability to another account.
+    // The current sequence number is embedded so a signed offer cannot be replayed later.
+    let proof_challenge = SignerCapabilityOfferProofChallengeV2 {
+        sequence_number: get_sequence_number(source_address),
+        source_address,
+        recipient_address,
+    };
+    // Aborts unless the signature over the challenge verifies against the account's current key.
+    verify_signed_message(
+        source_address, account_scheme, account_public_key_bytes, signer_capability_sig_bytes, proof_challenge);
+
+    // Update the existing signer capability offer or put in a new signer capability offer for the recipient.
+    let account_resource = borrow_global_mut<Account>(source_address);
+    option::swap_or_fill(&mut account_resource.signer_capability_offer.for, recipient_address);
+}
+
+ + + +
+ + + +## Function `is_signer_capability_offered` + +Returns true if the account at account_addr has a signer capability offer. + + +
#[view]
+public fun is_signer_capability_offered(account_addr: address): bool
+
+ + + +
+Implementation + + +
public fun is_signer_capability_offered(account_addr: address): bool acquires Account {
+    // Aborts if no Account resource exists at `account_addr`.
+    let account_resource = borrow_global<Account>(account_addr);
+    option::is_some(&account_resource.signer_capability_offer.for)
+}
+
+ + + +
+ + + +## Function `get_signer_capability_offer_for` + +Returns the address of the account that has a signer capability offer from the account at account_addr. + + +
#[view]
+public fun get_signer_capability_offer_for(account_addr: address): address
+
+ + + +
+Implementation + + +
public fun get_signer_capability_offer_for(account_addr: address): address acquires Account {
+    let account_resource = borrow_global<Account>(account_addr);
+    // Aborts when no signer capability offer is outstanding for this account.
+    assert!(
+        option::is_some(&account_resource.signer_capability_offer.for),
+        error::not_found(ENO_SIGNER_CAPABILITY_OFFERED),
+    );
+    *option::borrow(&account_resource.signer_capability_offer.for)
+}
+
+ + + +
+ + + +## Function `revoke_signer_capability` + +Revoke the account owner's signer capability offer for to_be_revoked_address (i.e., the address that +has a signer capability offer from account but will be revoked in this function). + + +
public entry fun revoke_signer_capability(account: &signer, to_be_revoked_address: address)
+
+ + + +
+Implementation + + +
public entry fun revoke_signer_capability(account: &signer, to_be_revoked_address: address) acquires Account {
+    assert!(exists_at(to_be_revoked_address), error::not_found(EACCOUNT_DOES_NOT_EXIST));
+    let addr = signer::address_of(account);
+    let account_resource = borrow_global_mut<Account>(addr);
+    // Only revoke when the outstanding offer is exactly for `to_be_revoked_address`.
+    assert!(
+        option::contains(&account_resource.signer_capability_offer.for, &to_be_revoked_address),
+        error::not_found(ENO_SUCH_SIGNER_CAPABILITY)
+    );
+    // Delegate the actual clearing of the offer to the unconditional variant.
+    revoke_any_signer_capability(account);
+}
+
+ + + +
+ + + +## Function `revoke_any_signer_capability` + +Revoke any signer capability offer in the specified account. + + +
public entry fun revoke_any_signer_capability(account: &signer)
+
+ + + +
+Implementation + + +
public entry fun revoke_any_signer_capability(account: &signer) acquires Account {
+    let account_resource = borrow_global_mut<Account>(signer::address_of(account));
+    // option::extract aborts if there is no outstanding signer capability offer.
+    option::extract(&mut account_resource.signer_capability_offer.for);
+}
+
+ + + +
+ + + +## Function `create_authorized_signer` + +Return an authorized signer of the offerer, if there's an existing signer capability offer for account +at the offerer's address. + + +
public fun create_authorized_signer(account: &signer, offerer_address: address): signer
+
+ + + +
+Implementation + + +
public fun create_authorized_signer(account: &signer, offerer_address: address): signer acquires Account {
+    assert!(exists_at(offerer_address), error::not_found(EOFFERER_ADDRESS_DOES_NOT_EXIST));
+
+    // Check if there's an existing signer capability offer from the offerer.
+    let account_resource = borrow_global<Account>(offerer_address);
+    let addr = signer::address_of(account);
+    // The caller must be exactly the address the offerer delegated to.
+    assert!(
+        option::contains(&account_resource.signer_capability_offer.for, &addr),
+        error::not_found(ENO_SUCH_SIGNER_CAPABILITY)
+    );
+
+    // Produce a signer for the offerer's address.
+    create_signer(offerer_address)
+}
+
+ + + +
+ + + +## Function `assert_valid_rotation_proof_signature_and_get_auth_key` + +Helper functions for authentication key rotation. + + +
fun assert_valid_rotation_proof_signature_and_get_auth_key(scheme: u8, public_key_bytes: vector<u8>, signature: vector<u8>, challenge: &account::RotationProofChallenge): vector<u8>
+
+ + + +
+Implementation + + +
fun assert_valid_rotation_proof_signature_and_get_auth_key(
+    scheme: u8,
+    public_key_bytes: vector<u8>,
+    signature: vector<u8>,
+    challenge: &RotationProofChallenge
+): vector<u8> {
+    // Verifies `signature` over `challenge` under the given scheme and, on success,
+    // returns the authentication key derived from `public_key_bytes`.
+    if (scheme == ED25519_SCHEME) {
+        let pk = ed25519::new_unvalidated_public_key_from_bytes(public_key_bytes);
+        let sig = ed25519::new_signature_from_bytes(signature);
+        assert!(
+            ed25519::signature_verify_strict_t(&sig, &pk, *challenge),
+            std::error::invalid_argument(EINVALID_PROOF_OF_KNOWLEDGE)
+        );
+        ed25519::unvalidated_public_key_to_authentication_key(&pk)
+    } else if (scheme == MULTI_ED25519_SCHEME) {
+        let pk = multi_ed25519::new_unvalidated_public_key_from_bytes(public_key_bytes);
+        let sig = multi_ed25519::new_signature_from_bytes(signature);
+        assert!(
+            multi_ed25519::signature_verify_strict_t(&sig, &pk, *challenge),
+            std::error::invalid_argument(EINVALID_PROOF_OF_KNOWLEDGE)
+        );
+        multi_ed25519::unvalidated_public_key_to_authentication_key(&pk)
+    } else {
+        // Only the ed25519 and multi-ed25519 schemes are supported.
+        abort error::invalid_argument(EINVALID_SCHEME)
+    }
+}
+
+ + + +
+ + + +## Function `update_auth_key_and_originating_address_table` + +Update the OriginatingAddress table, so that we can find the originating address using the latest address +in the event of key recovery. + + +
fun update_auth_key_and_originating_address_table(originating_addr: address, account_resource: &mut account::Account, new_auth_key_vector: vector<u8>)
+
+ + + +
+Implementation + + +
fun update_auth_key_and_originating_address_table(
+    originating_addr: address,
+    account_resource: &mut Account,
+    new_auth_key_vector: vector<u8>,
+) acquires OriginatingAddress {
+    let address_map = &mut borrow_global_mut<OriginatingAddress>(@aptos_framework).address_map;
+    let curr_auth_key = from_bcs::to_address(account_resource.authentication_key);
+
+    // Checks `OriginatingAddress[curr_auth_key]` is either unmapped, or mapped to `originating_address`.
+    // If it's mapped to the originating address, removes that mapping.
+    // Otherwise, abort if it's mapped to a different address.
+    if (table::contains(address_map, curr_auth_key)) {
+        // If account_a with address_a is rotating its keypair from keypair_a to keypair_b, we expect
+        // the address of the account to stay the same, while its keypair updates to keypair_b.
+        // Here, by asserting that we're calling from the account with the originating address, we enforce
+        // the standard of keeping the same address and updating the keypair at the contract level.
+        // Without this assertion, the dapps could also update the account's address to address_b (the address that
+        // is programmatically related to keypair_b) and update the keypair to keypair_b. This causes problems
+        // for interoperability because different dapps can implement this in different ways.
+        // If the account with address b calls this function with two valid signatures, it will abort at this step,
+        // because address b is not the account's originating address.
+        assert!(
+            originating_addr == table::remove(address_map, curr_auth_key),
+            error::not_found(EINVALID_ORIGINATING_ADDRESS)
+        );
+    };
+
+    // Set `OriginatingAddress[new_auth_key] = originating_address`.
+    let new_auth_key = from_bcs::to_address(new_auth_key_vector);
+    table::add(address_map, new_auth_key, originating_addr);
+
+    // Emit the module event only when event migration is enabled; the legacy
+    // EventHandle event below is emitted unconditionally.
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(KeyRotation {
+            account: originating_addr,
+            old_authentication_key: account_resource.authentication_key,
+            new_authentication_key: new_auth_key_vector,
+        });
+    };
+    event::emit_event<KeyRotationEvent>(
+        &mut account_resource.key_rotation_events,
+        KeyRotationEvent {
+            old_authentication_key: account_resource.authentication_key,
+            new_authentication_key: new_auth_key_vector,
+        }
+    );
+
+    // Update the account resource's authentication key.
+    account_resource.authentication_key = new_auth_key_vector;
+}
+
+ + + +
+ + + +## Function `create_resource_address` + +Basic account creation methods. +This is a helper function to compute resource addresses. Computation of the address +involves the use of a cryptographic hash operation and should be used thoughtfully. + + +
public fun create_resource_address(source: &address, seed: vector<u8>): address
+
+ + + +
+Implementation + + +
public fun create_resource_address(source: &address, seed: vector<u8>): address {
+    // resource address = sha3-256( bcs(source) || seed || DERIVE_RESOURCE_ACCOUNT_SCHEME )
+    let bytes = bcs::to_bytes(source);
+    vector::append(&mut bytes, seed);
+    vector::push_back(&mut bytes, DERIVE_RESOURCE_ACCOUNT_SCHEME);
+    from_bcs::to_address(hash::sha3_256(bytes))
+}
+
+ + + +
+ + + +## Function `create_resource_account` + +A resource account is used to manage resources independent of an account managed by a user. +In Aptos a resource account is created based upon the sha3 256 of the source's address and additional seed data. +A resource account can only be created once, this is designated by setting the +Account::signer_capability_offer::for to the address of the resource account. While an entity may call +create_account to attempt to claim an account ahead of the creation of a resource account, if found Aptos will +transition ownership of the account over to the resource account. This is done by validating that the account has +yet to execute any transactions and that the Account::signer_capability_offer::for is none. The probability of a +collision where someone has legitimately produced a private key that maps to a resource account address is less +than (1/2)^(256). + + +
public fun create_resource_account(source: &signer, seed: vector<u8>): (signer, account::SignerCapability)
+
+ + + +
+Implementation + + +
public fun create_resource_account(source: &signer, seed: vector<u8>): (signer, SignerCapability) acquires Account {
+    let resource_addr = create_resource_address(&signer::address_of(source), seed);
+    let resource = if (exists_at(resource_addr)) {
+        // The address was pre-claimed by a plain account; it can be taken over only if it has
+        // never been used: no signer capability offered and sequence number still zero.
+        let account = borrow_global<Account>(resource_addr);
+        assert!(
+            option::is_none(&account.signer_capability_offer.for),
+            error::already_exists(ERESOURCE_ACCCOUNT_EXISTS),
+        );
+        assert!(
+            account.sequence_number == 0,
+            error::invalid_state(EACCOUNT_ALREADY_USED),
+        );
+        create_signer(resource_addr)
+    } else {
+        create_account_unchecked(resource_addr)
+    };
+
+    // By default, only the SignerCapability should have control over the resource account and not the auth key.
+    // If the source account wants direct control via auth key, they would need to explicitly rotate the auth key
+    // of the resource account using the SignerCapability.
+    rotate_authentication_key_internal(&resource, ZERO_AUTH_KEY);
+
+    // Mark the account as a resource account by pointing its signer capability offer at itself.
+    let account = borrow_global_mut<Account>(resource_addr);
+    account.signer_capability_offer.for = option::some(resource_addr);
+    let signer_cap = SignerCapability { account: resource_addr };
+    (resource, signer_cap)
+}
+
+ + + +
+ + + +## Function `create_framework_reserved_account` + +create the account for system reserved addresses + + +
public(friend) fun create_framework_reserved_account(addr: address): (signer, account::SignerCapability)
+
+ + + +
+Implementation + + +
public(friend) fun create_framework_reserved_account(addr: address): (signer, SignerCapability) {
+    // Only the reserved framework addresses @0x1 through @0xa may be created here.
+    assert!(
+        addr == @0x1 ||
+            addr == @0x2 ||
+            addr == @0x3 ||
+            addr == @0x4 ||
+            addr == @0x5 ||
+            addr == @0x6 ||
+            addr == @0x7 ||
+            addr == @0x8 ||
+            addr == @0x9 ||
+            addr == @0xa,
+        error::permission_denied(ENO_VALID_FRAMEWORK_RESERVED_ADDRESS),
+    );
+    let signer = create_account_unchecked(addr);
+    let signer_cap = SignerCapability { account: addr };
+    (signer, signer_cap)
+}
+
+ + + +
+ + + +## Function `create_guid` + +GUID management methods. + + +
public fun create_guid(account_signer: &signer): guid::GUID
+
+ + + +
+Implementation + + +
public fun create_guid(account_signer: &signer): guid::GUID acquires Account {
+    let addr = signer::address_of(account_signer);
+    let account = borrow_global_mut<Account>(addr);
+    let guid = guid::create(addr, &mut account.guid_creation_num);
+    // NOTE(review): the bound is asserted after guid::create has already advanced the
+    // counter — confirm this post-check ordering is intended.
+    assert!(
+        account.guid_creation_num < MAX_GUID_CREATION_NUM,
+        error::out_of_range(EEXCEEDED_MAX_GUID_CREATION_NUM),
+    );
+    guid
+}
+
+ + + +
+ + + +## Function `new_event_handle` + +GUID management methods. + + +
public fun new_event_handle<T: drop, store>(account: &signer): event::EventHandle<T>
+
+ + + +
+Implementation + + +
public fun new_event_handle<T: drop + store>(account: &signer): EventHandle<T> acquires Account {
+    // Backs the new event handle with a fresh GUID drawn from the account's creation counter.
+    event::new_event_handle(create_guid(account))
+}
+
+ + + +
+ + + +## Function `register_coin` + +Coin management methods. + + +
public(friend) fun register_coin<CoinType>(account_addr: address)
+
+ + + +
+Implementation + + +
public(friend) fun register_coin<CoinType>(account_addr: address) acquires Account {
+    let account = borrow_global_mut<Account>(account_addr);
+    // Records the registration by emitting a CoinRegisterEvent carrying the coin's type info.
+    event::emit_event<CoinRegisterEvent>(
+        &mut account.coin_register_events,
+        CoinRegisterEvent {
+            type_info: type_info::type_of<CoinType>(),
+        },
+    );
+}
+
+ + + +
+ + + +## Function `create_signer_with_capability` + +Capability based functions for efficient use. + + +
public fun create_signer_with_capability(capability: &account::SignerCapability): signer
+
+ + + +
+Implementation + + +
public fun create_signer_with_capability(capability: &SignerCapability): signer {
+    // Holding a SignerCapability is sufficient authority to mint a signer for its account.
+    let addr = &capability.account;
+    create_signer(*addr)
+}
+
+ + + +
+ + + +## Function `get_signer_capability_address` + + + +
public fun get_signer_capability_address(capability: &account::SignerCapability): address
+
+ + + +
+Implementation + + +
public fun get_signer_capability_address(capability: &SignerCapability): address {
+    // Read-only accessor for the address this capability controls.
+    capability.account
+}
+
+ + + +
+ + + +## Function `verify_signed_message` + + + +
public fun verify_signed_message<T: drop>(account: address, account_scheme: u8, account_public_key: vector<u8>, signed_message_bytes: vector<u8>, message: T)
+
+ + + +
+Implementation + + +
public fun verify_signed_message<T: drop>(
+    account: &signer,
+    account_scheme: u8,
+    account_public_key: vector<u8>,
+    signed_message_bytes: vector<u8>,
+    message: T,
+) acquires Account {
+    // Checks that `account_public_key` matches the account's current authentication key and
+    // that `signed_message_bytes` is a valid signature over `message` under that key.
+    // NOTE(review): borrow_global_mut is used although the resource is only read here — confirm.
+    let account_resource = borrow_global_mut<Account>(account);
+    // Verify that the `SignerCapabilityOfferProofChallengeV2` has the right information and is signed by the account owner's key
+    if (account_scheme == ED25519_SCHEME) {
+        let pubkey = ed25519::new_unvalidated_public_key_from_bytes(account_public_key);
+        let expected_auth_key = ed25519::unvalidated_public_key_to_authentication_key(&pubkey);
+        assert!(
+            account_resource.authentication_key == expected_auth_key,
+            error::invalid_argument(EWRONG_CURRENT_PUBLIC_KEY),
+        );
+
+        let signer_capability_sig = ed25519::new_signature_from_bytes(signed_message_bytes);
+        assert!(
+            ed25519::signature_verify_strict_t(&signer_capability_sig, &pubkey, message),
+            error::invalid_argument(EINVALID_PROOF_OF_KNOWLEDGE),
+        );
+    } else if (account_scheme == MULTI_ED25519_SCHEME) {
+        // Same checks as the ed25519 branch, for the multi-ed25519 scheme.
+        let pubkey = multi_ed25519::new_unvalidated_public_key_from_bytes(account_public_key);
+        let expected_auth_key = multi_ed25519::unvalidated_public_key_to_authentication_key(&pubkey);
+        assert!(
+            account_resource.authentication_key == expected_auth_key,
+            error::invalid_argument(EWRONG_CURRENT_PUBLIC_KEY),
+        );
+
+        let signer_capability_sig = multi_ed25519::new_signature_from_bytes(signed_message_bytes);
+        assert!(
+            multi_ed25519::signature_verify_strict_t(&signer_capability_sig, &pubkey, message),
+            error::invalid_argument(EINVALID_PROOF_OF_KNOWLEDGE),
+        );
+    } else {
+        // Only the ed25519 and multi-ed25519 schemes are supported.
+        abort error::invalid_argument(EINVALID_SCHEME)
+    };
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1The initialization of the account module should result in the proper system initialization with valid and consistent resources.HighInitialization of the account module creates a valid address_map table and moves the resources to the OriginatingAddress under the aptos_framework account.Audited that the address_map table is created and populated correctly with the expected initial values.
2After successfully creating an account, the account resources should initialize with the default data, ensuring the proper initialization of the account state.HighCreating an account via the create_account function validates the state and moves a new account resource under new_address.Formally verified via create_account.
3Checking the existence of an account under a given address never results in an abort.LowThe exists_at function returns a boolean value indicating the existence of an account under the given address.Formally verified by the aborts_if condition.
4The account module maintains bounded sequence numbers for all accounts, guaranteeing they remain within the specified limit.MediumThe sequence number of an account may only increase up to MAX_U64 in a succeeding manner.Formally verified via increment_sequence_number that it remains within the defined boundary of MAX_U64.
5Only the ed25519 and multied25519 signature schemes are permissible.LowExclusively perform key rotation using either the ed25519 or multied25519 signature schemes. Currently restricts the offering of rotation/signing capabilities to the ed25519 or multied25519 schemes.Formally Verified: rotate_authentication_key, offer_rotation_capability, and offer_signer_capability. Verified that it aborts if the account_scheme is not ED25519_SCHEME and not MULTI_ED25519_SCHEME. Audited that the scheme enums correspond correctly to signature logic.
6Exclusively permit the rotation of the authentication key of an account for the account owner or any user who possesses rotation capabilities associated with that account.CriticalIn the rotate_authentication_key function, the authentication key derived from the from_public_key_bytes should match the signer's current authentication key. Only the delegate_signer granted the rotation capabilities may invoke the rotate_authentication_key_with_rotation_capability function.Formally Verified via rotate_authentication_key and rotate_authentication_key_with_rotation_capability.
7Only the owner of an account may offer or revoke the following capabilities: (1) offer_rotation_capability, (2) offer_signer_capability, (3) revoke_rotation_capability, and (4) revoke_signer_capability.CriticalAn account resource may only be modified by the owner of the account utilizing: rotation_capability_offer, signer_capability_offer.Formally verified via offer_rotation_capability, offer_signer_capability, revoke_rotation_capability, and revoke_signer_capability.
8The capability to create a signer for the account is exclusively reserved for either the account owner or the account that has been granted the signing capabilities.CriticalSigner creation for the account may only be successfully executed by explicitly granting the signing capabilities with the create_authorized_signer function.Formally verified via create_authorized_signer.
9Rotating the authentication key requires two valid signatures. With the private key of the current authentication key. With the private key of the new authentication key.CriticalThe rotate_authentication_key verifies two signatures (current and new) before rotating to the new key. The first signature ensures the user has the intended capability, and the second signature ensures that the user owns the new key.Formally verified via rotate_authentication_key and rotate_authentication_key_with_rotation_capability.
10The rotation of the authentication key updates the account's authentication key with the newly supplied one.HighThe auth_key may only update to the provided new_auth_key after verifying the signature.Formally Verified in rotate_authentication_key_internal that the authentication key of an account is modified to the provided authentication key if the signature verification was successful.
11The creation number is monotonically increasing.LowThe guid_creation_num in the Account structure is monotonically increasing.Formally Verified via guid_creation_num.
12The Account resource is persistent.LowThe Account structure assigned to the address should be persistent.Audited that the Account structure is persistent.
+ + + + + + +### Module-level Specification + + +
pragma verify = true;
+// Strict mode: every possible abort of a specified function must be covered by an
+// explicit aborts_if condition — presumably intended module-wide; confirm against the prover docs.
+pragma aborts_if_is_strict;
+
+ + + + + +### Function `initialize` + + +
public(friend) fun initialize(aptos_framework: &signer)
+
+ + +Only the address @aptos_framework can call. +OriginatingAddress does not exist under @aptos_framework before the call. + + +
let aptos_addr = signer::address_of(aptos_framework);
+aborts_if !system_addresses::is_aptos_framework_address(aptos_addr);
+aborts_if exists<OriginatingAddress>(aptos_addr);
+ensures exists<OriginatingAddress>(aptos_addr);
+
+ + + + + +### Function `create_account_if_does_not_exist` + + +
public fun create_account_if_does_not_exist(account_address: address)
+
+ + +Ensure that the account exists at the end of the call. + + +
let authentication_key = bcs::to_bytes(account_address);
+aborts_if !exists<Account>(account_address) && (
+    account_address == @vm_reserved
+    || account_address == @aptos_framework
+    || account_address == @aptos_token
+    || !(len(authentication_key) == 32)
+);
+ensures exists<Account>(account_address);
+
+ + + + + +### Function `create_account` + + +
public(friend) fun create_account(new_address: address): signer
+
+ + +Check if the bytes of the new address is 32. +The Account does not exist under the new address before creating the account. +Limit the new account address is not @vm_reserved / @aptos_framework / @aptos_token. + + +
include CreateAccountAbortsIf {addr: new_address};
+aborts_if new_address == @vm_reserved || new_address == @aptos_framework || new_address == @aptos_token;
+ensures signer::address_of(result) == new_address;
+// This enforces high-level requirement 2:
+ensures exists<Account>(new_address);
+
+ + + + + +### Function `create_account_unchecked` + + +
fun create_account_unchecked(new_address: address): signer
+
+ + +Check if the bytes of the new address is 32. +The Account does not exist under the new address before creating the account. + + +
include CreateAccountAbortsIf {addr: new_address};
+ensures signer::address_of(result) == new_address;
+ensures exists<Account>(new_address);
+
+ + + + + +### Function `exists_at` + + +
#[view]
+public fun exists_at(addr: address): bool
+
+ + + + +
// This enforces high-level requirement 3:
+aborts_if false;
+
+ + + + + + + +
schema CreateAccountAbortsIf {
+    addr: address;
+    // Account creation aborts when the BCS serialization of the address is not 32 bytes,
+    // or when an Account resource already exists at `addr`.
+    let authentication_key = bcs::to_bytes(addr);
+    aborts_if len(authentication_key) != 32;
+    aborts_if exists<Account>(addr);
+    ensures len(authentication_key) == 32;
+}
+
+ + + + + +### Function `get_guid_next_creation_num` + + +
#[view]
+public fun get_guid_next_creation_num(addr: address): u64
+
+ + + + +
aborts_if !exists<Account>(addr);
+ensures result == global<Account>(addr).guid_creation_num;
+
+ + + + + +### Function `get_sequence_number` + + +
#[view]
+public fun get_sequence_number(addr: address): u64
+
+ + + + +
aborts_if !exists<Account>(addr);
+ensures result == global<Account>(addr).sequence_number;
+
+ + + + + +### Function `increment_sequence_number` + + +
public(friend) fun increment_sequence_number(addr: address)
+
+ + +The Account existed under the address. +The sequence_number of the Account is up to MAX_U64. + + +
let sequence_number = global<Account>(addr).sequence_number;
+aborts_if !exists<Account>(addr);
+// This enforces high-level requirement 4:
+aborts_if sequence_number == MAX_U64;
+modifies global<Account>(addr);
+let post post_sequence_number = global<Account>(addr).sequence_number;
+ensures post_sequence_number == sequence_number + 1;
+
+ + + + + +### Function `get_authentication_key` + + +
#[view]
+public fun get_authentication_key(addr: address): vector<u8>
+
+ + + + +
aborts_if !exists<Account>(addr);
+ensures result == global<Account>(addr).authentication_key;
+
+ + + + + +### Function `rotate_authentication_key_internal` + + +
public(friend) fun rotate_authentication_key_internal(account: &signer, new_auth_key: vector<u8>)
+
+ + +The Account existed under the signer before the call. +The length of new_auth_key is 32. + + +
let addr = signer::address_of(account);
+// This enforces high-level requirement 10:
+let post account_resource = global<Account>(addr);
+aborts_if !exists<Account>(addr);
+aborts_if vector::length(new_auth_key) != 32;
+modifies global<Account>(addr);
+ensures account_resource.authentication_key == new_auth_key;
+
+ + + + + +### Function `rotate_authentication_key_call` + + +
entry fun rotate_authentication_key_call(account: &signer, new_auth_key: vector<u8>)
+
+ + + + +
let addr = signer::address_of(account);
+// This enforces high-level requirement 10:
+let post account_resource = global<Account>(addr);
+aborts_if !exists<Account>(addr);
+aborts_if vector::length(new_auth_key) != 32;
+modifies global<Account>(addr);
+ensures account_resource.authentication_key == new_auth_key;
+
+ + + + + + + +
fun spec_assert_valid_rotation_proof_signature_and_get_auth_key(scheme: u8, public_key_bytes: vector<u8>, signature: vector<u8>, challenge: RotationProofChallenge): vector<u8>;
+
+ + + + + +### Function `rotate_authentication_key` + + +
public entry fun rotate_authentication_key(account: &signer, from_scheme: u8, from_public_key_bytes: vector<u8>, to_scheme: u8, to_public_key_bytes: vector<u8>, cap_rotate_key: vector<u8>, cap_update_table: vector<u8>)
+
+ + +The Account exists under the signer. +The authentication scheme is either ED25519_SCHEME or MULTI_ED25519_SCHEME. + + +
let addr = signer::address_of(account);
+let account_resource = global<Account>(addr);
+aborts_if !exists<Account>(addr);
+// This enforces high-level requirement 6:
+include from_scheme == ED25519_SCHEME ==> ed25519::NewUnvalidatedPublicKeyFromBytesAbortsIf { bytes: from_public_key_bytes };
+aborts_if from_scheme == ED25519_SCHEME && ({
+    let expected_auth_key = ed25519::spec_public_key_bytes_to_authentication_key(from_public_key_bytes);
+    account_resource.authentication_key != expected_auth_key
+});
+include from_scheme == MULTI_ED25519_SCHEME ==> multi_ed25519::NewUnvalidatedPublicKeyFromBytesAbortsIf { bytes: from_public_key_bytes };
+aborts_if from_scheme == MULTI_ED25519_SCHEME && ({
+    let from_auth_key = multi_ed25519::spec_public_key_bytes_to_authentication_key(from_public_key_bytes);
+    account_resource.authentication_key != from_auth_key
+});
+// This enforces high-level requirement 5:
+aborts_if from_scheme != ED25519_SCHEME && from_scheme != MULTI_ED25519_SCHEME;
+let curr_auth_key = from_bcs::deserialize<address>(account_resource.authentication_key);
+aborts_if !from_bcs::deserializable<address>(account_resource.authentication_key);
+let challenge = RotationProofChallenge {
+    sequence_number: account_resource.sequence_number,
+    originator: addr,
+    current_auth_key: curr_auth_key,
+    new_public_key: to_public_key_bytes,
+};
+// This enforces high-level requirement 9:
+include AssertValidRotationProofSignatureAndGetAuthKeyAbortsIf {
+    scheme: from_scheme,
+    public_key_bytes: from_public_key_bytes,
+    signature: cap_rotate_key,
+    challenge,
+};
+include AssertValidRotationProofSignatureAndGetAuthKeyAbortsIf {
+    scheme: to_scheme,
+    public_key_bytes: to_public_key_bytes,
+    signature: cap_update_table,
+    challenge,
+};
+let originating_addr = addr;
+let new_auth_key_vector = spec_assert_valid_rotation_proof_signature_and_get_auth_key(to_scheme, to_public_key_bytes, cap_update_table, challenge);
+let address_map = global<OriginatingAddress>(@aptos_framework).address_map;
+let new_auth_key = from_bcs::deserialize<address>(new_auth_key_vector);
+aborts_if !exists<OriginatingAddress>(@aptos_framework);
+aborts_if !from_bcs::deserializable<address>(account_resource.authentication_key);
+aborts_if table::spec_contains(address_map, curr_auth_key) &&
+    table::spec_get(address_map, curr_auth_key) != originating_addr;
+aborts_if !from_bcs::deserializable<address>(new_auth_key_vector);
+aborts_if curr_auth_key != new_auth_key && table::spec_contains(address_map, new_auth_key);
+include UpdateAuthKeyAndOriginatingAddressTableAbortsIf {
+    originating_addr: addr,
+};
+let post auth_key = global<Account>(addr).authentication_key;
+ensures auth_key == new_auth_key_vector;
+
+ + + + + +### Function `rotate_authentication_key_with_rotation_capability` + + +
public entry fun rotate_authentication_key_with_rotation_capability(delegate_signer: &signer, rotation_cap_offerer_address: address, new_scheme: u8, new_public_key_bytes: vector<u8>, cap_update_table: vector<u8>)
+
+ + + + +
aborts_if !exists<Account>(rotation_cap_offerer_address);
+let delegate_address = signer::address_of(delegate_signer);
+let offerer_account_resource = global<Account>(rotation_cap_offerer_address);
+aborts_if !from_bcs::deserializable<address>(offerer_account_resource.authentication_key);
+let curr_auth_key = from_bcs::deserialize<address>(offerer_account_resource.authentication_key);
+aborts_if !exists<Account>(delegate_address);
+let challenge = RotationProofChallenge {
+    sequence_number: global<Account>(delegate_address).sequence_number,
+    originator: rotation_cap_offerer_address,
+    current_auth_key: curr_auth_key,
+    new_public_key: new_public_key_bytes,
+};
+// This enforces high-level requirement 6:
+aborts_if !option::spec_contains(offerer_account_resource.rotation_capability_offer.for, delegate_address);
+// This enforces high-level requirement 9:
+include AssertValidRotationProofSignatureAndGetAuthKeyAbortsIf {
+    scheme: new_scheme,
+    public_key_bytes: new_public_key_bytes,
+    signature: cap_update_table,
+    challenge,
+};
+let new_auth_key_vector = spec_assert_valid_rotation_proof_signature_and_get_auth_key(new_scheme, new_public_key_bytes, cap_update_table, challenge);
+let address_map = global<OriginatingAddress>(@aptos_framework).address_map;
+aborts_if !exists<OriginatingAddress>(@aptos_framework);
+aborts_if !from_bcs::deserializable<address>(offerer_account_resource.authentication_key);
+aborts_if table::spec_contains(address_map, curr_auth_key) &&
+    table::spec_get(address_map, curr_auth_key) != rotation_cap_offerer_address;
+aborts_if !from_bcs::deserializable<address>(new_auth_key_vector);
+let new_auth_key = from_bcs::deserialize<address>(new_auth_key_vector);
+aborts_if curr_auth_key != new_auth_key && table::spec_contains(address_map, new_auth_key);
+include UpdateAuthKeyAndOriginatingAddressTableAbortsIf {
+    originating_addr: rotation_cap_offerer_address,
+    account_resource: offerer_account_resource,
+};
+let post auth_key = global<Account>(rotation_cap_offerer_address).authentication_key;
+ensures auth_key == new_auth_key_vector;
+
+ + + + + +### Function `offer_rotation_capability` + + +
public entry fun offer_rotation_capability(account: &signer, rotation_capability_sig_bytes: vector<u8>, account_scheme: u8, account_public_key_bytes: vector<u8>, recipient_address: address)
+
+ + + + +
let source_address = signer::address_of(account);
+let account_resource = global<Account>(source_address);
+let proof_challenge = RotationCapabilityOfferProofChallengeV2 {
+    chain_id: global<chain_id::ChainId>(@aptos_framework).id,
+    sequence_number: account_resource.sequence_number,
+    source_address,
+    recipient_address,
+};
+aborts_if !exists<chain_id::ChainId>(@aptos_framework);
+aborts_if !exists<Account>(recipient_address);
+aborts_if !exists<Account>(source_address);
+include account_scheme == ED25519_SCHEME ==> ed25519::NewUnvalidatedPublicKeyFromBytesAbortsIf { bytes: account_public_key_bytes };
+aborts_if account_scheme == ED25519_SCHEME && ({
+    let expected_auth_key = ed25519::spec_public_key_bytes_to_authentication_key(account_public_key_bytes);
+    account_resource.authentication_key != expected_auth_key
+});
+include account_scheme == ED25519_SCHEME ==> ed25519::NewSignatureFromBytesAbortsIf { bytes: rotation_capability_sig_bytes };
+aborts_if account_scheme == ED25519_SCHEME && !ed25519::spec_signature_verify_strict_t(
+    ed25519::Signature { bytes: rotation_capability_sig_bytes },
+    ed25519::UnvalidatedPublicKey { bytes: account_public_key_bytes },
+    proof_challenge
+);
+include account_scheme == MULTI_ED25519_SCHEME ==> multi_ed25519::NewUnvalidatedPublicKeyFromBytesAbortsIf { bytes: account_public_key_bytes };
+aborts_if account_scheme == MULTI_ED25519_SCHEME && ({
+    let expected_auth_key = multi_ed25519::spec_public_key_bytes_to_authentication_key(account_public_key_bytes);
+    account_resource.authentication_key != expected_auth_key
+});
+include account_scheme == MULTI_ED25519_SCHEME ==> multi_ed25519::NewSignatureFromBytesAbortsIf { bytes: rotation_capability_sig_bytes };
+aborts_if account_scheme == MULTI_ED25519_SCHEME && !multi_ed25519::spec_signature_verify_strict_t(
+    multi_ed25519::Signature { bytes: rotation_capability_sig_bytes },
+    multi_ed25519::UnvalidatedPublicKey { bytes: account_public_key_bytes },
+    proof_challenge
+);
+// This enforces high-level requirement 5:
+aborts_if account_scheme != ED25519_SCHEME && account_scheme != MULTI_ED25519_SCHEME;
+// This enforces high-level requirement 7:
+modifies global<Account>(source_address);
+let post offer_for = global<Account>(source_address).rotation_capability_offer.for;
+ensures option::spec_borrow(offer_for) == recipient_address;
+
+ + + + + +### Function `is_rotation_capability_offered` + + +
#[view]
+public fun is_rotation_capability_offered(account_addr: address): bool
+
+ + + + +
aborts_if !exists<Account>(account_addr);
+
+ + + + + +### Function `get_rotation_capability_offer_for` + + +
#[view]
+public fun get_rotation_capability_offer_for(account_addr: address): address
+
+ + + + +
aborts_if !exists<Account>(account_addr);
+let account_resource = global<Account>(account_addr);
+aborts_if len(account_resource.rotation_capability_offer.for.vec) == 0;
+
+ + + + + +### Function `revoke_rotation_capability` + + +
public entry fun revoke_rotation_capability(account: &signer, to_be_revoked_address: address)
+
+ + + + +
aborts_if !exists<Account>(to_be_revoked_address);
+let addr = signer::address_of(account);
+let account_resource = global<Account>(addr);
+aborts_if !exists<Account>(addr);
+aborts_if !option::spec_contains(account_resource.rotation_capability_offer.for,to_be_revoked_address);
+modifies global<Account>(addr);
+ensures exists<Account>(to_be_revoked_address);
+let post offer_for = global<Account>(addr).rotation_capability_offer.for;
+ensures !option::spec_is_some(offer_for);
+
+ + + + + +### Function `revoke_any_rotation_capability` + + +
public entry fun revoke_any_rotation_capability(account: &signer)
+
+ + + + +
let addr = signer::address_of(account);
+modifies global<Account>(addr);
+aborts_if !exists<Account>(addr);
+let account_resource = global<Account>(addr);
+// This enforces high-level requirement 7:
+aborts_if !option::is_some(account_resource.rotation_capability_offer.for);
+let post offer_for = global<Account>(addr).rotation_capability_offer.for;
+ensures !option::spec_is_some(offer_for);
+
+ + + + + +### Function `offer_signer_capability` + + +
public entry fun offer_signer_capability(account: &signer, signer_capability_sig_bytes: vector<u8>, account_scheme: u8, account_public_key_bytes: vector<u8>, recipient_address: address)
+
+ + +The Account exists under the signer. +The authentication scheme is either ED25519_SCHEME or MULTI_ED25519_SCHEME. + + +
let source_address = signer::address_of(account);
+let account_resource = global<Account>(source_address);
+let proof_challenge = SignerCapabilityOfferProofChallengeV2 {
+    sequence_number: account_resource.sequence_number,
+    source_address,
+    recipient_address,
+};
+aborts_if !exists<Account>(recipient_address);
+aborts_if !exists<Account>(source_address);
+include account_scheme == ED25519_SCHEME ==> ed25519::NewUnvalidatedPublicKeyFromBytesAbortsIf { bytes: account_public_key_bytes };
+aborts_if account_scheme == ED25519_SCHEME && ({
+    let expected_auth_key = ed25519::spec_public_key_bytes_to_authentication_key(account_public_key_bytes);
+    account_resource.authentication_key != expected_auth_key
+});
+include account_scheme == ED25519_SCHEME ==> ed25519::NewSignatureFromBytesAbortsIf { bytes: signer_capability_sig_bytes };
+aborts_if account_scheme == ED25519_SCHEME && !ed25519::spec_signature_verify_strict_t(
+    ed25519::Signature { bytes: signer_capability_sig_bytes },
+    ed25519::UnvalidatedPublicKey { bytes: account_public_key_bytes },
+    proof_challenge
+);
+include account_scheme == MULTI_ED25519_SCHEME ==> multi_ed25519::NewUnvalidatedPublicKeyFromBytesAbortsIf { bytes: account_public_key_bytes };
+aborts_if account_scheme == MULTI_ED25519_SCHEME && ({
+    let expected_auth_key = multi_ed25519::spec_public_key_bytes_to_authentication_key(account_public_key_bytes);
+    account_resource.authentication_key != expected_auth_key
+});
+include account_scheme == MULTI_ED25519_SCHEME ==> multi_ed25519::NewSignatureFromBytesAbortsIf { bytes: signer_capability_sig_bytes };
+aborts_if account_scheme == MULTI_ED25519_SCHEME && !multi_ed25519::spec_signature_verify_strict_t(
+    multi_ed25519::Signature { bytes: signer_capability_sig_bytes },
+    multi_ed25519::UnvalidatedPublicKey { bytes: account_public_key_bytes },
+    proof_challenge
+);
+// This enforces high-level requirement 5:
+aborts_if account_scheme != ED25519_SCHEME && account_scheme != MULTI_ED25519_SCHEME;
+// This enforces high-level requirement 7:
+modifies global<Account>(source_address);
+let post offer_for = global<Account>(source_address).signer_capability_offer.for;
+ensures option::spec_borrow(offer_for) == recipient_address;
+
+ + + + + +### Function `is_signer_capability_offered` + + +
#[view]
+public fun is_signer_capability_offered(account_addr: address): bool
+
+ + + + +
aborts_if !exists<Account>(account_addr);
+
+ + + + + +### Function `get_signer_capability_offer_for` + + +
#[view]
+public fun get_signer_capability_offer_for(account_addr: address): address
+
+ + + + +
aborts_if !exists<Account>(account_addr);
+let account_resource = global<Account>(account_addr);
+aborts_if len(account_resource.signer_capability_offer.for.vec) == 0;
+
+ + + + + +### Function `revoke_signer_capability` + + +
public entry fun revoke_signer_capability(account: &signer, to_be_revoked_address: address)
+
+ + +The Account existed under the signer. +The value of signer_capability_offer.for of Account resource under the signer is to_be_revoked_address. + + +
aborts_if !exists<Account>(to_be_revoked_address);
+let addr = signer::address_of(account);
+let account_resource = global<Account>(addr);
+aborts_if !exists<Account>(addr);
+aborts_if !option::spec_contains(account_resource.signer_capability_offer.for,to_be_revoked_address);
+modifies global<Account>(addr);
+ensures exists<Account>(to_be_revoked_address);
+
+ + + + + +### Function `revoke_any_signer_capability` + + +
public entry fun revoke_any_signer_capability(account: &signer)
+
+ + + + +
modifies global<Account>(signer::address_of(account));
+// This enforces high-level requirement 7:
+aborts_if !exists<Account>(signer::address_of(account));
+let account_resource = global<Account>(signer::address_of(account));
+aborts_if !option::is_some(account_resource.signer_capability_offer.for);
+
+ + + + + +### Function `create_authorized_signer` + + +
public fun create_authorized_signer(account: &signer, offerer_address: address): signer
+
+ + +The Account existed under the signer. +The value of signer_capability_offer.for of Account resource under the signer is offerer_address. + + +
// This enforces high-level requirement 8:
+include AccountContainsAddr{
+    account,
+    address: offerer_address,
+};
+modifies global<Account>(offerer_address);
+ensures exists<Account>(offerer_address);
+ensures signer::address_of(result) == offerer_address;
+
+ + + + + + + +
schema AccountContainsAddr {
+    account: signer;
+    address: address;
+    let addr = signer::address_of(account);
+    let account_resource = global<Account>(address);
+    aborts_if !exists<Account>(address);
+    // This enforces high-level requirement 3 of the create_signer module:
+    aborts_if !option::spec_contains(account_resource.signer_capability_offer.for,addr);
+}
+
+ + + + + +### Function `assert_valid_rotation_proof_signature_and_get_auth_key` + + +
fun assert_valid_rotation_proof_signature_and_get_auth_key(scheme: u8, public_key_bytes: vector<u8>, signature: vector<u8>, challenge: &account::RotationProofChallenge): vector<u8>
+
+ + + + +
pragma opaque;
+include AssertValidRotationProofSignatureAndGetAuthKeyAbortsIf;
+ensures [abstract] result == spec_assert_valid_rotation_proof_signature_and_get_auth_key(scheme, public_key_bytes, signature, challenge);
+
+ + + + + + + +
schema AssertValidRotationProofSignatureAndGetAuthKeyAbortsIf {
+    scheme: u8;
+    public_key_bytes: vector<u8>;
+    signature: vector<u8>;
+    challenge: RotationProofChallenge;
+    include scheme == ED25519_SCHEME ==> ed25519::NewUnvalidatedPublicKeyFromBytesAbortsIf { bytes: public_key_bytes };
+    include scheme == ED25519_SCHEME ==> ed25519::NewSignatureFromBytesAbortsIf { bytes: signature };
+    aborts_if scheme == ED25519_SCHEME && !ed25519::spec_signature_verify_strict_t(
+        ed25519::Signature { bytes: signature },
+        ed25519::UnvalidatedPublicKey { bytes: public_key_bytes },
+        challenge
+    );
+    include scheme == MULTI_ED25519_SCHEME ==> multi_ed25519::NewUnvalidatedPublicKeyFromBytesAbortsIf { bytes: public_key_bytes };
+    include scheme == MULTI_ED25519_SCHEME ==> multi_ed25519::NewSignatureFromBytesAbortsIf { bytes: signature };
+    aborts_if scheme == MULTI_ED25519_SCHEME && !multi_ed25519::spec_signature_verify_strict_t(
+        multi_ed25519::Signature { bytes: signature },
+        multi_ed25519::UnvalidatedPublicKey { bytes: public_key_bytes },
+        challenge
+    );
+    aborts_if scheme != ED25519_SCHEME && scheme != MULTI_ED25519_SCHEME;
+}
+
+ + + + + +### Function `update_auth_key_and_originating_address_table` + + +
fun update_auth_key_and_originating_address_table(originating_addr: address, account_resource: &mut account::Account, new_auth_key_vector: vector<u8>)
+
+ + + + +
modifies global<OriginatingAddress>(@aptos_framework);
+include UpdateAuthKeyAndOriginatingAddressTableAbortsIf;
+
+ + + + + + + +
schema UpdateAuthKeyAndOriginatingAddressTableAbortsIf {
+    originating_addr: address;
+    account_resource: Account;
+    new_auth_key_vector: vector<u8>;
+    let address_map = global<OriginatingAddress>(@aptos_framework).address_map;
+    let curr_auth_key = from_bcs::deserialize<address>(account_resource.authentication_key);
+    let new_auth_key = from_bcs::deserialize<address>(new_auth_key_vector);
+    aborts_if !exists<OriginatingAddress>(@aptos_framework);
+    aborts_if !from_bcs::deserializable<address>(account_resource.authentication_key);
+    aborts_if table::spec_contains(address_map, curr_auth_key) &&
+        table::spec_get(address_map, curr_auth_key) != originating_addr;
+    aborts_if !from_bcs::deserializable<address>(new_auth_key_vector);
+    aborts_if curr_auth_key != new_auth_key && table::spec_contains(address_map, new_auth_key);
+    ensures table::spec_contains(global<OriginatingAddress>(@aptos_framework).address_map, from_bcs::deserialize<address>(new_auth_key_vector));
+}
+
+ + + + + +### Function `create_resource_address` + + +
public fun create_resource_address(source: &address, seed: vector<u8>): address
+
+ + +The resource address is deterministically derived from the source address and seed, and is guaranteed to differ from the source address. +This function is opaque and never aborts. + + +
pragma opaque;
+pragma aborts_if_is_strict = false;
+aborts_if [abstract] false;
+ensures [abstract] result == spec_create_resource_address(source, seed);
+ensures [abstract] source != result;
+
+ + + + + + + +
fun spec_create_resource_address(source: address, seed: vector<u8>): address;
+
+ + + + + +### Function `create_resource_account` + + +
public fun create_resource_account(source: &signer, seed: vector<u8>): (signer, account::SignerCapability)
+
+ + + + +
let source_addr = signer::address_of(source);
+let resource_addr = spec_create_resource_address(source_addr, seed);
+aborts_if len(ZERO_AUTH_KEY) != 32;
+include exists_at(resource_addr) ==> CreateResourceAccountAbortsIf;
+include !exists_at(resource_addr) ==> CreateAccountAbortsIf {addr: resource_addr};
+ensures signer::address_of(result_1) == resource_addr;
+let post offer_for = global<Account>(resource_addr).signer_capability_offer.for;
+ensures option::spec_borrow(offer_for) == resource_addr;
+ensures result_2 == SignerCapability { account: resource_addr };
+
+ + + + + +### Function `create_framework_reserved_account` + + +
public(friend) fun create_framework_reserved_account(addr: address): (signer, account::SignerCapability)
+
+ + +Check that the byte length of the new address is 32. +The Account does not exist under the new address before creating the account. +The system reserved addresses are @0x1 / @0x2 / @0x3 / @0x4 / @0x5 / @0x6 / @0x7 / @0x8 / @0x9 / @0xa. + + +
aborts_if spec_is_framework_address(addr);
+include CreateAccountAbortsIf {addr};
+ensures signer::address_of(result_1) == addr;
+ensures result_2 == SignerCapability { account: addr };
+
+ + + + + + + +
fun spec_is_framework_address(addr: address): bool{
+   addr != @0x1 &&
+   addr != @0x2 &&
+   addr != @0x3 &&
+   addr != @0x4 &&
+   addr != @0x5 &&
+   addr != @0x6 &&
+   addr != @0x7 &&
+   addr != @0x8 &&
+   addr != @0x9 &&
+   addr != @0xa
+}
+
+ + + + + +### Function `create_guid` + + +
public fun create_guid(account_signer: &signer): guid::GUID
+
+ + +The Account existed under the signer. +The guid_creation_num of the account resource is up to MAX_U64. + + +
let addr = signer::address_of(account_signer);
+include NewEventHandleAbortsIf {
+    account: account_signer,
+};
+modifies global<Account>(addr);
+// This enforces high-level requirement 11:
+ensures global<Account>(addr).guid_creation_num == old(global<Account>(addr).guid_creation_num) + 1;
+
+ + + + + +### Function `new_event_handle` + + +
public fun new_event_handle<T: drop, store>(account: &signer): event::EventHandle<T>
+
+ + +The Account existed under the signer. +The guid_creation_num of the Account is up to MAX_U64. + + +
include NewEventHandleAbortsIf;
+
+ + + + + + + +
schema NewEventHandleAbortsIf {
+    account: &signer;
+    let addr = signer::address_of(account);
+    let account = global<Account>(addr);
+    aborts_if !exists<Account>(addr);
+    aborts_if account.guid_creation_num + 1 > MAX_U64;
+    aborts_if account.guid_creation_num + 1 >= MAX_GUID_CREATION_NUM;
+}
+
+ + + + + +### Function `register_coin` + + +
public(friend) fun register_coin<CoinType>(account_addr: address)
+
+ + + + +
aborts_if !exists<Account>(account_addr);
+aborts_if !type_info::spec_is_struct<CoinType>();
+modifies global<Account>(account_addr);
+
+ + + + + +### Function `create_signer_with_capability` + + +
public fun create_signer_with_capability(capability: &account::SignerCapability): signer
+
+ + + + +
let addr = capability.account;
+ensures signer::address_of(result) == addr;
+
+ + + + + + + +
schema CreateResourceAccountAbortsIf {
+    resource_addr: address;
+    let account = global<Account>(resource_addr);
+    aborts_if len(account.signer_capability_offer.for.vec) != 0;
+    aborts_if account.sequence_number != 0;
+}
+
+ + + + + +### Function `verify_signed_message` + + +
public fun verify_signed_message<T: drop>(account: address, account_scheme: u8, account_public_key: vector<u8>, signed_message_bytes: vector<u8>, message: T)
+
+ + + + +
pragma aborts_if_is_partial;
+modifies global<Account>(account);
+let account_resource = global<Account>(account);
+aborts_if !exists<Account>(account);
+include account_scheme == ED25519_SCHEME ==> ed25519::NewUnvalidatedPublicKeyFromBytesAbortsIf { bytes: account_public_key };
+aborts_if account_scheme == ED25519_SCHEME && ({
+    let expected_auth_key = ed25519::spec_public_key_bytes_to_authentication_key(account_public_key);
+    account_resource.authentication_key != expected_auth_key
+});
+include account_scheme == MULTI_ED25519_SCHEME ==> multi_ed25519::NewUnvalidatedPublicKeyFromBytesAbortsIf { bytes: account_public_key };
+aborts_if account_scheme == MULTI_ED25519_SCHEME && ({
+    let expected_auth_key = multi_ed25519::spec_public_key_bytes_to_authentication_key(account_public_key);
+    account_resource.authentication_key != expected_auth_key
+});
+include account_scheme == ED25519_SCHEME ==> ed25519::NewSignatureFromBytesAbortsIf { bytes: signed_message_bytes };
+include account_scheme == MULTI_ED25519_SCHEME ==> multi_ed25519::NewSignatureFromBytesAbortsIf { bytes: signed_message_bytes };
+aborts_if account_scheme != ED25519_SCHEME && account_scheme != MULTI_ED25519_SCHEME;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/aggregator.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/aggregator.md new file mode 100644 index 0000000000000..4d2bfca013199 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/aggregator.md @@ -0,0 +1,474 @@ + + + +# Module `0x1::aggregator` + +This module provides an interface for aggregators. Aggregators are similar to +unsigned integers and support addition and subtraction (aborting on underflow +or on overflowing a custom upper limit). The difference from integers is that +aggregators allow to perform both additions and subtractions in parallel across +multiple transactions, enabling parallel execution. For example, if the first +transaction is doing add(X, 1) for aggregator resource X, and the second +is doing sub(X,3), they can be executed in parallel avoiding a read-modify-write +dependency. +However, reading the aggregator value (i.e. calling read(X)) is an expensive +operation and should be avoided as much as possible because it reduces the +parallelism. Moreover, **aggregators can only be created by Aptos Framework (0x1) +at the moment.** + + +- [Struct `Aggregator`](#0x1_aggregator_Aggregator) +- [Constants](#@Constants_0) +- [Function `limit`](#0x1_aggregator_limit) +- [Function `add`](#0x1_aggregator_add) +- [Function `sub`](#0x1_aggregator_sub) +- [Function `read`](#0x1_aggregator_read) +- [Function `destroy`](#0x1_aggregator_destroy) +- [Specification](#@Specification_1) + - [Struct `Aggregator`](#@Specification_1_Aggregator) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `limit`](#@Specification_1_limit) + - [Function `add`](#@Specification_1_add) + - [Function `sub`](#@Specification_1_sub) + - [Function `read`](#@Specification_1_read) + - [Function `destroy`](#@Specification_1_destroy) + + +
+ + + + + +## Struct `Aggregator` + +Represents an integer which supports parallel additions and subtractions +across multiple transactions. See the module description for more details. + + +
struct Aggregator has store
+
+ + + +
+Fields + + +
+
+handle: address +
+
+ +
+
+key: address +
+
+ +
+
+limit: u128 +
+
+ +
+
+ + +
+ + + +## Constants + + + + +The value of aggregator overflows. Raised by native code. + + +
const EAGGREGATOR_OVERFLOW: u64 = 1;
+
+ + + + + +The value of aggregator underflows (goes below zero). Raised by native code. + + +
const EAGGREGATOR_UNDERFLOW: u64 = 2;
+
+ + + + + +Aggregator feature is not supported. Raised by native code. + + +
const ENOT_SUPPORTED: u64 = 3;
+
+ + + + + +## Function `limit` + +Returns limit exceeding which aggregator overflows. + + +
public fun limit(aggregator: &aggregator::Aggregator): u128
+
+ + + +
+Implementation + + +
public fun limit(aggregator: &Aggregator): u128 {
+    aggregator.limit
+}
+
+ + + +
+ + + +## Function `add` + +Adds value to aggregator. Aborts on overflowing the limit. + + +
public fun add(aggregator: &mut aggregator::Aggregator, value: u128)
+
+ + + +
+Implementation + + +
public native fun add(aggregator: &mut Aggregator, value: u128);
+
+ + + +
+ + + +## Function `sub` + +Subtracts value from aggregator. Aborts on going below zero. + + +
public fun sub(aggregator: &mut aggregator::Aggregator, value: u128)
+
+ + + +
+Implementation + + +
public native fun sub(aggregator: &mut Aggregator, value: u128);
+
+ + + +
+ + + +## Function `read` + +Returns a value stored in this aggregator. + + +
public fun read(aggregator: &aggregator::Aggregator): u128
+
+ + + +
+Implementation + + +
public native fun read(aggregator: &Aggregator): u128;
+
+ + + +
+ + + +## Function `destroy` + +Destroys an aggregator and removes it from its AggregatorFactory. + + +
public fun destroy(aggregator: aggregator::Aggregator)
+
+ + + +
+Implementation + + +
public native fun destroy(aggregator: Aggregator);
+
+ + + +
+ + + +## Specification + + + + +### Struct `Aggregator` + + +
struct Aggregator has store
+
+ + + +
+
+handle: address +
+
+ +
+
+key: address +
+
+ +
+
+limit: u128 +
+
+ +
+
+ + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1For a given aggregator, it should always be possible to: Return the limit value of the aggregator. Return the current value stored in the aggregator. Destroy an aggregator, removing it from its AggregatorFactory.LowThe following functions should not abort if the Aggregator exists: limit(), read(), destroy().Formally verified via read, destroy, and limit.
2If the value during addition exceeds the limit, an overflow occurs.HighThe native add() function checks the value of the addition to ensure it does not pass the defined limit and results in aggregator overflow.Formally verified via add.
3Operations over aggregators should be correct.HighThe implementation of the add, sub, read and destroy functions is correct.The native implementation of the add, sub, read and destroy functions have been manually audited.
+ + + + + + +### Module-level Specification + + +
pragma intrinsic;
+
+ + + + + +### Function `limit` + + +
public fun limit(aggregator: &aggregator::Aggregator): u128
+
+ + + + +
pragma opaque;
+// This enforces high-level requirement 1:
+aborts_if false;
+ensures [abstract] result == spec_get_limit(aggregator);
+
+ + + + + + + +
native fun spec_read(aggregator: Aggregator): u128;
+
+ + + + + + + +
native fun spec_get_limit(a: Aggregator): u128;
+
+ + + + + + + +
native fun spec_get_handle(a: Aggregator): u128;
+
+ + + + + + + +
native fun spec_get_key(a: Aggregator): u128;
+
+ + + + + + + +
native fun spec_aggregator_set_val(a: Aggregator, v: u128): Aggregator;
+
+ + + + + + + +
native fun spec_aggregator_get_val(a: Aggregator): u128;
+
+ + + + + +### Function `add` + + +
public fun add(aggregator: &mut aggregator::Aggregator, value: u128)
+
+ + + + +
pragma opaque;
+aborts_if spec_aggregator_get_val(aggregator) + value > spec_get_limit(aggregator);
+// This enforces high-level requirement 2:
+aborts_if spec_aggregator_get_val(aggregator) + value > MAX_U128;
+ensures spec_get_limit(aggregator) == spec_get_limit(old(aggregator));
+ensures aggregator == spec_aggregator_set_val(old(aggregator),
+    spec_aggregator_get_val(old(aggregator)) + value);
+
+ + + + + +### Function `sub` + + +
public fun sub(aggregator: &mut aggregator::Aggregator, value: u128)
+
+ + + + +
pragma opaque;
+aborts_if spec_aggregator_get_val(aggregator) < value;
+ensures spec_get_limit(aggregator) == spec_get_limit(old(aggregator));
+ensures aggregator == spec_aggregator_set_val(old(aggregator),
+    spec_aggregator_get_val(old(aggregator)) - value);
+
+ + + + + +### Function `read` + + +
public fun read(aggregator: &aggregator::Aggregator): u128
+
+ + + + +
pragma opaque;
+// This enforces high-level requirement 1:
+aborts_if false;
+ensures result == spec_read(aggregator);
+ensures result <= spec_get_limit(aggregator);
+
+ + + + + +### Function `destroy` + + +
public fun destroy(aggregator: aggregator::Aggregator)
+
+ + + + +
pragma opaque;
+// This enforces high-level requirement 1:
+aborts_if false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/aggregator_factory.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/aggregator_factory.md new file mode 100644 index 0000000000000..7f92cf43addf1 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/aggregator_factory.md @@ -0,0 +1,355 @@ + + + +# Module `0x1::aggregator_factory` + +This module provides foundations to create aggregators. Currently only +Aptos Framework (0x1) can create them, so this module helps to wrap +the constructor of Aggregator struct so that only a system account +can initialize one. In the future, this might change and aggregators +can be enabled for the public. + + +- [Resource `AggregatorFactory`](#0x1_aggregator_factory_AggregatorFactory) +- [Constants](#@Constants_0) +- [Function `initialize_aggregator_factory`](#0x1_aggregator_factory_initialize_aggregator_factory) +- [Function `create_aggregator_internal`](#0x1_aggregator_factory_create_aggregator_internal) +- [Function `create_aggregator`](#0x1_aggregator_factory_create_aggregator) +- [Function `new_aggregator`](#0x1_aggregator_factory_new_aggregator) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `initialize_aggregator_factory`](#@Specification_1_initialize_aggregator_factory) + - [Function `create_aggregator_internal`](#@Specification_1_create_aggregator_internal) + - [Function `create_aggregator`](#@Specification_1_create_aggregator) + - [Function `new_aggregator`](#@Specification_1_new_aggregator) + + +
use 0x1::aggregator;
+use 0x1::error;
+use 0x1::system_addresses;
+use 0x1::table;
+
+ + + + + +## Resource `AggregatorFactory` + +Creates new aggregators. Used to control the numbers of aggregators in the +system and who can create them. At the moment, only Aptos Framework (0x1) +account can. + + +
struct AggregatorFactory has key
+
+ + + +
+Fields + + +
+
+phantom_table: table::Table<address, u128> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Aggregator factory is not published yet. + + +
const EAGGREGATOR_FACTORY_NOT_FOUND: u64 = 1;
+
+ + + + + +## Function `initialize_aggregator_factory` + +Creates a new factory for aggregators. Can only be called during genesis. + + +
public(friend) fun initialize_aggregator_factory(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
public(friend) fun initialize_aggregator_factory(aptos_framework: &signer) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    let aggregator_factory = AggregatorFactory {
+        phantom_table: table::new()
+    };
+    move_to(aptos_framework, aggregator_factory);
+}
+
+ + + +
+ + + +## Function `create_aggregator_internal` + +Creates a new aggregator instance which overflows on exceeding a limit. + + +
public(friend) fun create_aggregator_internal(limit: u128): aggregator::Aggregator
+
+ + + +
+Implementation + + +
public(friend) fun create_aggregator_internal(limit: u128): Aggregator acquires AggregatorFactory {
+    assert!(
+        exists<AggregatorFactory>(@aptos_framework),
+        error::not_found(EAGGREGATOR_FACTORY_NOT_FOUND)
+    );
+
+    let aggregator_factory = borrow_global_mut<AggregatorFactory>(@aptos_framework);
+    new_aggregator(aggregator_factory, limit)
+}
+
+ + + +
+ + + +## Function `create_aggregator` + +This is currently a function closed to the public. This can be updated in the future by on-chain governance +to allow any signer to call. + + +
public fun create_aggregator(account: &signer, limit: u128): aggregator::Aggregator
+
+ + + +
+Implementation + + +
public fun create_aggregator(account: &signer, limit: u128): Aggregator acquires AggregatorFactory {
+    // Only Aptos Framework (0x1) account can call this for now.
+    system_addresses::assert_aptos_framework(account);
+    create_aggregator_internal(limit)
+}
+
+ + + +
+ + + +## Function `new_aggregator` + +Returns a new aggregator. + + +
fun new_aggregator(aggregator_factory: &mut aggregator_factory::AggregatorFactory, limit: u128): aggregator::Aggregator
+
+ + + +
+Implementation + + +
native fun new_aggregator(aggregator_factory: &mut AggregatorFactory, limit: u128): Aggregator;
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1During the module's initialization, it guarantees that the Aptos framework is the caller and that the AggregatorFactory resource will move under the Aptos framework account.HighThe initialize function is responsible for establishing the initial state of the module by creating the AggregatorFactory resource, indicating its presence within the module's context. Subsequently, the resource transfers to the Aptos framework account.Formally verified via initialize_aggregator_factory.
2To create a new aggregator instance, the aggregator factory must already be initialized and exist under the Aptos account.HighThe create_aggregator_internal function asserts that AggregatorFactory exists for the Aptos account.Formally verified via CreateAggregatorInternalAbortsIf.
3Only the Aptos framework address may create an aggregator instance currently.LowThe create_aggregator function ensures that the address calling it is the Aptos framework address.Formally verified via create_aggregator.
4The creation of new aggregators should be done correctly.HighThe native new_aggregator function correctly creates a new aggregator.The new_aggregator native function has been manually audited.
+ + + + + + +### Module-level Specification + + +
pragma aborts_if_is_strict;
+
+ + + + + +### Function `initialize_aggregator_factory` + + +
public(friend) fun initialize_aggregator_factory(aptos_framework: &signer)
+
+ + +Make sure the caller is @aptos_framework. +AggregatorFactory is not under the caller before creating the resource. + + +
let addr = signer::address_of(aptos_framework);
+aborts_if addr != @aptos_framework;
+aborts_if exists<AggregatorFactory>(addr);
+// This enforces high-level requirement 1:
+ensures exists<AggregatorFactory>(addr);
+
+ + + + + +### Function `create_aggregator_internal` + + +
public(friend) fun create_aggregator_internal(limit: u128): aggregator::Aggregator
+
+ + + + +
// This enforces high-level requirement 2:
+include CreateAggregatorInternalAbortsIf;
+ensures aggregator::spec_get_limit(result) == limit;
+ensures aggregator::spec_aggregator_get_val(result) == 0;
+
+ + + + + + + +
schema CreateAggregatorInternalAbortsIf {
+    aborts_if !exists<AggregatorFactory>(@aptos_framework);
+}
+
+ + + + + +### Function `create_aggregator` + + +
public fun create_aggregator(account: &signer, limit: u128): aggregator::Aggregator
+
+ + +Make sure the caller is @aptos_framework. +AggregatorFactory must exist under the @aptos_framework when creating a new aggregator. + + +
let addr = signer::address_of(account);
+// This enforces high-level requirement 3:
+aborts_if addr != @aptos_framework;
+aborts_if !exists<AggregatorFactory>(@aptos_framework);
+
+ + + + + + + +
native fun spec_new_aggregator(limit: u128): Aggregator;
+
+ + + + + +### Function `new_aggregator` + + +
fun new_aggregator(aggregator_factory: &mut aggregator_factory::AggregatorFactory, limit: u128): aggregator::Aggregator
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == spec_new_aggregator(limit);
+ensures aggregator::spec_get_limit(result) == limit;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/aggregator_v2.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/aggregator_v2.md new file mode 100644 index 0000000000000..70e859a72b509 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/aggregator_v2.md @@ -0,0 +1,1011 @@ + + + +# Module `0x1::aggregator_v2` + +This module provides an interface for aggregators (version 2). Aggregators are +similar to unsigned integers and support addition and subtraction (aborting on +underflow or on overflowing a custom upper limit). The difference from integers +is that aggregators allow to perform both additions and subtractions in parallel +across multiple transactions, enabling parallel execution. For example, if the +first transaction is doing try_add(X, 1) for aggregator X, and the second is +doing try_sub(X,3), they can be executed in parallel avoiding a read-modify-write +dependency. +However, reading the aggregator value (i.e. calling read(X)) is a resource-intensive +operation that also reduces parallelism, and should be avoided as much as possible. +If you need to capture the value, without revealing it, use snapshot function instead, +which has no parallelism impact.
+ +From parallelism considerations, there are three different levels of effects: +* enable full parallelism (cannot create conflicts): +max_value, create_*, snapshot, derive_string_concat +* enable speculative parallelism (generally parallel via branch prediction) +try_add, add, try_sub, sub, is_at_least +* create read/write conflicts, as if you were using a regular field +read, read_snapshot, read_derived_string + + +- [Struct `Aggregator`](#0x1_aggregator_v2_Aggregator) +- [Struct `AggregatorSnapshot`](#0x1_aggregator_v2_AggregatorSnapshot) +- [Struct `DerivedStringSnapshot`](#0x1_aggregator_v2_DerivedStringSnapshot) +- [Constants](#@Constants_0) +- [Function `max_value`](#0x1_aggregator_v2_max_value) +- [Function `create_aggregator`](#0x1_aggregator_v2_create_aggregator) +- [Function `create_aggregator_with_value`](#0x1_aggregator_v2_create_aggregator_with_value) +- [Function `create_unbounded_aggregator`](#0x1_aggregator_v2_create_unbounded_aggregator) +- [Function `create_unbounded_aggregator_with_value`](#0x1_aggregator_v2_create_unbounded_aggregator_with_value) +- [Function `try_add`](#0x1_aggregator_v2_try_add) +- [Function `add`](#0x1_aggregator_v2_add) +- [Function `try_sub`](#0x1_aggregator_v2_try_sub) +- [Function `sub`](#0x1_aggregator_v2_sub) +- [Function `is_at_least_impl`](#0x1_aggregator_v2_is_at_least_impl) +- [Function `is_at_least`](#0x1_aggregator_v2_is_at_least) +- [Function `read`](#0x1_aggregator_v2_read) +- [Function `snapshot`](#0x1_aggregator_v2_snapshot) +- [Function `create_snapshot`](#0x1_aggregator_v2_create_snapshot) +- [Function `read_snapshot`](#0x1_aggregator_v2_read_snapshot) +- [Function `read_derived_string`](#0x1_aggregator_v2_read_derived_string) +- [Function `create_derived_string`](#0x1_aggregator_v2_create_derived_string) +- [Function `derive_string_concat`](#0x1_aggregator_v2_derive_string_concat) +- [Function `copy_snapshot`](#0x1_aggregator_v2_copy_snapshot) +- [Function 
`string_concat`](#0x1_aggregator_v2_string_concat) +- [Specification](#@Specification_1) + - [Function `create_aggregator`](#@Specification_1_create_aggregator) + - [Function `create_unbounded_aggregator`](#@Specification_1_create_unbounded_aggregator) + - [Function `try_add`](#@Specification_1_try_add) + - [Function `try_sub`](#@Specification_1_try_sub) + - [Function `is_at_least_impl`](#@Specification_1_is_at_least_impl) + - [Function `read`](#@Specification_1_read) + - [Function `snapshot`](#@Specification_1_snapshot) + - [Function `create_snapshot`](#@Specification_1_create_snapshot) + - [Function `read_snapshot`](#@Specification_1_read_snapshot) + - [Function `read_derived_string`](#@Specification_1_read_derived_string) + - [Function `create_derived_string`](#@Specification_1_create_derived_string) + - [Function `derive_string_concat`](#@Specification_1_derive_string_concat) + - [Function `copy_snapshot`](#@Specification_1_copy_snapshot) + - [Function `string_concat`](#@Specification_1_string_concat) + + +
use 0x1::error;
+use 0x1::features;
+use 0x1::string;
+
+ + + + + +## Struct `Aggregator` + +Represents an integer which supports parallel additions and subtractions +across multiple transactions. See the module description for more details. + +Currently supported types for IntElement are u64 and u128. + + +
struct Aggregator<IntElement> has drop, store
+
+ + + +
+Fields + + +
+
+value: IntElement +
+
+ +
+
+max_value: IntElement +
+
+ +
+
+ + +
+ + + +## Struct `AggregatorSnapshot` + +Represents a constant value, that was derived from an aggregator at given instant in time. +Unlike read() and storing the value directly, this enables parallel execution of transactions, +while storing snapshot of aggregator state elsewhere. + + +
struct AggregatorSnapshot<IntElement> has drop, store
+
+ + + +
+Fields + + +
+
+value: IntElement +
+
+ +
+
+ + +
+ + + +## Struct `DerivedStringSnapshot` + + + +
struct DerivedStringSnapshot has drop, store
+
+ + + +
+Fields + + +
+
+value: string::String +
+
+ +
+
+padding: vector<u8> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +The value of aggregator overflows. Raised by unconditional add() call + + +
const EAGGREGATOR_OVERFLOW: u64 = 1;
+
+ + + + + +The value of aggregator underflows (goes below zero). Raised by unconditional sub() call + + +
const EAGGREGATOR_UNDERFLOW: u64 = 2;
+
+ + + + + +The aggregator api v2 feature flag is not enabled. + + +
const EAGGREGATOR_API_V2_NOT_ENABLED: u64 = 6;
+
+ + + + + +The native aggregator function, that is in the move file, is not yet supported. +and any calls will raise this error. + + +
const EAGGREGATOR_FUNCTION_NOT_YET_SUPPORTED: u64 = 9;
+
+ + + + + +Arguments passed to concat exceed max limit of 256 bytes (for prefix and suffix together). + + +
const ECONCAT_STRING_LENGTH_TOO_LARGE: u64 = 8;
+
+ + + + + +The generic type supplied to the aggregator snapshot is not supported. + + +
const EUNSUPPORTED_AGGREGATOR_SNAPSHOT_TYPE: u64 = 5;
+
+ + + + + +The generic type supplied to the aggregator is not supported. + + +
const EUNSUPPORTED_AGGREGATOR_TYPE: u64 = 7;
+
+ + + + + +## Function `max_value` + +Returns max_value exceeding which aggregator overflows. + + +
public fun max_value<IntElement: copy, drop>(aggregator: &aggregator_v2::Aggregator<IntElement>): IntElement
+
+ + + +
+Implementation + + +
public fun max_value<IntElement: copy + drop>(aggregator: &Aggregator<IntElement>): IntElement {
+    aggregator.max_value
+}
+
+ + + +
+ + + +## Function `create_aggregator` + +Creates new aggregator, with given 'max_value'. + +Currently supported types for IntElement are u64 and u128. +EAGGREGATOR_ELEMENT_TYPE_NOT_SUPPORTED raised if called with a different type. + + +
public fun create_aggregator<IntElement: copy, drop>(max_value: IntElement): aggregator_v2::Aggregator<IntElement>
+
+ + + +
+Implementation + + +
public native fun create_aggregator<IntElement: copy + drop>(max_value: IntElement): Aggregator<IntElement>;
+
+ + + +
+ + + +## Function `create_aggregator_with_value` + + + +
public fun create_aggregator_with_value<IntElement: copy, drop>(start_value: IntElement, max_value: IntElement): aggregator_v2::Aggregator<IntElement>
+
+ + + +
+Implementation + + +
public fun create_aggregator_with_value<IntElement: copy + drop>(start_value: IntElement, max_value: IntElement): Aggregator<IntElement> {
+    let aggregator = create_aggregator(max_value);
+    add(&mut aggregator, start_value);
+    aggregator
+}
+
+ + + +
+ + + +## Function `create_unbounded_aggregator` + +Creates new aggregator, without any 'max_value' on top of the implicit bound restriction +due to the width of the type (i.e. MAX_U64 for u64, MAX_U128 for u128). + +Currently supported types for IntElement are u64 and u128. +EAGGREGATOR_ELEMENT_TYPE_NOT_SUPPORTED raised if called with a different type. + + +
public fun create_unbounded_aggregator<IntElement: copy, drop>(): aggregator_v2::Aggregator<IntElement>
+
+ + + +
+Implementation + + +
public native fun create_unbounded_aggregator<IntElement: copy + drop>(): Aggregator<IntElement>;
+
+ + + +
+ + + +## Function `create_unbounded_aggregator_with_value` + + + +
public fun create_unbounded_aggregator_with_value<IntElement: copy, drop>(start_value: IntElement): aggregator_v2::Aggregator<IntElement>
+
+ + + +
+Implementation + + +
public fun create_unbounded_aggregator_with_value<IntElement: copy + drop>(start_value: IntElement): Aggregator<IntElement> {
+    let aggregator = create_unbounded_aggregator();
+    add(&mut aggregator, start_value);
+    aggregator
+}
+
+ + + +
+ + + +## Function `try_add` + +Adds value to aggregator. +If addition would exceed the max_value, false is returned, and aggregator value is left unchanged. + +Parallelism info: This operation enables speculative parallelism. + + +
public fun try_add<IntElement>(aggregator: &mut aggregator_v2::Aggregator<IntElement>, value: IntElement): bool
+
+ + + +
+Implementation + + +
public native fun try_add<IntElement>(aggregator: &mut Aggregator<IntElement>, value: IntElement): bool;
+
+ + + +
+ + + +## Function `add` + +Adds value to aggregator, unconditionally. +If addition would exceed the max_value, EAGGREGATOR_OVERFLOW exception will be thrown. + +Parallelism info: This operation enables speculative parallelism. + + +
public fun add<IntElement>(aggregator: &mut aggregator_v2::Aggregator<IntElement>, value: IntElement)
+
+ + + +
+Implementation + + +
public fun add<IntElement>(aggregator: &mut Aggregator<IntElement>, value: IntElement) {
+    assert!(try_add(aggregator, value), error::out_of_range(EAGGREGATOR_OVERFLOW));
+}
+
+ + + +
+ + + +## Function `try_sub` + +Subtracts value from aggregator. +If subtraction would result in a negative value, false is returned, and aggregator value is left unchanged. + +Parallelism info: This operation enables speculative parallelism. + + +
public fun try_sub<IntElement>(aggregator: &mut aggregator_v2::Aggregator<IntElement>, value: IntElement): bool
+
+ + + +
+Implementation + + +
public native fun try_sub<IntElement>(aggregator: &mut Aggregator<IntElement>, value: IntElement): bool;
+
+ + + +
+ + + +## Function `sub` + + +Parallelism info: This operation enables speculative parallelism. + + +
public fun sub<IntElement>(aggregator: &mut aggregator_v2::Aggregator<IntElement>, value: IntElement)
+
+ + + +
+Implementation + + +
public fun sub<IntElement>(aggregator: &mut Aggregator<IntElement>, value: IntElement) {
+    assert!(try_sub(aggregator, value), error::out_of_range(EAGGREGATOR_UNDERFLOW));
+}
+
+ + + +
+ + + +## Function `is_at_least_impl` + + + +
fun is_at_least_impl<IntElement>(aggregator: &aggregator_v2::Aggregator<IntElement>, min_amount: IntElement): bool
+
+ + + +
+Implementation + + +
native fun is_at_least_impl<IntElement>(aggregator: &Aggregator<IntElement>, min_amount: IntElement): bool;
+
+ + + +
+ + + +## Function `is_at_least` + +Returns true if aggregator value is larger than or equal to the given min_amount, false otherwise. + +This operation is more efficient and much more parallelization friendly than calling read(agg) > min_amount. +Until traits are deployed, is_at_most/is_equal utility methods can be derived from this one (assuming +1 doesn't overflow): +- for is_at_most(agg, max_amount), you can do !is_at_least(max_amount + 1) +- for is_equal(agg, value), you can do is_at_least(value) && !is_at_least(value + 1) + +Parallelism info: This operation enables speculative parallelism. + + +
public fun is_at_least<IntElement>(aggregator: &aggregator_v2::Aggregator<IntElement>, min_amount: IntElement): bool
+
+ + + +
+Implementation + + +
public fun is_at_least<IntElement>(aggregator: &Aggregator<IntElement>, min_amount: IntElement): bool {
+    assert!(features::aggregator_v2_is_at_least_api_enabled(), EAGGREGATOR_API_V2_NOT_ENABLED);
+    is_at_least_impl(aggregator, min_amount)
+}
+
+ + + +
+ + + +## Function `read` + +Returns a value stored in this aggregator. +Note: This operation is resource-intensive, and reduces parallelism. +If you need to capture the value, without revealing it, use snapshot function instead, +which has no parallelism impact. +If called in a transaction that also modifies the aggregator, or has other read/write conflicts, +it will sequentialize that transaction. (i.e. up to concurrency_level times slower) +If called in a separate transaction (i.e. after transaction that modifies aggregator), it might be +up to two times slower. + +Parallelism info: This operation *prevents* speculative parallelism. + + +
public fun read<IntElement>(aggregator: &aggregator_v2::Aggregator<IntElement>): IntElement
+
+ + + +
+Implementation + + +
public native fun read<IntElement>(aggregator: &Aggregator<IntElement>): IntElement;
+
+ + + +
+ + + +## Function `snapshot` + +Returns a wrapper of a current value of an aggregator +Unlike read(), it is fast and avoids sequential dependencies. + +Parallelism info: This operation enables parallelism. + + +
public fun snapshot<IntElement>(aggregator: &aggregator_v2::Aggregator<IntElement>): aggregator_v2::AggregatorSnapshot<IntElement>
+
+ + + +
+Implementation + + +
public native fun snapshot<IntElement>(aggregator: &Aggregator<IntElement>): AggregatorSnapshot<IntElement>;
+
+ + + +
+ + + +## Function `create_snapshot` + +Creates a snapshot of a given value. +Useful for when object is sometimes created via snapshot() or string_concat(), and sometimes directly. + + +
public fun create_snapshot<IntElement: copy, drop>(value: IntElement): aggregator_v2::AggregatorSnapshot<IntElement>
+
+ + + +
+Implementation + + +
public native fun create_snapshot<IntElement: copy + drop>(value: IntElement): AggregatorSnapshot<IntElement>;
+
+ + + +
+ + + +## Function `read_snapshot` + +Returns a value stored in this snapshot. +Note: This operation is resource-intensive, and reduces parallelism. +(Especially if called in a transaction that also modifies the aggregator, +or has other read/write conflicts) + +Parallelism info: This operation *prevents* speculative parallelism. + + +
public fun read_snapshot<IntElement>(snapshot: &aggregator_v2::AggregatorSnapshot<IntElement>): IntElement
+
+ + + +
+Implementation + + +
public native fun read_snapshot<IntElement>(snapshot: &AggregatorSnapshot<IntElement>): IntElement;
+
+ + + +
+ + + +## Function `read_derived_string` + +Returns a value stored in this DerivedStringSnapshot. +Note: This operation is resource-intensive, and reduces parallelism. +(Especially if called in a transaction that also modifies the aggregator, +or has other read/write conflicts) + +Parallelism info: This operation *prevents* speculative parallelism. + + +
public fun read_derived_string(snapshot: &aggregator_v2::DerivedStringSnapshot): string::String
+
+ + + +
+Implementation + + +
public native fun read_derived_string(snapshot: &DerivedStringSnapshot): String;
+
+ + + +
+ + + +## Function `create_derived_string` + +Creates a DerivedStringSnapshot of a given value. +Useful for when object is sometimes created via string_concat(), and sometimes directly. + + +
public fun create_derived_string(value: string::String): aggregator_v2::DerivedStringSnapshot
+
+ + + +
+Implementation + + +
public native fun create_derived_string(value: String): DerivedStringSnapshot;
+
+ + + +
+ + + +## Function `derive_string_concat` + +Concatenates before, snapshot and after into a single string. +snapshot passed needs to have integer type - currently supported types are u64 and u128. +Raises EUNSUPPORTED_AGGREGATOR_SNAPSHOT_TYPE if called with another type. +If length of prefix and suffix together exceed 256 bytes, ECONCAT_STRING_LENGTH_TOO_LARGE is raised. + +Parallelism info: This operation enables parallelism. + + +
public fun derive_string_concat<IntElement>(before: string::String, snapshot: &aggregator_v2::AggregatorSnapshot<IntElement>, after: string::String): aggregator_v2::DerivedStringSnapshot
+
+ + + +
+Implementation + + +
public native fun derive_string_concat<IntElement>(before: String, snapshot: &AggregatorSnapshot<IntElement>, after: String): DerivedStringSnapshot;
+
+ + + +
+ + + +## Function `copy_snapshot` + +NOT YET IMPLEMENTED, always raises EAGGREGATOR_FUNCTION_NOT_YET_SUPPORTED. + + +
#[deprecated]
+public fun copy_snapshot<IntElement: copy, drop>(snapshot: &aggregator_v2::AggregatorSnapshot<IntElement>): aggregator_v2::AggregatorSnapshot<IntElement>
+
+ + + +
+Implementation + + +
public native fun copy_snapshot<IntElement: copy + drop>(snapshot: &AggregatorSnapshot<IntElement>): AggregatorSnapshot<IntElement>;
+
+ + + +
+ + + +## Function `string_concat` + +DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTION_NOT_YET_SUPPORTED. + + +
#[deprecated]
+public fun string_concat<IntElement>(before: string::String, snapshot: &aggregator_v2::AggregatorSnapshot<IntElement>, after: string::String): aggregator_v2::AggregatorSnapshot<string::String>
+
+ + + +
+Implementation + + +
public native fun string_concat<IntElement>(before: String, snapshot: &AggregatorSnapshot<IntElement>, after: String): AggregatorSnapshot<String>;
+
+ + + +
+ + + +## Specification + + + + +### Function `create_aggregator` + + +
public fun create_aggregator<IntElement: copy, drop>(max_value: IntElement): aggregator_v2::Aggregator<IntElement>
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `create_unbounded_aggregator` + + +
public fun create_unbounded_aggregator<IntElement: copy, drop>(): aggregator_v2::Aggregator<IntElement>
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `try_add` + + +
public fun try_add<IntElement>(aggregator: &mut aggregator_v2::Aggregator<IntElement>, value: IntElement): bool
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `try_sub` + + +
public fun try_sub<IntElement>(aggregator: &mut aggregator_v2::Aggregator<IntElement>, value: IntElement): bool
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `is_at_least_impl` + + +
fun is_at_least_impl<IntElement>(aggregator: &aggregator_v2::Aggregator<IntElement>, min_amount: IntElement): bool
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `read` + + +
public fun read<IntElement>(aggregator: &aggregator_v2::Aggregator<IntElement>): IntElement
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `snapshot` + + +
public fun snapshot<IntElement>(aggregator: &aggregator_v2::Aggregator<IntElement>): aggregator_v2::AggregatorSnapshot<IntElement>
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `create_snapshot` + + +
public fun create_snapshot<IntElement: copy, drop>(value: IntElement): aggregator_v2::AggregatorSnapshot<IntElement>
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `read_snapshot` + + +
public fun read_snapshot<IntElement>(snapshot: &aggregator_v2::AggregatorSnapshot<IntElement>): IntElement
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `read_derived_string` + + +
public fun read_derived_string(snapshot: &aggregator_v2::DerivedStringSnapshot): string::String
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `create_derived_string` + + +
public fun create_derived_string(value: string::String): aggregator_v2::DerivedStringSnapshot
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `derive_string_concat` + + +
public fun derive_string_concat<IntElement>(before: string::String, snapshot: &aggregator_v2::AggregatorSnapshot<IntElement>, after: string::String): aggregator_v2::DerivedStringSnapshot
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `copy_snapshot` + + +
#[deprecated]
+public fun copy_snapshot<IntElement: copy, drop>(snapshot: &aggregator_v2::AggregatorSnapshot<IntElement>): aggregator_v2::AggregatorSnapshot<IntElement>
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `string_concat` + + +
#[deprecated]
+public fun string_concat<IntElement>(before: string::String, snapshot: &aggregator_v2::AggregatorSnapshot<IntElement>, after: string::String): aggregator_v2::AggregatorSnapshot<string::String>
+
+ + + + +
pragma opaque;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/aptos_account.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/aptos_account.md new file mode 100644 index 0000000000000..dbc4f7b687c04 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/aptos_account.md @@ -0,0 +1,1242 @@ + + + +# Module `0x1::aptos_account` + + + +- [Resource `DirectTransferConfig`](#0x1_aptos_account_DirectTransferConfig) +- [Struct `DirectCoinTransferConfigUpdatedEvent`](#0x1_aptos_account_DirectCoinTransferConfigUpdatedEvent) +- [Struct `DirectCoinTransferConfigUpdated`](#0x1_aptos_account_DirectCoinTransferConfigUpdated) +- [Constants](#@Constants_0) +- [Function `create_account`](#0x1_aptos_account_create_account) +- [Function `batch_transfer`](#0x1_aptos_account_batch_transfer) +- [Function `transfer`](#0x1_aptos_account_transfer) +- [Function `batch_transfer_coins`](#0x1_aptos_account_batch_transfer_coins) +- [Function `transfer_coins`](#0x1_aptos_account_transfer_coins) +- [Function `deposit_coins`](#0x1_aptos_account_deposit_coins) +- [Function `assert_account_exists`](#0x1_aptos_account_assert_account_exists) +- [Function `assert_account_is_registered_for_apt`](#0x1_aptos_account_assert_account_is_registered_for_apt) +- [Function `set_allow_direct_coin_transfers`](#0x1_aptos_account_set_allow_direct_coin_transfers) +- [Function `can_receive_direct_coin_transfers`](#0x1_aptos_account_can_receive_direct_coin_transfers) +- [Function `register_apt`](#0x1_aptos_account_register_apt) +- [Function `fungible_transfer_only`](#0x1_aptos_account_fungible_transfer_only) +- [Function `is_fungible_balance_at_least`](#0x1_aptos_account_is_fungible_balance_at_least) +- [Function `burn_from_fungible_store`](#0x1_aptos_account_burn_from_fungible_store) +- [Function `ensure_primary_fungible_store_exists`](#0x1_aptos_account_ensure_primary_fungible_store_exists) +- [Function 
`primary_fungible_store_address`](#0x1_aptos_account_primary_fungible_store_address) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `create_account`](#@Specification_1_create_account) + - [Function `batch_transfer`](#@Specification_1_batch_transfer) + - [Function `transfer`](#@Specification_1_transfer) + - [Function `batch_transfer_coins`](#@Specification_1_batch_transfer_coins) + - [Function `transfer_coins`](#@Specification_1_transfer_coins) + - [Function `deposit_coins`](#@Specification_1_deposit_coins) + - [Function `assert_account_exists`](#@Specification_1_assert_account_exists) + - [Function `assert_account_is_registered_for_apt`](#@Specification_1_assert_account_is_registered_for_apt) + - [Function `set_allow_direct_coin_transfers`](#@Specification_1_set_allow_direct_coin_transfers) + - [Function `can_receive_direct_coin_transfers`](#@Specification_1_can_receive_direct_coin_transfers) + - [Function `register_apt`](#@Specification_1_register_apt) + - [Function `fungible_transfer_only`](#@Specification_1_fungible_transfer_only) + - [Function `is_fungible_balance_at_least`](#@Specification_1_is_fungible_balance_at_least) + - [Function `burn_from_fungible_store`](#@Specification_1_burn_from_fungible_store) + + +
use 0x1::account;
+use 0x1::aptos_coin;
+use 0x1::coin;
+use 0x1::create_signer;
+use 0x1::error;
+use 0x1::event;
+use 0x1::features;
+use 0x1::fungible_asset;
+use 0x1::object;
+use 0x1::primary_fungible_store;
+use 0x1::signer;
+
+ + + + + +## Resource `DirectTransferConfig` + +Configuration for whether an account can receive direct transfers of coins that they have not registered. + +By default, this is enabled. Users can opt-out by disabling at any time. + + +
struct DirectTransferConfig has key
+
+ + + +
+Fields + + +
+
+allow_arbitrary_coin_transfers: bool +
+
+ +
+
+update_coin_transfer_events: event::EventHandle<aptos_account::DirectCoinTransferConfigUpdatedEvent> +
+
+ +
+
+ + +
+ + + +## Struct `DirectCoinTransferConfigUpdatedEvent` + +Event emitted when an account's direct coins transfer config is updated. + + +
struct DirectCoinTransferConfigUpdatedEvent has drop, store
+
+ + + +
+Fields + + +
+
+new_allow_direct_transfers: bool +
+
+ +
+
+ + +
+ + + +## Struct `DirectCoinTransferConfigUpdated` + + + +
#[event]
+struct DirectCoinTransferConfigUpdated has drop, store
+
+ + + +
+Fields + + +
+
+account: address +
+
+ +
+
+new_allow_direct_transfers: bool +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Account opted out of receiving coins that they did not register to receive. + + +
const EACCOUNT_DOES_NOT_ACCEPT_DIRECT_COIN_TRANSFERS: u64 = 3;
+
+ + + + + +Account opted out of directly receiving NFT tokens. + + +
const EACCOUNT_DOES_NOT_ACCEPT_DIRECT_TOKEN_TRANSFERS: u64 = 4;
+
+ + + + + +Account does not exist. + + +
const EACCOUNT_NOT_FOUND: u64 = 1;
+
+ + + + + +Account is not registered to receive APT. + + +
const EACCOUNT_NOT_REGISTERED_FOR_APT: u64 = 2;
+
+ + + + + +The lengths of the recipients and amounts lists don't match. + + +
const EMISMATCHING_RECIPIENTS_AND_AMOUNTS_LENGTH: u64 = 5;
+
+ + + + + +## Function `create_account` + +Basic account creation methods. + + +
public entry fun create_account(auth_key: address)
+
+ + + +
+Implementation + + +
public entry fun create_account(auth_key: address) {
+    let account_signer = account::create_account(auth_key);
+    register_apt(&account_signer);
+}
+
+ + + +
+ + + +## Function `batch_transfer` + +Batch version of APT transfer. + + +
public entry fun batch_transfer(source: &signer, recipients: vector<address>, amounts: vector<u64>)
+
+ + + +
+Implementation + + +
public entry fun batch_transfer(source: &signer, recipients: vector<address>, amounts: vector<u64>) {
+    let recipients_len = vector::length(&recipients);
+    assert!(
+        recipients_len == vector::length(&amounts),
+        error::invalid_argument(EMISMATCHING_RECIPIENTS_AND_AMOUNTS_LENGTH),
+    );
+
+    vector::enumerate_ref(&recipients, |i, to| {
+        let amount = *vector::borrow(&amounts, i);
+        transfer(source, *to, amount);
+    });
+}
+
+ + + +
+ + + +## Function `transfer` + +Convenient function to transfer APT to a recipient account that might not exist. +This would create the recipient account first, which also registers it to receive APT, before transferring. + + +
public entry fun transfer(source: &signer, to: address, amount: u64)
+
+ + + +
+Implementation + + +
public entry fun transfer(source: &signer, to: address, amount: u64) {
+    if (!account::exists_at(to)) {
+        create_account(to)
+    };
+
+    if (features::operations_default_to_fa_apt_store_enabled()) {
+        fungible_transfer_only(source, to, amount)
+    } else {
+        // Resource accounts can be created without registering them to receive APT.
+        // This conveniently does the registration if necessary.
+        if (!coin::is_account_registered<AptosCoin>(to)) {
+            coin::register<AptosCoin>(&create_signer(to));
+        };
+        coin::transfer<AptosCoin>(source, to, amount)
+    }
+}
+
+ + + +
+ + + +## Function `batch_transfer_coins` + +Batch version of transfer_coins. + + +
public entry fun batch_transfer_coins<CoinType>(from: &signer, recipients: vector<address>, amounts: vector<u64>)
+
+ + + +
+Implementation + + +
public entry fun batch_transfer_coins<CoinType>(
+    from: &signer, recipients: vector<address>, amounts: vector<u64>) acquires DirectTransferConfig {
+    let recipients_len = vector::length(&recipients);
+    assert!(
+        recipients_len == vector::length(&amounts),
+        error::invalid_argument(EMISMATCHING_RECIPIENTS_AND_AMOUNTS_LENGTH),
+    );
+
+    vector::enumerate_ref(&recipients, |i, to| {
+        let amount = *vector::borrow(&amounts, i);
+        transfer_coins<CoinType>(from, *to, amount);
+    });
+}
+
+ + + +
+ + + +## Function `transfer_coins` + +Convenient function to transfer a custom CoinType to a recipient account that might not exist. +This would create the recipient account first and register it to receive the CoinType, before transferring. + + +
public entry fun transfer_coins<CoinType>(from: &signer, to: address, amount: u64)
+
+ + + +
+Implementation + + +
public entry fun transfer_coins<CoinType>(from: &signer, to: address, amount: u64) acquires DirectTransferConfig {
+    deposit_coins(to, coin::withdraw<CoinType>(from, amount));
+}
+
+ + + +
+ + + +## Function `deposit_coins` + +Convenient function to deposit a custom CoinType into a recipient account that might not exist. +This would create the recipient account first and register it to receive the CoinType, before transferring. + + +
public fun deposit_coins<CoinType>(to: address, coins: coin::Coin<CoinType>)
+
+ + + +
+Implementation + + +
public fun deposit_coins<CoinType>(to: address, coins: Coin<CoinType>) acquires DirectTransferConfig {
+    if (!account::exists_at(to)) {
+        create_account(to);
+        spec {
+            assert coin::spec_is_account_registered<AptosCoin>(to);
+            assume aptos_std::type_info::type_of<CoinType>() == aptos_std::type_info::type_of<AptosCoin>() ==>
+                coin::spec_is_account_registered<CoinType>(to);
+        };
+    };
+    if (!coin::is_account_registered<CoinType>(to)) {
+        assert!(
+            can_receive_direct_coin_transfers(to),
+            error::permission_denied(EACCOUNT_DOES_NOT_ACCEPT_DIRECT_COIN_TRANSFERS),
+        );
+        coin::register<CoinType>(&create_signer(to));
+    };
+    coin::deposit<CoinType>(to, coins)
+}
+
+ + + +
+ + + +## Function `assert_account_exists` + + + +
public fun assert_account_exists(addr: address)
+
+ + + +
+Implementation + + +
public fun assert_account_exists(addr: address) {
+    assert!(account::exists_at(addr), error::not_found(EACCOUNT_NOT_FOUND));
+}
+
+ + + +
+ + + +## Function `assert_account_is_registered_for_apt` + + + +
public fun assert_account_is_registered_for_apt(addr: address)
+
+ + + +
+Implementation + + +
public fun assert_account_is_registered_for_apt(addr: address) {
+    assert_account_exists(addr);
+    assert!(coin::is_account_registered<AptosCoin>(addr), error::not_found(EACCOUNT_NOT_REGISTERED_FOR_APT));
+}
+
+ + + +
+ + + +## Function `set_allow_direct_coin_transfers` + +Set whether account can receive direct transfers of coins that they have not explicitly registered to receive. + + +
public entry fun set_allow_direct_coin_transfers(account: &signer, allow: bool)
+
+ + + +
+Implementation + + +
public entry fun set_allow_direct_coin_transfers(account: &signer, allow: bool) acquires DirectTransferConfig {
+    let addr = signer::address_of(account);
+    if (exists<DirectTransferConfig>(addr)) {
+        let direct_transfer_config = borrow_global_mut<DirectTransferConfig>(addr);
+        // Short-circuit to avoid emitting an event if direct transfer config is not changing.
+        if (direct_transfer_config.allow_arbitrary_coin_transfers == allow) {
+            return
+        };
+
+        direct_transfer_config.allow_arbitrary_coin_transfers = allow;
+
+        if (std::features::module_event_migration_enabled()) {
+            emit(DirectCoinTransferConfigUpdated { account: addr, new_allow_direct_transfers: allow });
+        };
+        emit_event(
+            &mut direct_transfer_config.update_coin_transfer_events,
+            DirectCoinTransferConfigUpdatedEvent { new_allow_direct_transfers: allow });
+    } else {
+        let direct_transfer_config = DirectTransferConfig {
+            allow_arbitrary_coin_transfers: allow,
+            update_coin_transfer_events: new_event_handle<DirectCoinTransferConfigUpdatedEvent>(account),
+        };
+        if (std::features::module_event_migration_enabled()) {
+            emit(DirectCoinTransferConfigUpdated { account: addr, new_allow_direct_transfers: allow });
+        };
+        emit_event(
+            &mut direct_transfer_config.update_coin_transfer_events,
+            DirectCoinTransferConfigUpdatedEvent { new_allow_direct_transfers: allow });
+        move_to(account, direct_transfer_config);
+    };
+}
+
+ + + +
+ + + +## Function `can_receive_direct_coin_transfers` + +Return true if the account can receive direct transfers of coins that they have not explicitly registered to +receive. + +By default, this returns true if an account has not explicitly set whether they can receive direct transfers. + + +
#[view]
+public fun can_receive_direct_coin_transfers(account: address): bool
+
+ + + +
+Implementation + + +
public fun can_receive_direct_coin_transfers(account: address): bool acquires DirectTransferConfig {
+    !exists<DirectTransferConfig>(account) ||
+        borrow_global<DirectTransferConfig>(account).allow_arbitrary_coin_transfers
+}
+
+ + + +
+ + + +## Function `register_apt` + + + +
public(friend) fun register_apt(account_signer: &signer)
+
+ + + +
+Implementation + + +
public(friend) fun register_apt(account_signer: &signer) {
+    if (features::new_accounts_default_to_fa_apt_store_enabled()) {
+        ensure_primary_fungible_store_exists(signer::address_of(account_signer));
+    } else {
+        coin::register<AptosCoin>(account_signer);
+    }
+}
+
+ + + +
+ + + +## Function `fungible_transfer_only` + +APT Primary Fungible Store specific specialized functions, +Utilized internally once migration of APT to FungibleAsset is complete. +Convenient function to transfer APT to a recipient account that might not exist. +This would create the recipient APT PFS first, which also registers it to receive APT, before transferring. +TODO: once migration is complete, rename to just "transfer_only" and make it an entry function (for cheapest way +to transfer APT) - if we want to allow APT PFS without account itself + + +
public(friend) entry fun fungible_transfer_only(source: &signer, to: address, amount: u64)
+
+ + + +
+Implementation + + +
public(friend) entry fun fungible_transfer_only(
+    source: &signer, to: address, amount: u64
+) {
+    let sender_store = ensure_primary_fungible_store_exists(signer::address_of(source));
+    let recipient_store = ensure_primary_fungible_store_exists(to);
+
+    // use internal APIs, as they skip:
+    // - owner, frozen and dispatchable checks
+    // as APT cannot be frozen or have dispatch, and PFS cannot be transfered
+    // (PFS could potentially be burned. regular transfer would permanently unburn the store.
+    // Ignoring the check here has the equivalent of unburning, transfers, and then burning again)
+    fungible_asset::deposit_internal(recipient_store, fungible_asset::withdraw_internal(sender_store, amount));
+}
+
+ + + +
+ + + +## Function `is_fungible_balance_at_least` + +Is balance from APT Primary FungibleStore at least the given amount + + +
public(friend) fun is_fungible_balance_at_least(account: address, amount: u64): bool
+
+ + + +
+Implementation + + +
public(friend) fun is_fungible_balance_at_least(account: address, amount: u64): bool {
+    let store_addr = primary_fungible_store_address(account);
+    fungible_asset::is_address_balance_at_least(store_addr, amount)
+}
+
+ + + +
+ + + +## Function `burn_from_fungible_store` + +Burn from APT Primary FungibleStore + + +
public(friend) fun burn_from_fungible_store(ref: &fungible_asset::BurnRef, account: address, amount: u64)
+
+ + + +
+Implementation + + +
public(friend) fun burn_from_fungible_store(
+    ref: &BurnRef,
+    account: address,
+    amount: u64,
+) {
+    // Skip burning if amount is zero. This shouldn't error out as it's called as part of transaction fee burning.
+    if (amount != 0) {
+        let store_addr = primary_fungible_store_address(account);
+        fungible_asset::address_burn_from(ref, store_addr, amount);
+    };
+}
+
+ + + +
+ + + +## Function `ensure_primary_fungible_store_exists` + +Ensure that APT Primary FungibleStore exists (and create if it doesn't) + + +
fun ensure_primary_fungible_store_exists(owner: address): address
+
+ + + +
+Implementation + + +
inline fun ensure_primary_fungible_store_exists(owner: address): address {
+    let store_addr = primary_fungible_store_address(owner);
+    if (fungible_asset::store_exists(store_addr)) {
+        store_addr
+    } else {
+        object::object_address(&primary_fungible_store::create_primary_store(owner, object::address_to_object<Metadata>(@aptos_fungible_asset)))
+    }
+}
+
+ + + +
+ + + +## Function `primary_fungible_store_address` + +Address of APT Primary Fungible Store + + +
fun primary_fungible_store_address(account: address): address
+
+ + + +
+Implementation + + +
inline fun primary_fungible_store_address(account: address): address {
+    object::create_user_derived_object_address(account, @aptos_fungible_asset)
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1During the creation of an Aptos account the following rules should hold: (1) the authentication key should be 32 bytes in length, (2) an Aptos account should not already exist for that authentication key, and (3) the address of the authentication key should not be equal to a reserved address (0x0, 0x1, or 0x3).CriticalThe authentication key which is passed in as an argument to create_account should satisfy all necessary conditions.Formally verified via CreateAccountAbortsIf.
2After creating an Aptos account, the account should become registered to receive AptosCoin.CriticalThe create_account function creates a new account for the particular address and registers AptosCoin.Formally verified via create_account.
3An account may receive a direct transfer of coins they have not registered for if and only if the transfer of arbitrary coins is enabled. By default the option should always be set to enabled for an account.LowTransfers of a coin to an account that has not yet registered for that coin should abort if and only if the allow_arbitrary_coin_transfers flag is explicitly set to false.Formally verified via can_receive_direct_coin_transfers.
4Setting direct coin transfers may only occur if and only if a direct transfer config is associated with the provided account address.LowThe set_allow_direct_coin_transfers function ensures the DirectTransferConfig structure exists for the signer.Formally verified via set_allow_direct_coin_transfers.
5The transfer function should ensure an account is created for the provided destination if one does not exist; then, register AptosCoin for that account if it is unregistered before transferring the amount.CriticalThe transfer function checks if the recipient account exists. If the account does not exist, the function creates one and registers the account to AptosCoin if not registered.Formally verified via transfer.
6Creating an account for the provided destination and registering it for that particular CoinType should be the only way to enable depositing coins, provided the account does not already exist.CriticalThe deposit_coins function verifies if the recipient account exists. If the account does not exist, the function creates one and ensures that the account becomes registered for the specified CoinType.Formally verified via deposit_coins.
7When performing a batch transfer of Aptos Coin and/or a batch transfer of a custom coin type, it should ensure that the vector containing destination addresses and the vector containing the corresponding amounts are equal in length.LowThe batch_transfer and batch_transfer_coins functions verify that the length of the recipient addresses vector matches the length of the amount vector through an assertion.Formally verified via batch_transfer_coins.
+ + + + + + +### Module-level Specification + + +
pragma aborts_if_is_strict;
+
+ + + + + +### Function `create_account` + + +
public entry fun create_account(auth_key: address)
+
+ + +Check that the auth_key is 32 bytes in length. +The account must not already exist under the auth_key before creating the account. +The address of the auth_key must not be @vm_reserved / @aptos_framework / @aptos_token. + + +
// This enforces high-level requirement 1:
+pragma aborts_if_is_partial;
+include CreateAccountAbortsIf;
+ensures exists<account::Account>(auth_key);
+
+ + + + + + + +
schema CreateAccountAbortsIf {
+    auth_key: address;
+    aborts_if exists<account::Account>(auth_key);
+    aborts_if length_judgment(auth_key);
+    aborts_if auth_key == @vm_reserved || auth_key == @aptos_framework || auth_key == @aptos_token;
+}
+
+ + + + + + + +
fun length_judgment(auth_key: address): bool {
+   use std::bcs;
+
+   let authentication_key = bcs::to_bytes(auth_key);
+   len(authentication_key) != 32
+}
+
+ + + + + +### Function `batch_transfer` + + +
public entry fun batch_transfer(source: &signer, recipients: vector<address>, amounts: vector<u64>)
+
+ + + + +
pragma verify = false;
+let account_addr_source = signer::address_of(source);
+let coin_store_source = global<coin::CoinStore<AptosCoin>>(account_addr_source);
+let balance_source = coin_store_source.coin.value;
+aborts_if len(recipients) != len(amounts);
+aborts_if exists i in 0..len(recipients):
+        !account::exists_at(recipients[i]) && length_judgment(recipients[i]);
+aborts_if exists i in 0..len(recipients):
+        !account::exists_at(recipients[i]) && (recipients[i] == @vm_reserved || recipients[i] == @aptos_framework || recipients[i] == @aptos_token);
+ensures forall i in 0..len(recipients):
+        (!account::exists_at(recipients[i]) ==> !length_judgment(recipients[i])) &&
+            (!account::exists_at(recipients[i]) ==> (recipients[i] != @vm_reserved && recipients[i] != @aptos_framework && recipients[i] != @aptos_token));
+aborts_if exists i in 0..len(recipients):
+    !exists<coin::CoinStore<AptosCoin>>(account_addr_source);
+aborts_if exists i in 0..len(recipients):
+    coin_store_source.frozen;
+aborts_if exists i in 0..len(recipients):
+    global<coin::CoinStore<AptosCoin>>(account_addr_source).coin.value < amounts[i];
+aborts_if exists i in 0..len(recipients):
+    exists<coin::CoinStore<AptosCoin>>(recipients[i]) && global<coin::CoinStore<AptosCoin>>(recipients[i]).frozen;
+aborts_if exists i in 0..len(recipients):
+    account::exists_at(recipients[i]) && !exists<coin::CoinStore<AptosCoin>>(recipients[i]) && global<account::Account>(recipients[i]).guid_creation_num + 2 >= account::MAX_GUID_CREATION_NUM;
+aborts_if exists i in 0..len(recipients):
+    account::exists_at(recipients[i]) && !exists<coin::CoinStore<AptosCoin>>(recipients[i]) && global<account::Account>(recipients[i]).guid_creation_num + 2 > MAX_U64;
+
+ + + + + +### Function `transfer` + + +
public entry fun transfer(source: &signer, to: address, amount: u64)
+
+ + + + +
pragma verify = false;
+let account_addr_source = signer::address_of(source);
+include CreateAccountTransferAbortsIf;
+include GuidAbortsIf<AptosCoin>;
+include WithdrawAbortsIf<AptosCoin>{from: source};
+include TransferEnsures<AptosCoin>;
+aborts_if exists<coin::CoinStore<AptosCoin>>(to) && global<coin::CoinStore<AptosCoin>>(to).frozen;
+// This enforces high-level requirement 5:
+ensures exists<aptos_framework::account::Account>(to);
+ensures exists<coin::CoinStore<AptosCoin>>(to);
+
+ + + + + +### Function `batch_transfer_coins` + + +
public entry fun batch_transfer_coins<CoinType>(from: &signer, recipients: vector<address>, amounts: vector<u64>)
+
+ + + + +
pragma verify = false;
+let account_addr_source = signer::address_of(from);
+let coin_store_source = global<coin::CoinStore<CoinType>>(account_addr_source);
+let balance_source = coin_store_source.coin.value;
+// This enforces high-level requirement 7:
+aborts_if len(recipients) != len(amounts);
+aborts_if exists i in 0..len(recipients):
+        !account::exists_at(recipients[i]) && length_judgment(recipients[i]);
+aborts_if exists i in 0..len(recipients):
+        !account::exists_at(recipients[i]) && (recipients[i] == @vm_reserved || recipients[i] == @aptos_framework || recipients[i] == @aptos_token);
+ensures forall i in 0..len(recipients):
+        (!account::exists_at(recipients[i]) ==> !length_judgment(recipients[i])) &&
+            (!account::exists_at(recipients[i]) ==> (recipients[i] != @vm_reserved && recipients[i] != @aptos_framework && recipients[i] != @aptos_token));
+aborts_if exists i in 0..len(recipients):
+    !exists<coin::CoinStore<CoinType>>(account_addr_source);
+aborts_if exists i in 0..len(recipients):
+    coin_store_source.frozen;
+aborts_if exists i in 0..len(recipients):
+    global<coin::CoinStore<CoinType>>(account_addr_source).coin.value < amounts[i];
+aborts_if exists i in 0..len(recipients):
+    exists<coin::CoinStore<CoinType>>(recipients[i]) && global<coin::CoinStore<CoinType>>(recipients[i]).frozen;
+aborts_if exists i in 0..len(recipients):
+    account::exists_at(recipients[i]) && !exists<coin::CoinStore<CoinType>>(recipients[i]) && global<account::Account>(recipients[i]).guid_creation_num + 2 >= account::MAX_GUID_CREATION_NUM;
+aborts_if exists i in 0..len(recipients):
+    account::exists_at(recipients[i]) && !exists<coin::CoinStore<CoinType>>(recipients[i]) && global<account::Account>(recipients[i]).guid_creation_num + 2 > MAX_U64;
+aborts_if exists i in 0..len(recipients):
+    !coin::spec_is_account_registered<CoinType>(recipients[i]) && !type_info::spec_is_struct<CoinType>();
+
+ + + + + +### Function `transfer_coins` + + +
public entry fun transfer_coins<CoinType>(from: &signer, to: address, amount: u64)
+
+ + + + +
pragma verify = false;
+let account_addr_source = signer::address_of(from);
+include CreateAccountTransferAbortsIf;
+include WithdrawAbortsIf<CoinType>;
+include GuidAbortsIf<CoinType>;
+include RegistCoinAbortsIf<CoinType>;
+include TransferEnsures<CoinType>;
+aborts_if exists<coin::CoinStore<CoinType>>(to) && global<coin::CoinStore<CoinType>>(to).frozen;
+ensures exists<aptos_framework::account::Account>(to);
+ensures exists<aptos_framework::coin::CoinStore<CoinType>>(to);
+
+ + + + + +### Function `deposit_coins` + + +
public fun deposit_coins<CoinType>(to: address, coins: coin::Coin<CoinType>)
+
+ + + + +
pragma verify = false;
+include CreateAccountTransferAbortsIf;
+include GuidAbortsIf<CoinType>;
+include RegistCoinAbortsIf<CoinType>;
+let if_exist_coin = exists<coin::CoinStore<CoinType>>(to);
+aborts_if if_exist_coin && global<coin::CoinStore<CoinType>>(to).frozen;
+// This enforces high-level requirement 6:
+ensures exists<aptos_framework::account::Account>(to);
+ensures exists<aptos_framework::coin::CoinStore<CoinType>>(to);
+let coin_store_to = global<coin::CoinStore<CoinType>>(to).coin.value;
+let post post_coin_store_to = global<coin::CoinStore<CoinType>>(to).coin.value;
+ensures if_exist_coin ==> post_coin_store_to == coin_store_to + coins.value;
+
+ + + + + +### Function `assert_account_exists` + + +
public fun assert_account_exists(addr: address)
+
+ + + + +
aborts_if !account::exists_at(addr);
+
+ + + + + +### Function `assert_account_is_registered_for_apt` + + +
public fun assert_account_is_registered_for_apt(addr: address)
+
+ + +Check that the address exists. +Check that AptosCoin is registered under the address. + + +
pragma aborts_if_is_partial;
+aborts_if !account::exists_at(addr);
+aborts_if !coin::spec_is_account_registered<AptosCoin>(addr);
+
+ + + + + +### Function `set_allow_direct_coin_transfers` + + +
public entry fun set_allow_direct_coin_transfers(account: &signer, allow: bool)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `can_receive_direct_coin_transfers` + + +
#[view]
+public fun can_receive_direct_coin_transfers(account: address): bool
+
+ + + + +
aborts_if false;
+// This enforces high-level requirement 3:
+ensures result == (
+    !exists<DirectTransferConfig>(account) ||
+        global<DirectTransferConfig>(account).allow_arbitrary_coin_transfers
+);
+
+ + + + + +### Function `register_apt` + + +
public(friend) fun register_apt(account_signer: &signer)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `fungible_transfer_only` + + +
public(friend) entry fun fungible_transfer_only(source: &signer, to: address, amount: u64)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `is_fungible_balance_at_least` + + +
public(friend) fun is_fungible_balance_at_least(account: address, amount: u64): bool
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `burn_from_fungible_store` + + +
public(friend) fun burn_from_fungible_store(ref: &fungible_asset::BurnRef, account: address, amount: u64)
+
+ + + + +
pragma verify = false;
+
+ + + + + + + +
schema CreateAccountTransferAbortsIf {
+    to: address;
+    aborts_if !account::exists_at(to) && length_judgment(to);
+    aborts_if !account::exists_at(to) && (to == @vm_reserved || to == @aptos_framework || to == @aptos_token);
+}
+
+ + + + + + + +
schema WithdrawAbortsIf<CoinType> {
+    from: &signer;
+    amount: u64;
+    let account_addr_source = signer::address_of(from);
+    let coin_store_source = global<coin::CoinStore<CoinType>>(account_addr_source);
+    let balance_source = coin_store_source.coin.value;
+    aborts_if !exists<coin::CoinStore<CoinType>>(account_addr_source);
+    aborts_if coin_store_source.frozen;
+    aborts_if balance_source < amount;
+}
+
+ + + + + + + +
schema GuidAbortsIf<CoinType> {
+    to: address;
+    let acc = global<account::Account>(to);
+    aborts_if account::exists_at(to) && !exists<coin::CoinStore<CoinType>>(to) && acc.guid_creation_num + 2 >= account::MAX_GUID_CREATION_NUM;
+    aborts_if account::exists_at(to) && !exists<coin::CoinStore<CoinType>>(to) && acc.guid_creation_num + 2 > MAX_U64;
+}
+
+ + + + + + + +
schema RegistCoinAbortsIf<CoinType> {
+    to: address;
+    aborts_if !coin::spec_is_account_registered<CoinType>(to) && !type_info::spec_is_struct<CoinType>();
+    aborts_if exists<aptos_framework::account::Account>(to);
+    aborts_if type_info::type_of<CoinType>() != type_info::type_of<AptosCoin>();
+}
+
+ + + + + + + +
schema TransferEnsures<CoinType> {
+    to: address;
+    account_addr_source: address;
+    amount: u64;
+    let if_exist_account = exists<account::Account>(to);
+    let if_exist_coin = exists<coin::CoinStore<CoinType>>(to);
+    let coin_store_to = global<coin::CoinStore<CoinType>>(to);
+    let coin_store_source = global<coin::CoinStore<CoinType>>(account_addr_source);
+    let post p_coin_store_to = global<coin::CoinStore<CoinType>>(to);
+    let post p_coin_store_source = global<coin::CoinStore<CoinType>>(account_addr_source);
+    ensures coin_store_source.coin.value - amount == p_coin_store_source.coin.value;
+    ensures if_exist_account && if_exist_coin ==> coin_store_to.coin.value + amount == p_coin_store_to.coin.value;
+}
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/aptos_coin.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/aptos_coin.md new file mode 100644 index 0000000000000..5f8c7c73e1502 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/aptos_coin.md @@ -0,0 +1,658 @@ + + + +# Module `0x1::aptos_coin` + +This module defines a minimal and generic Coin and Balance. +modified from https://github.com/move-language/move/tree/main/language/documentation/tutorial + + +- [Resource `AptosCoin`](#0x1_aptos_coin_AptosCoin) +- [Resource `MintCapStore`](#0x1_aptos_coin_MintCapStore) +- [Struct `DelegatedMintCapability`](#0x1_aptos_coin_DelegatedMintCapability) +- [Resource `Delegations`](#0x1_aptos_coin_Delegations) +- [Constants](#@Constants_0) +- [Function `initialize`](#0x1_aptos_coin_initialize) +- [Function `has_mint_capability`](#0x1_aptos_coin_has_mint_capability) +- [Function `destroy_mint_cap`](#0x1_aptos_coin_destroy_mint_cap) +- [Function `configure_accounts_for_test`](#0x1_aptos_coin_configure_accounts_for_test) +- [Function `mint`](#0x1_aptos_coin_mint) +- [Function `delegate_mint_capability`](#0x1_aptos_coin_delegate_mint_capability) +- [Function `claim_mint_capability`](#0x1_aptos_coin_claim_mint_capability) +- [Function `find_delegation`](#0x1_aptos_coin_find_delegation) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `initialize`](#@Specification_1_initialize) + - [Function `destroy_mint_cap`](#@Specification_1_destroy_mint_cap) + - [Function `configure_accounts_for_test`](#@Specification_1_configure_accounts_for_test) + - [Function `mint`](#@Specification_1_mint) + - [Function `delegate_mint_capability`](#@Specification_1_delegate_mint_capability) + - [Function `claim_mint_capability`](#@Specification_1_claim_mint_capability) + - [Function 
`find_delegation`](#@Specification_1_find_delegation) + + +
use 0x1::coin;
+use 0x1::error;
+use 0x1::option;
+use 0x1::signer;
+use 0x1::string;
+use 0x1::system_addresses;
+use 0x1::vector;
+
+ + + + + +## Resource `AptosCoin` + + + +
struct AptosCoin has key
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Resource `MintCapStore` + + + +
struct MintCapStore has key
+
+ + + +
+Fields + + +
+
+mint_cap: coin::MintCapability<aptos_coin::AptosCoin> +
+
+ +
+
+ + +
+ + + +## Struct `DelegatedMintCapability` + +Delegation token created by delegator and can be claimed by the delegatee as MintCapability. + + +
struct DelegatedMintCapability has store
+
+ + + +
+Fields + + +
+
+to: address +
+
+ +
+
+ + +
+ + + +## Resource `Delegations` + +The container stores the current pending delegations. + + +
struct Delegations has key
+
+ + + +
+Fields + + +
+
+inner: vector<aptos_coin::DelegatedMintCapability> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Mint capability has already been delegated to this specified address + + +
const EALREADY_DELEGATED: u64 = 2;
+
+ + + + + +Cannot find delegation of mint capability to this account + + +
const EDELEGATION_NOT_FOUND: u64 = 3;
+
+ + + + + +Account does not have mint capability + + +
const ENO_CAPABILITIES: u64 = 1;
+
+ + + + + +## Function `initialize` + +Can only be called during genesis to initialize the Aptos coin. + + +
public(friend) fun initialize(aptos_framework: &signer): (coin::BurnCapability<aptos_coin::AptosCoin>, coin::MintCapability<aptos_coin::AptosCoin>)
+
+ + + +
+Implementation + + +
public(friend) fun initialize(aptos_framework: &signer): (BurnCapability<AptosCoin>, MintCapability<AptosCoin>) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+
+    let (burn_cap, freeze_cap, mint_cap) = coin::initialize_with_parallelizable_supply<AptosCoin>(
+        aptos_framework,
+        string::utf8(b"Aptos Coin"),
+        string::utf8(b"APT"),
+        8, // decimals
+        true, // monitor_supply
+    );
+
+    // Aptos framework needs mint cap to mint coins to initial validators. This will be revoked once the validators
+    // have been initialized.
+    move_to(aptos_framework, MintCapStore { mint_cap });
+
+    coin::destroy_freeze_cap(freeze_cap);
+    (burn_cap, mint_cap)
+}
+
+ + + +
+ + + +## Function `has_mint_capability` + + + +
public fun has_mint_capability(account: &signer): bool
+
+ + + +
+Implementation + + +
public fun has_mint_capability(account: &signer): bool {
+    exists<MintCapStore>(signer::address_of(account))
+}
+
+ + + +
+ + + +## Function `destroy_mint_cap` + +Only called during genesis to destroy the aptos framework account's mint capability once all initial validators +and accounts have been initialized during genesis. + + +
public(friend) fun destroy_mint_cap(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
public(friend) fun destroy_mint_cap(aptos_framework: &signer) acquires MintCapStore {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    let MintCapStore { mint_cap } = move_from<MintCapStore>(@aptos_framework);
+    coin::destroy_mint_cap(mint_cap);
+}
+
+ + + +
+ + + +## Function `configure_accounts_for_test` + +Can only be called during genesis for tests to grant mint capability to aptos framework and core resources +accounts. +Expects account and APT store to be registered before calling. + + +
public(friend) fun configure_accounts_for_test(aptos_framework: &signer, core_resources: &signer, mint_cap: coin::MintCapability<aptos_coin::AptosCoin>)
+
+ + + +
+Implementation + + +
public(friend) fun configure_accounts_for_test(
+    aptos_framework: &signer,
+    core_resources: &signer,
+    mint_cap: MintCapability<AptosCoin>,
+) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+
+    // Mint the core resource account AptosCoin for gas so it can execute system transactions.
+    let coins = coin::mint<AptosCoin>(
+        18446744073709551615,
+        &mint_cap,
+    );
+    coin::deposit<AptosCoin>(signer::address_of(core_resources), coins);
+
+    move_to(core_resources, MintCapStore { mint_cap });
+    move_to(core_resources, Delegations { inner: vector::empty() });
+}
+
+ + + +
+ + + +## Function `mint` + +Only callable in tests and testnets where the core resources account exists. +Create new coins and deposit them into dst_addr's account. + + +
public entry fun mint(account: &signer, dst_addr: address, amount: u64)
+
+ + + +
+Implementation + + +
public entry fun mint(
+    account: &signer,
+    dst_addr: address,
+    amount: u64,
+) acquires MintCapStore {
+    let account_addr = signer::address_of(account);
+
+    assert!(
+        exists<MintCapStore>(account_addr),
+        error::not_found(ENO_CAPABILITIES),
+    );
+
+    let mint_cap = &borrow_global<MintCapStore>(account_addr).mint_cap;
+    let coins_minted = coin::mint<AptosCoin>(amount, mint_cap);
+    coin::deposit<AptosCoin>(dst_addr, coins_minted);
+}
+
+ + + +
+ + + +## Function `delegate_mint_capability` + +Only callable in tests and testnets where the core resources account exists. +Create delegated token for the address so the account could claim MintCapability later. + + +
public entry fun delegate_mint_capability(account: signer, to: address)
+
+ + + +
+Implementation + + +
public entry fun delegate_mint_capability(account: signer, to: address) acquires Delegations {
+    system_addresses::assert_core_resource(&account);
+    let delegations = &mut borrow_global_mut<Delegations>(@core_resources).inner;
+    vector::for_each_ref(delegations, |element| {
+        let element: &DelegatedMintCapability = element;
+        assert!(element.to != to, error::invalid_argument(EALREADY_DELEGATED));
+    });
+    vector::push_back(delegations, DelegatedMintCapability { to });
+}
+
+ + + +
+ + + +## Function `claim_mint_capability` + +Only callable in tests and testnets where the core resources account exists. +Claim the delegated mint capability and destroy the delegated token. + + +
public entry fun claim_mint_capability(account: &signer)
+
+ + + +
+Implementation + + +
public entry fun claim_mint_capability(account: &signer) acquires Delegations, MintCapStore {
+    let maybe_index = find_delegation(signer::address_of(account));
+    assert!(option::is_some(&maybe_index), EDELEGATION_NOT_FOUND);
+    let idx = *option::borrow(&maybe_index);
+    let delegations = &mut borrow_global_mut<Delegations>(@core_resources).inner;
+    let DelegatedMintCapability { to: _ } = vector::swap_remove(delegations, idx);
+
+    // Make a copy of mint cap and give it to the specified account.
+    let mint_cap = borrow_global<MintCapStore>(@core_resources).mint_cap;
+    move_to(account, MintCapStore { mint_cap });
+}
+
+ + + +
+ + + +## Function `find_delegation` + + + +
fun find_delegation(addr: address): option::Option<u64>
+
+ + + +
+Implementation + + +
fun find_delegation(addr: address): Option<u64> acquires Delegations {
+    let delegations = &borrow_global<Delegations>(@core_resources).inner;
+    let i = 0;
+    let len = vector::length(delegations);
+    let index = option::none();
+    while (i < len) {
+        let element = vector::borrow(delegations, i);
+        if (element.to == addr) {
+            index = option::some(i);
+            break
+        };
+        i = i + 1;
+    };
+    index
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1The native token, APT, must be initialized during genesis.MediumThe initialize function is only called once, during genesis.Formally verified via initialize.
2The APT coin may only be created exactly once.MediumThe initialization function may only be called once.Enforced through the coin module, which has been audited.
4Any type of operation on the APT coin should fail if the user has not registered for the coin.MediumCoin operations may succeed only on valid user coin registration.Enforced through the coin module, which has been audited.
+ + + + + + +### Module-level Specification + + +
pragma verify = true;
+pragma aborts_if_is_strict;
+
+ + + + + +### Function `initialize` + + +
public(friend) fun initialize(aptos_framework: &signer): (coin::BurnCapability<aptos_coin::AptosCoin>, coin::MintCapability<aptos_coin::AptosCoin>)
+
+ + + + +
let addr = signer::address_of(aptos_framework);
+aborts_if addr != @aptos_framework;
+aborts_if !string::spec_internal_check_utf8(b"Aptos Coin");
+aborts_if !string::spec_internal_check_utf8(b"APT");
+aborts_if exists<MintCapStore>(addr);
+aborts_if exists<coin::CoinInfo<AptosCoin>>(addr);
+aborts_if !exists<aggregator_factory::AggregatorFactory>(addr);
+// This enforces high-level requirement 1:
+ensures exists<MintCapStore>(addr);
+// This enforces high-level requirement 3:
+ensures global<MintCapStore>(addr).mint_cap ==  MintCapability<AptosCoin> {};
+ensures exists<coin::CoinInfo<AptosCoin>>(addr);
+ensures result_1 == BurnCapability<AptosCoin> {};
+ensures result_2 == MintCapability<AptosCoin> {};
+
+ + + + + +### Function `destroy_mint_cap` + + +
public(friend) fun destroy_mint_cap(aptos_framework: &signer)
+
+ + + + +
let addr = signer::address_of(aptos_framework);
+aborts_if addr != @aptos_framework;
+aborts_if !exists<MintCapStore>(@aptos_framework);
+
+ + + + + +### Function `configure_accounts_for_test` + + +
public(friend) fun configure_accounts_for_test(aptos_framework: &signer, core_resources: &signer, mint_cap: coin::MintCapability<aptos_coin::AptosCoin>)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `mint` + + +
public entry fun mint(account: &signer, dst_addr: address, amount: u64)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `delegate_mint_capability` + + +
public entry fun delegate_mint_capability(account: signer, to: address)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `claim_mint_capability` + + +
public entry fun claim_mint_capability(account: &signer)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `find_delegation` + + +
fun find_delegation(addr: address): option::Option<u64>
+
+ + + + +
aborts_if !exists<Delegations>(@core_resources);
+
+ + + + + + + +
schema ExistsAptosCoin {
+    requires exists<coin::CoinInfo<AptosCoin>>(@aptos_framework);
+}
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/aptos_governance.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/aptos_governance.md new file mode 100644 index 0000000000000..1a666013b1907 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/aptos_governance.md @@ -0,0 +1,3136 @@ + + + +# Module `0x1::aptos_governance` + + +AptosGovernance represents the on-chain governance of the Aptos network. Voting power is calculated based on the +current epoch's voting power of the proposer or voter's backing stake pool. In addition, for it to count, +the stake pool's lockup needs to be at least as long as the proposal's duration. + +It provides the following flow: +1. Proposers can create a proposal by calling AptosGovernance::create_proposal. The proposer's backing stake pool +needs to have the minimum proposer stake required. Off-chain components can subscribe to CreateProposalEvent to +track proposal creation and proposal ids. +2. Voters can vote on a proposal. Their voting power is derived from the backing stake pool. A stake pool can vote +on a proposal multiple times as long as the total voting power of these votes doesn't exceed its total voting power. 
+ + +- [Resource `GovernanceResponsbility`](#0x1_aptos_governance_GovernanceResponsbility) +- [Resource `GovernanceConfig`](#0x1_aptos_governance_GovernanceConfig) +- [Struct `RecordKey`](#0x1_aptos_governance_RecordKey) +- [Resource `VotingRecords`](#0x1_aptos_governance_VotingRecords) +- [Resource `VotingRecordsV2`](#0x1_aptos_governance_VotingRecordsV2) +- [Resource `ApprovedExecutionHashes`](#0x1_aptos_governance_ApprovedExecutionHashes) +- [Resource `GovernanceEvents`](#0x1_aptos_governance_GovernanceEvents) +- [Struct `CreateProposalEvent`](#0x1_aptos_governance_CreateProposalEvent) +- [Struct `VoteEvent`](#0x1_aptos_governance_VoteEvent) +- [Struct `UpdateConfigEvent`](#0x1_aptos_governance_UpdateConfigEvent) +- [Struct `CreateProposal`](#0x1_aptos_governance_CreateProposal) +- [Struct `Vote`](#0x1_aptos_governance_Vote) +- [Struct `UpdateConfig`](#0x1_aptos_governance_UpdateConfig) +- [Constants](#@Constants_0) +- [Function `store_signer_cap`](#0x1_aptos_governance_store_signer_cap) +- [Function `initialize`](#0x1_aptos_governance_initialize) +- [Function `update_governance_config`](#0x1_aptos_governance_update_governance_config) +- [Function `initialize_partial_voting`](#0x1_aptos_governance_initialize_partial_voting) +- [Function `get_voting_duration_secs`](#0x1_aptos_governance_get_voting_duration_secs) +- [Function `get_min_voting_threshold`](#0x1_aptos_governance_get_min_voting_threshold) +- [Function `get_required_proposer_stake`](#0x1_aptos_governance_get_required_proposer_stake) +- [Function `has_entirely_voted`](#0x1_aptos_governance_has_entirely_voted) +- [Function `get_remaining_voting_power`](#0x1_aptos_governance_get_remaining_voting_power) +- [Function `create_proposal`](#0x1_aptos_governance_create_proposal) +- [Function `create_proposal_v2`](#0x1_aptos_governance_create_proposal_v2) +- [Function `create_proposal_v2_impl`](#0x1_aptos_governance_create_proposal_v2_impl) +- [Function `batch_vote`](#0x1_aptos_governance_batch_vote) +- [Function 
`batch_partial_vote`](#0x1_aptos_governance_batch_partial_vote) +- [Function `vote`](#0x1_aptos_governance_vote) +- [Function `partial_vote`](#0x1_aptos_governance_partial_vote) +- [Function `vote_internal`](#0x1_aptos_governance_vote_internal) +- [Function `add_approved_script_hash_script`](#0x1_aptos_governance_add_approved_script_hash_script) +- [Function `add_approved_script_hash`](#0x1_aptos_governance_add_approved_script_hash) +- [Function `resolve`](#0x1_aptos_governance_resolve) +- [Function `resolve_multi_step_proposal`](#0x1_aptos_governance_resolve_multi_step_proposal) +- [Function `remove_approved_hash`](#0x1_aptos_governance_remove_approved_hash) +- [Function `reconfigure`](#0x1_aptos_governance_reconfigure) +- [Function `force_end_epoch`](#0x1_aptos_governance_force_end_epoch) +- [Function `force_end_epoch_test_only`](#0x1_aptos_governance_force_end_epoch_test_only) +- [Function `toggle_features`](#0x1_aptos_governance_toggle_features) +- [Function `get_signer_testnet_only`](#0x1_aptos_governance_get_signer_testnet_only) +- [Function `get_voting_power`](#0x1_aptos_governance_get_voting_power) +- [Function `get_signer`](#0x1_aptos_governance_get_signer) +- [Function `create_proposal_metadata`](#0x1_aptos_governance_create_proposal_metadata) +- [Function `assert_voting_initialization`](#0x1_aptos_governance_assert_voting_initialization) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `store_signer_cap`](#@Specification_1_store_signer_cap) + - [Function `initialize`](#@Specification_1_initialize) + - [Function `update_governance_config`](#@Specification_1_update_governance_config) + - [Function `initialize_partial_voting`](#@Specification_1_initialize_partial_voting) + - [Function `get_voting_duration_secs`](#@Specification_1_get_voting_duration_secs) + - [Function `get_min_voting_threshold`](#@Specification_1_get_min_voting_threshold) + - [Function 
`get_required_proposer_stake`](#@Specification_1_get_required_proposer_stake) + - [Function `has_entirely_voted`](#@Specification_1_has_entirely_voted) + - [Function `get_remaining_voting_power`](#@Specification_1_get_remaining_voting_power) + - [Function `create_proposal`](#@Specification_1_create_proposal) + - [Function `create_proposal_v2`](#@Specification_1_create_proposal_v2) + - [Function `create_proposal_v2_impl`](#@Specification_1_create_proposal_v2_impl) + - [Function `batch_vote`](#@Specification_1_batch_vote) + - [Function `batch_partial_vote`](#@Specification_1_batch_partial_vote) + - [Function `vote`](#@Specification_1_vote) + - [Function `partial_vote`](#@Specification_1_partial_vote) + - [Function `vote_internal`](#@Specification_1_vote_internal) + - [Function `add_approved_script_hash_script`](#@Specification_1_add_approved_script_hash_script) + - [Function `add_approved_script_hash`](#@Specification_1_add_approved_script_hash) + - [Function `resolve`](#@Specification_1_resolve) + - [Function `resolve_multi_step_proposal`](#@Specification_1_resolve_multi_step_proposal) + - [Function `remove_approved_hash`](#@Specification_1_remove_approved_hash) + - [Function `reconfigure`](#@Specification_1_reconfigure) + - [Function `force_end_epoch`](#@Specification_1_force_end_epoch) + - [Function `force_end_epoch_test_only`](#@Specification_1_force_end_epoch_test_only) + - [Function `toggle_features`](#@Specification_1_toggle_features) + - [Function `get_signer_testnet_only`](#@Specification_1_get_signer_testnet_only) + - [Function `get_voting_power`](#@Specification_1_get_voting_power) + - [Function `get_signer`](#@Specification_1_get_signer) + - [Function `create_proposal_metadata`](#@Specification_1_create_proposal_metadata) + - [Function `assert_voting_initialization`](#@Specification_1_assert_voting_initialization) + + +
use 0x1::account;
+use 0x1::aptos_coin;
+use 0x1::coin;
+use 0x1::consensus_config;
+use 0x1::error;
+use 0x1::event;
+use 0x1::features;
+use 0x1::governance_proposal;
+use 0x1::math64;
+use 0x1::option;
+use 0x1::randomness_config;
+use 0x1::reconfiguration_with_dkg;
+use 0x1::signer;
+use 0x1::simple_map;
+use 0x1::smart_table;
+use 0x1::stake;
+use 0x1::staking_config;
+use 0x1::string;
+use 0x1::system_addresses;
+use 0x1::table;
+use 0x1::timestamp;
+use 0x1::vector;
+use 0x1::voting;
+
+ + + + + +## Resource `GovernanceResponsbility` + +Store the SignerCapabilities of accounts under the on-chain governance's control. + + +
struct GovernanceResponsbility has key
+
+ + + +
+Fields + + +
+
+signer_caps: simple_map::SimpleMap<address, account::SignerCapability> +
+
+ +
+
+ + +
+ + + +## Resource `GovernanceConfig` + +Configurations of the AptosGovernance, set during Genesis and can be updated by the same process offered +by this AptosGovernance module. + + +
struct GovernanceConfig has key
+
+ + + +
+Fields + + +
+
+min_voting_threshold: u128 +
+
+ +
+
+required_proposer_stake: u64 +
+
+ +
+
+voting_duration_secs: u64 +
+
+ +
+
+ + +
+ + + +## Struct `RecordKey` + + + +
struct RecordKey has copy, drop, store
+
+ + + +
+Fields + + +
+
+stake_pool: address +
+
+ +
+
+proposal_id: u64 +
+
+ +
+
+ + +
+ + + +## Resource `VotingRecords` + +Records to track the proposals each stake pool has been used to vote on. + + +
struct VotingRecords has key
+
+ + + +
+Fields + + +
+
+votes: table::Table<aptos_governance::RecordKey, bool> +
+
+ +
+
+ + +
+ + + +## Resource `VotingRecordsV2` + +Records to track the voting power usage of each stake pool on each proposal. + + +
struct VotingRecordsV2 has key
+
+ + + +
+Fields + + +
+
+votes: smart_table::SmartTable<aptos_governance::RecordKey, u64> +
+
+ +
+
+ + +
+ + + +## Resource `ApprovedExecutionHashes` + +Used to track which execution script hashes have been approved by governance. +This is required to bypass cases where the execution scripts exceed the size limit imposed by mempool. + + +
struct ApprovedExecutionHashes has key
+
+ + + +
+Fields + + +
+
+hashes: simple_map::SimpleMap<u64, vector<u8>> +
+
+ +
+
+ + +
+ + + +## Resource `GovernanceEvents` + +Events generated by interactions with the AptosGovernance module. + + +
struct GovernanceEvents has key
+
+ + + +
+Fields + + +
+
+create_proposal_events: event::EventHandle<aptos_governance::CreateProposalEvent> +
+
+ +
+
+update_config_events: event::EventHandle<aptos_governance::UpdateConfigEvent> +
+
+ +
+
+vote_events: event::EventHandle<aptos_governance::VoteEvent> +
+
+ +
+
+ + +
+ + + +## Struct `CreateProposalEvent` + +Event emitted when a proposal is created. + + +
struct CreateProposalEvent has drop, store
+
+ + + +
+Fields + + +
+
+proposer: address +
+
+ +
+
+stake_pool: address +
+
+ +
+
+proposal_id: u64 +
+
+ +
+
+execution_hash: vector<u8> +
+
+ +
+
+proposal_metadata: simple_map::SimpleMap<string::String, vector<u8>> +
+
+ +
+
+ + +
+ + + +## Struct `VoteEvent` + +Event emitted when there's a vote on a proposa; + + +
struct VoteEvent has drop, store
+
+ + + +
+Fields + + +
+
+proposal_id: u64 +
+
+ +
+
+voter: address +
+
+ +
+
+stake_pool: address +
+
+ +
+
+num_votes: u64 +
+
+ +
+
+should_pass: bool +
+
+ +
+
+ + +
+ + + +## Struct `UpdateConfigEvent` + +Event emitted when the governance configs are updated. + + +
struct UpdateConfigEvent has drop, store
+
+ + + +
+Fields + + +
+
+min_voting_threshold: u128 +
+
+ +
+
+required_proposer_stake: u64 +
+
+ +
+
+voting_duration_secs: u64 +
+
+ +
+
+ + +
+ + + +## Struct `CreateProposal` + +Event emitted when a proposal is created. + + +
#[event]
+struct CreateProposal has drop, store
+
+ + + +
+Fields + + +
+
+proposer: address +
+
+ +
+
+stake_pool: address +
+
+ +
+
+proposal_id: u64 +
+
+ +
+
+execution_hash: vector<u8> +
+
+ +
+
+proposal_metadata: simple_map::SimpleMap<string::String, vector<u8>> +
+
+ +
+
+ + +
+ + + +## Struct `Vote` + +Event emitted when there's a vote on a proposa; + + +
#[event]
+struct Vote has drop, store
+
+ + + +
+Fields + + +
+
+proposal_id: u64 +
+
+ +
+
+voter: address +
+
+ +
+
+stake_pool: address +
+
+ +
+
+num_votes: u64 +
+
+ +
+
+should_pass: bool +
+
+ +
+
+ + +
+ + + +## Struct `UpdateConfig` + +Event emitted when the governance configs are updated. + + +
#[event]
+struct UpdateConfig has drop, store
+
+ + + +
+Fields + + +
+
+min_voting_threshold: u128 +
+
+ +
+
+required_proposer_stake: u64 +
+
+ +
+
+voting_duration_secs: u64 +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const MAX_U64: u64 = 18446744073709551615;
+
+ + + + + +This matches the same enum const in voting. We have to duplicate it as Move doesn't have support for enums yet. + + +
const PROPOSAL_STATE_SUCCEEDED: u64 = 1;
+
+ + + + + +The specified stake pool has already been used to vote on the same proposal + + +
const EALREADY_VOTED: u64 = 4;
+
+ + + + + +The specified stake pool does not have sufficient stake to create a proposal + + +
const EINSUFFICIENT_PROPOSER_STAKE: u64 = 1;
+
+ + + + + +The specified stake pool does not have long enough remaining lockup to create a proposal or vote + + +
const EINSUFFICIENT_STAKE_LOCKUP: u64 = 3;
+
+ + + + + +Metadata hash cannot be longer than 256 chars + + +
const EMETADATA_HASH_TOO_LONG: u64 = 10;
+
+ + + + + +Metadata location cannot be longer than 256 chars + + +
const EMETADATA_LOCATION_TOO_LONG: u64 = 9;
+
+ + + + + +This account is not the designated voter of the specified stake pool + + +
const ENOT_DELEGATED_VOTER: u64 = 2;
+
+ + + + + +The proposal in the argument is not a partial voting proposal. + + +
const ENOT_PARTIAL_VOTING_PROPOSAL: u64 = 14;
+
+ + + + + +The specified stake pool must be part of the validator set + + +
const ENO_VOTING_POWER: u64 = 5;
+
+ + + + + +Partial voting feature hasn't been properly initialized. + + +
const EPARTIAL_VOTING_NOT_INITIALIZED: u64 = 13;
+
+ + + + + +Proposal is not ready to be resolved. Waiting on time or votes + + +
const EPROPOSAL_NOT_RESOLVABLE_YET: u64 = 6;
+
+ + + + + +The proposal has not been resolved yet + + +
const EPROPOSAL_NOT_RESOLVED_YET: u64 = 8;
+
+ + + + + +Account is not authorized to call this function. + + +
const EUNAUTHORIZED: u64 = 11;
+
+ + + + + +The stake pool is using voting power more than it has. + + +
const EVOTING_POWER_OVERFLOW: u64 = 12;
+
+ + + + + + + +
const METADATA_HASH_KEY: vector<u8> = [109, 101, 116, 97, 100, 97, 116, 97, 95, 104, 97, 115, 104];
+
+ + + + + +Proposal metadata attribute keys. + + +
const METADATA_LOCATION_KEY: vector<u8> = [109, 101, 116, 97, 100, 97, 116, 97, 95, 108, 111, 99, 97, 116, 105, 111, 110];
+
+ + + + + +## Function `store_signer_cap` + +Can be called during genesis or by the governance itself. +Stores the signer capability for a given address. + + +
public fun store_signer_cap(aptos_framework: &signer, signer_address: address, signer_cap: account::SignerCapability)
+
+ + + +
+Implementation + + +
public fun store_signer_cap(
+    aptos_framework: &signer,
+    signer_address: address,
+    signer_cap: SignerCapability,
+) acquires GovernanceResponsbility {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    system_addresses::assert_framework_reserved(signer_address);
+
+    if (!exists<GovernanceResponsbility>(@aptos_framework)) {
+        move_to(
+            aptos_framework,
+            GovernanceResponsbility { signer_caps: simple_map::create<address, SignerCapability>() }
+        );
+    };
+
+    let signer_caps = &mut borrow_global_mut<GovernanceResponsbility>(@aptos_framework).signer_caps;
+    simple_map::add(signer_caps, signer_address, signer_cap);
+}
+
+ + + +
+ + + +## Function `initialize` + +Initializes the state for Aptos Governance. Can only be called during Genesis with a signer +for the aptos_framework (0x1) account. +This function is private because it's called directly from the vm. + + +
fun initialize(aptos_framework: &signer, min_voting_threshold: u128, required_proposer_stake: u64, voting_duration_secs: u64)
+
+ + + +
+Implementation + + +
fun initialize(
+    aptos_framework: &signer,
+    min_voting_threshold: u128,
+    required_proposer_stake: u64,
+    voting_duration_secs: u64,
+) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+
+    voting::register<GovernanceProposal>(aptos_framework);
+    move_to(aptos_framework, GovernanceConfig {
+        voting_duration_secs,
+        min_voting_threshold,
+        required_proposer_stake,
+    });
+    move_to(aptos_framework, GovernanceEvents {
+        create_proposal_events: account::new_event_handle<CreateProposalEvent>(aptos_framework),
+        update_config_events: account::new_event_handle<UpdateConfigEvent>(aptos_framework),
+        vote_events: account::new_event_handle<VoteEvent>(aptos_framework),
+    });
+    move_to(aptos_framework, VotingRecords {
+        votes: table::new(),
+    });
+    move_to(aptos_framework, ApprovedExecutionHashes {
+        hashes: simple_map::create<u64, vector<u8>>(),
+    })
+}
+
+ + + +
+ + + +## Function `update_governance_config` + +Update the governance configurations. This can only be called as part of resolving a proposal in this same +AptosGovernance. + + +
public fun update_governance_config(aptos_framework: &signer, min_voting_threshold: u128, required_proposer_stake: u64, voting_duration_secs: u64)
+
+ + + +
+Implementation + + +
public fun update_governance_config(
+    aptos_framework: &signer,
+    min_voting_threshold: u128,
+    required_proposer_stake: u64,
+    voting_duration_secs: u64,
+) acquires GovernanceConfig, GovernanceEvents {
+    system_addresses::assert_aptos_framework(aptos_framework);
+
+    let governance_config = borrow_global_mut<GovernanceConfig>(@aptos_framework);
+    governance_config.voting_duration_secs = voting_duration_secs;
+    governance_config.min_voting_threshold = min_voting_threshold;
+    governance_config.required_proposer_stake = required_proposer_stake;
+
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            UpdateConfig {
+                min_voting_threshold,
+                required_proposer_stake,
+                voting_duration_secs
+            },
+        )
+    };
+    let events = borrow_global_mut<GovernanceEvents>(@aptos_framework);
+    event::emit_event<UpdateConfigEvent>(
+        &mut events.update_config_events,
+        UpdateConfigEvent {
+            min_voting_threshold,
+            required_proposer_stake,
+            voting_duration_secs
+        },
+    );
+}
+
+ + + +
+ + + +## Function `initialize_partial_voting` + +Initializes the state for Aptos Governance partial voting. Can only be called through Aptos governance +proposals with a signer for the aptos_framework (0x1) account. + + +
public fun initialize_partial_voting(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
public fun initialize_partial_voting(
+    aptos_framework: &signer,
+) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+
+    move_to(aptos_framework, VotingRecordsV2 {
+        votes: smart_table::new(),
+    });
+}
+
+ + + +
+ + + +## Function `get_voting_duration_secs` + + + +
#[view]
+public fun get_voting_duration_secs(): u64
+
+ + + +
+Implementation + + +
public fun get_voting_duration_secs(): u64 acquires GovernanceConfig {
+    borrow_global<GovernanceConfig>(@aptos_framework).voting_duration_secs
+}
+
+ + + +
+ + + +## Function `get_min_voting_threshold` + + + +
#[view]
+public fun get_min_voting_threshold(): u128
+
+ + + +
+Implementation + + +
public fun get_min_voting_threshold(): u128 acquires GovernanceConfig {
+    borrow_global<GovernanceConfig>(@aptos_framework).min_voting_threshold
+}
+
+ + + +
+ + + +## Function `get_required_proposer_stake` + + + +
#[view]
+public fun get_required_proposer_stake(): u64
+
+ + + +
+Implementation + + +
public fun get_required_proposer_stake(): u64 acquires GovernanceConfig {
+    borrow_global<GovernanceConfig>(@aptos_framework).required_proposer_stake
+}
+
+ + + +
+ + + +## Function `has_entirely_voted` + +Return true if a stake pool has already voted on a proposal before partial governance voting is enabled. + + +
#[view]
+public fun has_entirely_voted(stake_pool: address, proposal_id: u64): bool
+
+ + + +
+Implementation + + +
public fun has_entirely_voted(stake_pool: address, proposal_id: u64): bool acquires VotingRecords {
+    let record_key = RecordKey {
+        stake_pool,
+        proposal_id,
+    };
+    // If a stake pool has already voted on a proposal before partial governance voting is enabled,
+    // there is a record in VotingRecords.
+    let voting_records = borrow_global<VotingRecords>(@aptos_framework);
+    table::contains(&voting_records.votes, record_key)
+}
+
+ + + +
+ + + +## Function `get_remaining_voting_power` + +Return remaining voting power of a stake pool on a proposal. +Note: a stake pool's voting power on a proposal could increase over time(e.g. rewards/new stake). + + +
#[view]
+public fun get_remaining_voting_power(stake_pool: address, proposal_id: u64): u64
+
+ + + +
+Implementation + + +
public fun get_remaining_voting_power(
+    stake_pool: address,
+    proposal_id: u64
+): u64 acquires VotingRecords, VotingRecordsV2 {
+    assert_voting_initialization();
+
+    let proposal_expiration = voting::get_proposal_expiration_secs<GovernanceProposal>(
+        @aptos_framework,
+        proposal_id
+    );
+    let lockup_until = stake::get_lockup_secs(stake_pool);
+    // The voter's stake needs to be locked up at least as long as the proposal's expiration.
+    // Also no one can vote on an expired proposal.
+    if (proposal_expiration > lockup_until || timestamp::now_seconds() > proposal_expiration) {
+        return 0
+    };
+
+    // If a stake pool has already voted on a proposal before partial governance voting is enabled, the stake pool
+    // cannot vote on the proposal even after partial governance voting is enabled.
+    if (has_entirely_voted(stake_pool, proposal_id)) {
+        return 0
+    };
+    let record_key = RecordKey {
+        stake_pool,
+        proposal_id,
+    };
+    let used_voting_power = 0u64;
+    if (features::partial_governance_voting_enabled()) {
+        let voting_records_v2 = borrow_global<VotingRecordsV2>(@aptos_framework);
+        used_voting_power = *smart_table::borrow_with_default(&voting_records_v2.votes, record_key, &0);
+    };
+    get_voting_power(stake_pool) - used_voting_power
+}
+
+ + + +
+ + + +## Function `create_proposal` + +Create a single-step proposal with the backing stake_pool. +@param execution_hash Required. This is the hash of the resolution script. When the proposal is resolved, +only the exact script with matching hash can be successfully executed. + + +
public entry fun create_proposal(proposer: &signer, stake_pool: address, execution_hash: vector<u8>, metadata_location: vector<u8>, metadata_hash: vector<u8>)
+
+ + + +
+Implementation + + +
public entry fun create_proposal(
+    proposer: &signer,
+    stake_pool: address,
+    execution_hash: vector<u8>,
+    metadata_location: vector<u8>,
+    metadata_hash: vector<u8>,
+) acquires GovernanceConfig, GovernanceEvents {
+    create_proposal_v2(proposer, stake_pool, execution_hash, metadata_location, metadata_hash, false);
+}
+
+ + + +
+ + + +## Function `create_proposal_v2` + +Create a single-step or multi-step proposal with the backing stake_pool. +@param execution_hash Required. This is the hash of the resolution script. When the proposal is resolved, +only the exact script with matching hash can be successfully executed. + + +
public entry fun create_proposal_v2(proposer: &signer, stake_pool: address, execution_hash: vector<u8>, metadata_location: vector<u8>, metadata_hash: vector<u8>, is_multi_step_proposal: bool)
+
+ + + +
+Implementation + + +
public entry fun create_proposal_v2(
+    proposer: &signer,
+    stake_pool: address,
+    execution_hash: vector<u8>,
+    metadata_location: vector<u8>,
+    metadata_hash: vector<u8>,
+    is_multi_step_proposal: bool,
+) acquires GovernanceConfig, GovernanceEvents {
+    create_proposal_v2_impl(
+        proposer,
+        stake_pool,
+        execution_hash,
+        metadata_location,
+        metadata_hash,
+        is_multi_step_proposal
+    );
+}
+
+ + + +
+ + + +## Function `create_proposal_v2_impl` + +Create a single-step or multi-step proposal with the backing stake_pool. +@param execution_hash Required. This is the hash of the resolution script. When the proposal is resolved, +only the exact script with matching hash can be successfully executed. +Return proposal_id when a proposal is successfully created. + + +
public fun create_proposal_v2_impl(proposer: &signer, stake_pool: address, execution_hash: vector<u8>, metadata_location: vector<u8>, metadata_hash: vector<u8>, is_multi_step_proposal: bool): u64
+
+ + + +
+Implementation + + +
public fun create_proposal_v2_impl(
+    proposer: &signer,
+    stake_pool: address,
+    execution_hash: vector<u8>,
+    metadata_location: vector<u8>,
+    metadata_hash: vector<u8>,
+    is_multi_step_proposal: bool,
+): u64 acquires GovernanceConfig, GovernanceEvents {
+    let proposer_address = signer::address_of(proposer);
+    assert!(
+        stake::get_delegated_voter(stake_pool) == proposer_address,
+        error::invalid_argument(ENOT_DELEGATED_VOTER)
+    );
+
+    // The proposer's stake needs to be at least the required bond amount.
+    let governance_config = borrow_global<GovernanceConfig>(@aptos_framework);
+    let stake_balance = get_voting_power(stake_pool);
+    assert!(
+        stake_balance >= governance_config.required_proposer_stake,
+        error::invalid_argument(EINSUFFICIENT_PROPOSER_STAKE),
+    );
+
+    // The proposer's stake needs to be locked up at least as long as the proposal's voting period.
+    let current_time = timestamp::now_seconds();
+    let proposal_expiration = current_time + governance_config.voting_duration_secs;
+    assert!(
+        stake::get_lockup_secs(stake_pool) >= proposal_expiration,
+        error::invalid_argument(EINSUFFICIENT_STAKE_LOCKUP),
+    );
+
+    // Create and validate proposal metadata.
+    let proposal_metadata = create_proposal_metadata(metadata_location, metadata_hash);
+
+    // We want to allow early resolution of proposals if more than 50% of the total supply of the network coins
+    // has voted. This doesn't take into account subsequent inflation/deflation (rewards are issued every epoch and gas fees
+    // are burnt after every transaction), but inflation/deflation is very unlikely to have a major impact on total
+    // supply during the voting period.
+    let total_voting_token_supply = coin::supply<AptosCoin>();
+    let early_resolution_vote_threshold = option::none<u128>();
+    if (option::is_some(&total_voting_token_supply)) {
+        let total_supply = *option::borrow(&total_voting_token_supply);
+        // 50% + 1 to avoid rounding errors.
+        early_resolution_vote_threshold = option::some(total_supply / 2 + 1);
+    };
+
+    let proposal_id = voting::create_proposal_v2(
+        proposer_address,
+        @aptos_framework,
+        governance_proposal::create_proposal(),
+        execution_hash,
+        governance_config.min_voting_threshold,
+        proposal_expiration,
+        early_resolution_vote_threshold,
+        proposal_metadata,
+        is_multi_step_proposal,
+    );
+
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            CreateProposal {
+                proposal_id,
+                proposer: proposer_address,
+                stake_pool,
+                execution_hash,
+                proposal_metadata,
+            },
+        );
+    };
+    let events = borrow_global_mut<GovernanceEvents>(@aptos_framework);
+    event::emit_event<CreateProposalEvent>(
+        &mut events.create_proposal_events,
+        CreateProposalEvent {
+            proposal_id,
+            proposer: proposer_address,
+            stake_pool,
+            execution_hash,
+            proposal_metadata,
+        },
+    );
+    proposal_id
+}
+
+ + + +
+ + + +## Function `batch_vote` + +Vote on proposal with proposal_id and all voting power from multiple stake_pools. + + +
public entry fun batch_vote(voter: &signer, stake_pools: vector<address>, proposal_id: u64, should_pass: bool)
+
+ + + +
+Implementation + + +
public entry fun batch_vote(
+    voter: &signer,
+    stake_pools: vector<address>,
+    proposal_id: u64,
+    should_pass: bool,
+) acquires ApprovedExecutionHashes, VotingRecords, VotingRecordsV2, GovernanceEvents {
+    vector::for_each(stake_pools, |stake_pool| {
+        vote_internal(voter, stake_pool, proposal_id, MAX_U64, should_pass);
+    });
+}
+
+ + + +
+ + + +## Function `batch_partial_vote` + +Batch vote on proposal with proposal_id and specified voting power from multiple stake_pools. + + +
public entry fun batch_partial_vote(voter: &signer, stake_pools: vector<address>, proposal_id: u64, voting_power: u64, should_pass: bool)
+
+ + + +
+Implementation + + +
public entry fun batch_partial_vote(
+    voter: &signer,
+    stake_pools: vector<address>,
+    proposal_id: u64,
+    voting_power: u64,
+    should_pass: bool,
+) acquires ApprovedExecutionHashes, VotingRecords, VotingRecordsV2, GovernanceEvents {
+    vector::for_each(stake_pools, |stake_pool| {
+        vote_internal(voter, stake_pool, proposal_id, voting_power, should_pass);
+    });
+}
+
+ + + +
+ + + +## Function `vote` + +Vote on proposal with proposal_id and all voting power from stake_pool. + + +
public entry fun vote(voter: &signer, stake_pool: address, proposal_id: u64, should_pass: bool)
+
+ + + +
+Implementation + + +
public entry fun vote(
+    voter: &signer,
+    stake_pool: address,
+    proposal_id: u64,
+    should_pass: bool,
+) acquires ApprovedExecutionHashes, VotingRecords, VotingRecordsV2, GovernanceEvents {
+    vote_internal(voter, stake_pool, proposal_id, MAX_U64, should_pass);
+}
+
+ + + +
+ + + +## Function `partial_vote` + +Vote on proposal with proposal_id and specified voting power from stake_pool. + + +
public entry fun partial_vote(voter: &signer, stake_pool: address, proposal_id: u64, voting_power: u64, should_pass: bool)
+
+ + + +
+Implementation + + +
public entry fun partial_vote(
+    voter: &signer,
+    stake_pool: address,
+    proposal_id: u64,
+    voting_power: u64,
+    should_pass: bool,
+) acquires ApprovedExecutionHashes, VotingRecords, VotingRecordsV2, GovernanceEvents {
+    vote_internal(voter, stake_pool, proposal_id, voting_power, should_pass);
+}
+
+ + + +
+ + + +## Function `vote_internal` + +Vote on proposal with proposal_id and specified voting_power from stake_pool. +If voting_power is more than all the remaining voting power of stake_pool, use all the remaining voting power. +If a stake pool has already voted on a proposal before partial governance voting is enabled, the stake pool +cannot vote on the proposal even after partial governance voting is enabled. + + +
fun vote_internal(voter: &signer, stake_pool: address, proposal_id: u64, voting_power: u64, should_pass: bool)
+
+ + + +
+Implementation + + +
fun vote_internal(
+    voter: &signer,
+    stake_pool: address,
+    proposal_id: u64,
+    voting_power: u64,
+    should_pass: bool,
+) acquires ApprovedExecutionHashes, VotingRecords, VotingRecordsV2, GovernanceEvents {
+    let voter_address = signer::address_of(voter);
+    assert!(stake::get_delegated_voter(stake_pool) == voter_address, error::invalid_argument(ENOT_DELEGATED_VOTER));
+
+    // The voter's stake needs to be locked up at least as long as the proposal's expiration.
+    let proposal_expiration = voting::get_proposal_expiration_secs<GovernanceProposal>(
+        @aptos_framework,
+        proposal_id
+    );
+    assert!(
+        stake::get_lockup_secs(stake_pool) >= proposal_expiration,
+        error::invalid_argument(EINSUFFICIENT_STAKE_LOCKUP),
+    );
+
+    // If a stake pool has already voted on a proposal before partial governance voting is enabled,
+    // `get_remaining_voting_power` returns 0.
+    let staking_pool_voting_power = get_remaining_voting_power(stake_pool, proposal_id);
+    voting_power = min(voting_power, staking_pool_voting_power);
+
+    // Short-circuit if the voter has no voting power.
+    assert!(voting_power > 0, error::invalid_argument(ENO_VOTING_POWER));
+
+    voting::vote<GovernanceProposal>(
+        &governance_proposal::create_empty_proposal(),
+        @aptos_framework,
+        proposal_id,
+        voting_power,
+        should_pass,
+    );
+
+    let record_key = RecordKey {
+        stake_pool,
+        proposal_id,
+    };
+    if (features::partial_governance_voting_enabled()) {
+        let voting_records_v2 = borrow_global_mut<VotingRecordsV2>(@aptos_framework);
+        let used_voting_power = smart_table::borrow_mut_with_default(&mut voting_records_v2.votes, record_key, 0);
+        // This calculation should never overflow because the used voting power cannot exceed the total voting power of this stake pool.
+        *used_voting_power = *used_voting_power + voting_power;
+    } else {
+        let voting_records = borrow_global_mut<VotingRecords>(@aptos_framework);
+        assert!(
+            !table::contains(&voting_records.votes, record_key),
+            error::invalid_argument(EALREADY_VOTED));
+        table::add(&mut voting_records.votes, record_key, true);
+    };
+
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            Vote {
+                proposal_id,
+                voter: voter_address,
+                stake_pool,
+                num_votes: voting_power,
+                should_pass,
+            },
+        );
+    };
+    let events = borrow_global_mut<GovernanceEvents>(@aptos_framework);
+    event::emit_event<VoteEvent>(
+        &mut events.vote_events,
+        VoteEvent {
+            proposal_id,
+            voter: voter_address,
+            stake_pool,
+            num_votes: voting_power,
+            should_pass,
+        },
+    );
+
+    let proposal_state = voting::get_proposal_state<GovernanceProposal>(@aptos_framework, proposal_id);
+    if (proposal_state == PROPOSAL_STATE_SUCCEEDED) {
+        add_approved_script_hash(proposal_id);
+    }
+}
+
+ + + +
+ + + +## Function `add_approved_script_hash_script` + + + +
public entry fun add_approved_script_hash_script(proposal_id: u64)
+
+ + + +
+Implementation + + +
public entry fun add_approved_script_hash_script(proposal_id: u64) acquires ApprovedExecutionHashes {
+    add_approved_script_hash(proposal_id)
+}
+
+ + + +
+ + + +## Function `add_approved_script_hash` + +Add the execution script hash of a successful governance proposal to the approved list. +This is needed to bypass the mempool transaction size limit for approved governance proposal transactions that +are too large (e.g. module upgrades). + + +
public fun add_approved_script_hash(proposal_id: u64)
+
+ + + +
+Implementation + + +
public fun add_approved_script_hash(proposal_id: u64) acquires ApprovedExecutionHashes {
+    let approved_hashes = borrow_global_mut<ApprovedExecutionHashes>(@aptos_framework);
+
+    // Ensure the proposal can be resolved.
+    let proposal_state = voting::get_proposal_state<GovernanceProposal>(@aptos_framework, proposal_id);
+    assert!(proposal_state == PROPOSAL_STATE_SUCCEEDED, error::invalid_argument(EPROPOSAL_NOT_RESOLVABLE_YET));
+
+    let execution_hash = voting::get_execution_hash<GovernanceProposal>(@aptos_framework, proposal_id);
+
+    // If this is a multi-step proposal, the proposal id will already exist in the ApprovedExecutionHashes map.
+    // We will update execution hash in ApprovedExecutionHashes to be the next_execution_hash.
+    if (simple_map::contains_key(&approved_hashes.hashes, &proposal_id)) {
+        let current_execution_hash = simple_map::borrow_mut(&mut approved_hashes.hashes, &proposal_id);
+        *current_execution_hash = execution_hash;
+    } else {
+        simple_map::add(&mut approved_hashes.hashes, proposal_id, execution_hash);
+    }
+}
+
+ + + +
+ + + +## Function `resolve` + +Resolve a successful single-step proposal. This would fail if the proposal is not successful (not enough votes or more no +than yes). + + +
public fun resolve(proposal_id: u64, signer_address: address): signer
+
+ + + +
+Implementation + + +
public fun resolve(
+    proposal_id: u64,
+    signer_address: address
+): signer acquires ApprovedExecutionHashes, GovernanceResponsbility {
+    voting::resolve<GovernanceProposal>(@aptos_framework, proposal_id);
+    remove_approved_hash(proposal_id);
+    get_signer(signer_address)
+}
+
+ + + +
+ + + +## Function `resolve_multi_step_proposal` + +Resolve a successful multi-step proposal. This would fail if the proposal is not successful. + + +
public fun resolve_multi_step_proposal(proposal_id: u64, signer_address: address, next_execution_hash: vector<u8>): signer
+
+ + + +
+Implementation + + +
public fun resolve_multi_step_proposal(
+    proposal_id: u64,
+    signer_address: address,
+    next_execution_hash: vector<u8>
+): signer acquires GovernanceResponsbility, ApprovedExecutionHashes {
+    voting::resolve_proposal_v2<GovernanceProposal>(@aptos_framework, proposal_id, next_execution_hash);
+    // If the current step is the last step of this multi-step proposal,
+    // we will remove the execution hash from the ApprovedExecutionHashes map.
+    if (vector::length(&next_execution_hash) == 0) {
+        remove_approved_hash(proposal_id);
+    } else {
+        // If the current step is not the last step of this proposal,
+        // we replace the current execution hash with the next execution hash
+        // in the ApprovedExecutionHashes map.
+        add_approved_script_hash(proposal_id)
+    };
+    get_signer(signer_address)
+}
+
+ + + +
+ + + +## Function `remove_approved_hash` + +Remove an approved proposal's execution script hash. + + +
public fun remove_approved_hash(proposal_id: u64)
+
+ + + +
+Implementation + + +
public fun remove_approved_hash(proposal_id: u64) acquires ApprovedExecutionHashes {
+    assert!(
+        voting::is_resolved<GovernanceProposal>(@aptos_framework, proposal_id),
+        error::invalid_argument(EPROPOSAL_NOT_RESOLVED_YET),
+    );
+
+    let approved_hashes = &mut borrow_global_mut<ApprovedExecutionHashes>(@aptos_framework).hashes;
+    if (simple_map::contains_key(approved_hashes, &proposal_id)) {
+        simple_map::remove(approved_hashes, &proposal_id);
+    };
+}
+
+ + + +
+ + + +## Function `reconfigure` + +Manually reconfigure. Called at the end of a governance txn that alters on-chain configs. + +WARNING: this function always ensures a reconfiguration starts, but when the reconfiguration finishes depends on the feature flags. +- If feature RECONFIGURE_WITH_DKG is disabled, it finishes immediately. +- At the end of the calling transaction, we will be in a new epoch. +- If feature RECONFIGURE_WITH_DKG is enabled, it starts DKG, and the new epoch will start in a block prologue after DKG finishes. + +This behavior affects when an update of an on-chain config (e.g. ConsensusConfig, Features) takes effect, +since such updates are applied whenever we enter a new epoch. + +
public entry fun reconfigure(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
public entry fun reconfigure(aptos_framework: &signer) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    if (consensus_config::validator_txn_enabled() && randomness_config::enabled()) {
+        reconfiguration_with_dkg::try_start();
+    } else {
+        reconfiguration_with_dkg::finish(aptos_framework);
+    }
+}
+
+ + + +
+ + + +## Function `force_end_epoch` + +Change epoch immediately. +If RECONFIGURE_WITH_DKG is enabled and we are in the middle of a DKG, +stop waiting for DKG and enter the new epoch without randomness. + +WARNING: currently only used by tests. In most cases you should use reconfigure() instead. +TODO: migrate these tests to be aware of async reconfiguration. + + +
public entry fun force_end_epoch(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
public entry fun force_end_epoch(aptos_framework: &signer) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    reconfiguration_with_dkg::finish(aptos_framework);
+}
+
+ + + +
+ + + +## Function `force_end_epoch_test_only` + +force_end_epoch() equivalent but only called in testnet, +where the core resources account exists and has been granted power to mint Aptos coins. + + +
public entry fun force_end_epoch_test_only(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
public entry fun force_end_epoch_test_only(aptos_framework: &signer) acquires GovernanceResponsbility {
+    let core_signer = get_signer_testnet_only(aptos_framework, @0x1);
+    system_addresses::assert_aptos_framework(&core_signer);
+    reconfiguration_with_dkg::finish(&core_signer);
+}
+
+ + + +
+ + + +## Function `toggle_features` + +Update feature flags and also trigger reconfiguration. + + +
public fun toggle_features(aptos_framework: &signer, enable: vector<u64>, disable: vector<u64>)
+
+ + + +
+Implementation + + +
public fun toggle_features(aptos_framework: &signer, enable: vector<u64>, disable: vector<u64>) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    features::change_feature_flags_for_next_epoch(aptos_framework, enable, disable);
+    reconfigure(aptos_framework);
+}
+
+ + + +
+ + + +## Function `get_signer_testnet_only` + +Only called in testnet where the core resources account exists and has been granted power to mint Aptos coins. + + +
public fun get_signer_testnet_only(core_resources: &signer, signer_address: address): signer
+
+ + + +
+Implementation + + +
public fun get_signer_testnet_only(
+    core_resources: &signer, signer_address: address): signer acquires GovernanceResponsbility {
+    system_addresses::assert_core_resource(core_resources);
+    // Core resources account only has mint capability in tests/testnets.
+    assert!(aptos_coin::has_mint_capability(core_resources), error::unauthenticated(EUNAUTHORIZED));
+    get_signer(signer_address)
+}
+
+ + + +
+ + + +## Function `get_voting_power` + +Return the voting power a stake pool has with respect to governance proposals. + + +
#[view]
+public fun get_voting_power(pool_address: address): u64
+
+ + + +
+Implementation + + +
public fun get_voting_power(pool_address: address): u64 {
+    let allow_validator_set_change = staking_config::get_allow_validator_set_change(&staking_config::get());
+    if (allow_validator_set_change) {
+        let (active, _, pending_active, pending_inactive) = stake::get_stake(pool_address);
+        // We calculate the voting power as total non-inactive stakes of the pool. Even if the validator is not in the
+        // active validator set, as long as they have a lockup (separately checked in create_proposal and voting), their
+        // stake would still count in their voting power for governance proposals.
+        active + pending_active + pending_inactive
+    } else {
+        stake::get_current_epoch_voting_power(pool_address)
+    }
+}
+
+ + + +
+ + + +## Function `get_signer` + +Return a signer for making changes to 0x1 as part of on-chain governance proposal process. + + +
fun get_signer(signer_address: address): signer
+
+ + + +
+Implementation + + +
fun get_signer(signer_address: address): signer acquires GovernanceResponsbility {
+    let governance_responsibility = borrow_global<GovernanceResponsbility>(@aptos_framework);
+    let signer_cap = simple_map::borrow(&governance_responsibility.signer_caps, &signer_address);
+    create_signer_with_capability(signer_cap)
+}
+
+ + + +
+ + + +## Function `create_proposal_metadata` + + + +
fun create_proposal_metadata(metadata_location: vector<u8>, metadata_hash: vector<u8>): simple_map::SimpleMap<string::String, vector<u8>>
+
+ + + +
+Implementation + + +
fun create_proposal_metadata(
+    metadata_location: vector<u8>,
+    metadata_hash: vector<u8>
+): SimpleMap<String, vector<u8>> {
+    assert!(string::length(&utf8(metadata_location)) <= 256, error::invalid_argument(EMETADATA_LOCATION_TOO_LONG));
+    assert!(string::length(&utf8(metadata_hash)) <= 256, error::invalid_argument(EMETADATA_HASH_TOO_LONG));
+
+    let metadata = simple_map::create<String, vector<u8>>();
+    simple_map::add(&mut metadata, utf8(METADATA_LOCATION_KEY), metadata_location);
+    simple_map::add(&mut metadata, utf8(METADATA_HASH_KEY), metadata_hash);
+    metadata
+}
+
+ + + +
+ + + +## Function `assert_voting_initialization` + + + +
fun assert_voting_initialization()
+
+ + + +
+Implementation + + +
fun assert_voting_initialization() {
+    if (features::partial_governance_voting_enabled()) {
+        assert!(exists<VotingRecordsV2>(@aptos_framework), error::invalid_state(EPARTIAL_VOTING_NOT_INITIALIZED));
+    };
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1The create proposal function calls create proposal v2.LowThe create_proposal function internally calls create_proposal_v2.This is manually audited to ensure create_proposal_v2 is called in create_proposal.
2The proposer must have a stake equal to or greater than the required bond amount.HighThe create_proposal_v2 function verifies that the stake balance equals or exceeds the required proposer stake amount.Formally verified in CreateProposalAbortsIf.
3The Approved execution hashes resources that exist when the vote function is called.LowThe Vote function acquires the Approved execution hashes resources.Formally verified in VoteAbortIf.
4The execution script hash of a successful governance proposal is added to the approved list if the proposal can be resolved.MediumThe add_approved_script_hash function asserts that proposal_state == PROPOSAL_STATE_SUCCEEDED.Formally verified in AddApprovedScriptHash.
+ + + + + + +### Module-level Specification + + +
pragma verify = true;
+pragma aborts_if_is_strict;
+
+ + + + + +### Function `store_signer_cap` + + +
public fun store_signer_cap(aptos_framework: &signer, signer_address: address, signer_cap: account::SignerCapability)
+
+ + + + +
aborts_if !system_addresses::is_aptos_framework_address(signer::address_of(aptos_framework));
+aborts_if !system_addresses::is_framework_reserved_address(signer_address);
+let signer_caps = global<GovernanceResponsbility>(@aptos_framework).signer_caps;
+aborts_if exists<GovernanceResponsbility>(@aptos_framework) &&
+    simple_map::spec_contains_key(signer_caps, signer_address);
+ensures exists<GovernanceResponsbility>(@aptos_framework);
+let post post_signer_caps = global<GovernanceResponsbility>(@aptos_framework).signer_caps;
+ensures simple_map::spec_contains_key(post_signer_caps, signer_address);
+
+ + + + + +### Function `initialize` + + +
fun initialize(aptos_framework: &signer, min_voting_threshold: u128, required_proposer_stake: u64, voting_duration_secs: u64)
+
+ + +Signer address must be @aptos_framework. +Aborts if any of these resources (GovernanceProposal, GovernanceConfig, GovernanceEvents, VotingRecords, ApprovedExecutionHashes) already exist under the signer. +The signer must have an Account. +The GUID creation number additions must not overflow. + +
let addr = signer::address_of(aptos_framework);
+let register_account = global<account::Account>(addr);
+aborts_if exists<voting::VotingForum<GovernanceProposal>>(addr);
+aborts_if !exists<account::Account>(addr);
+aborts_if register_account.guid_creation_num + 7 > MAX_U64;
+aborts_if register_account.guid_creation_num + 7 >= account::MAX_GUID_CREATION_NUM;
+aborts_if !type_info::spec_is_struct<GovernanceProposal>();
+include InitializeAbortIf;
+ensures exists<voting::VotingForum<governance_proposal::GovernanceProposal>>(addr);
+ensures exists<GovernanceConfig>(addr);
+ensures exists<GovernanceEvents>(addr);
+ensures exists<VotingRecords>(addr);
+ensures exists<ApprovedExecutionHashes>(addr);
+
+ + + + + +### Function `update_governance_config` + + +
public fun update_governance_config(aptos_framework: &signer, min_voting_threshold: u128, required_proposer_stake: u64, voting_duration_secs: u64)
+
+ + +Signer address must be @aptos_framework. +GovernanceConfig and GovernanceEvents must exist under address @aptos_framework. + + +
let addr = signer::address_of(aptos_framework);
+let governance_config = global<GovernanceConfig>(@aptos_framework);
+let post new_governance_config = global<GovernanceConfig>(@aptos_framework);
+aborts_if addr != @aptos_framework;
+aborts_if !exists<GovernanceConfig>(@aptos_framework);
+aborts_if !exists<GovernanceEvents>(@aptos_framework);
+modifies global<GovernanceConfig>(addr);
+ensures new_governance_config.voting_duration_secs == voting_duration_secs;
+ensures new_governance_config.min_voting_threshold == min_voting_threshold;
+ensures new_governance_config.required_proposer_stake == required_proposer_stake;
+
+ + + + + +### Function `initialize_partial_voting` + + +
public fun initialize_partial_voting(aptos_framework: &signer)
+
+ + +Signer address must be @aptos_framework. +Abort if structs have already been created. + + +
let addr = signer::address_of(aptos_framework);
+aborts_if addr != @aptos_framework;
+aborts_if exists<VotingRecordsV2>(@aptos_framework);
+ensures exists<VotingRecordsV2>(@aptos_framework);
+
+ + + + + + + +
schema InitializeAbortIf {
+    aptos_framework: &signer;
+    min_voting_threshold: u128;
+    required_proposer_stake: u64;
+    voting_duration_secs: u64;
+    let addr = signer::address_of(aptos_framework);
+    let account = global<account::Account>(addr);
+    aborts_if addr != @aptos_framework;
+    aborts_if exists<voting::VotingForum<governance_proposal::GovernanceProposal>>(addr);
+    aborts_if exists<GovernanceConfig>(addr);
+    aborts_if exists<GovernanceEvents>(addr);
+    aborts_if exists<VotingRecords>(addr);
+    aborts_if exists<ApprovedExecutionHashes>(addr);
+    aborts_if !exists<account::Account>(addr);
+}
+
+ + + + + +### Function `get_voting_duration_secs` + + +
#[view]
+public fun get_voting_duration_secs(): u64
+
+ + + + +
include AbortsIfNotGovernanceConfig;
+
+ + + + + +### Function `get_min_voting_threshold` + + +
#[view]
+public fun get_min_voting_threshold(): u128
+
+ + + + +
include AbortsIfNotGovernanceConfig;
+
+ + + + + +### Function `get_required_proposer_stake` + + +
#[view]
+public fun get_required_proposer_stake(): u64
+
+ + + + +
include AbortsIfNotGovernanceConfig;
+
+ + + + + + + +
schema AbortsIfNotGovernanceConfig {
+    aborts_if !exists<GovernanceConfig>(@aptos_framework);
+}
+
+ + + + + +### Function `has_entirely_voted` + + +
#[view]
+public fun has_entirely_voted(stake_pool: address, proposal_id: u64): bool
+
+ + + + +
aborts_if !exists<VotingRecords>(@aptos_framework);
+
+ + + + + +### Function `get_remaining_voting_power` + + +
#[view]
+public fun get_remaining_voting_power(stake_pool: address, proposal_id: u64): u64
+
+ + + + +
aborts_if features::spec_partial_governance_voting_enabled() && !exists<VotingRecordsV2>(@aptos_framework);
+include voting::AbortsIfNotContainProposalID<GovernanceProposal> {
+    voting_forum_address: @aptos_framework
+};
+aborts_if !exists<stake::StakePool>(stake_pool);
+aborts_if spec_proposal_expiration <= locked_until && !exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+let spec_proposal_expiration = voting::spec_get_proposal_expiration_secs<GovernanceProposal>(@aptos_framework, proposal_id);
+let locked_until = global<stake::StakePool>(stake_pool).locked_until_secs;
+let remain_zero_1_cond = (spec_proposal_expiration > locked_until || timestamp::spec_now_seconds() > spec_proposal_expiration);
+ensures remain_zero_1_cond ==> result == 0;
+let record_key = RecordKey {
+    stake_pool,
+    proposal_id,
+};
+let entirely_voted = spec_has_entirely_voted(stake_pool, proposal_id, record_key);
+aborts_if !remain_zero_1_cond && !exists<VotingRecords>(@aptos_framework);
+include !remain_zero_1_cond && !entirely_voted ==> GetVotingPowerAbortsIf {
+    pool_address: stake_pool
+};
+let staking_config = global<staking_config::StakingConfig>(@aptos_framework);
+let voting_power = spec_get_voting_power(stake_pool, staking_config);
+let voting_records_v2 = borrow_global<VotingRecordsV2>(@aptos_framework);
+let used_voting_power = if (smart_table::spec_contains(voting_records_v2.votes, record_key)) {
+    smart_table::spec_get(voting_records_v2.votes, record_key)
+} else {
+    0
+};
+aborts_if !remain_zero_1_cond && !entirely_voted && features::spec_partial_governance_voting_enabled() &&
+    used_voting_power > 0 && voting_power < used_voting_power;
+ensures result == spec_get_remaining_voting_power(stake_pool, proposal_id);
+
+ + + + + + + +
fun spec_get_remaining_voting_power(stake_pool: address, proposal_id: u64): u64 {
+   let spec_proposal_expiration = voting::spec_get_proposal_expiration_secs<GovernanceProposal>(@aptos_framework, proposal_id);
+   let locked_until = global<stake::StakePool>(stake_pool).locked_until_secs;
+   let remain_zero_1_cond = (spec_proposal_expiration > locked_until || timestamp::spec_now_seconds() > spec_proposal_expiration);
+   let staking_config = global<staking_config::StakingConfig>(@aptos_framework);
+   let voting_records_v2 = borrow_global<VotingRecordsV2>(@aptos_framework);
+   let record_key = RecordKey {
+       stake_pool,
+       proposal_id,
+   };
+   let entirely_voted = spec_has_entirely_voted(stake_pool, proposal_id, record_key);
+   let voting_power = spec_get_voting_power(stake_pool, staking_config);
+   let used_voting_power = if (smart_table::spec_contains(voting_records_v2.votes, record_key)) {
+       smart_table::spec_get(voting_records_v2.votes, record_key)
+   } else {
+       0
+   };
+   if (remain_zero_1_cond) {
+       0
+   } else if (entirely_voted) {
+       0
+   } else if (!features::spec_partial_governance_voting_enabled()) {
+       voting_power
+   } else {
+       voting_power - used_voting_power
+   }
+}
+
+ + + + + + + +
fun spec_has_entirely_voted(stake_pool: address, proposal_id: u64, record_key: RecordKey): bool {
+   let voting_records = global<VotingRecords>(@aptos_framework);
+   table::spec_contains(voting_records.votes, record_key)
+}
+
+ + + + + + + +
schema GetVotingPowerAbortsIf {
+    pool_address: address;
+    let staking_config = global<staking_config::StakingConfig>(@aptos_framework);
+    aborts_if !exists<staking_config::StakingConfig>(@aptos_framework);
+    let allow_validator_set_change = staking_config.allow_validator_set_change;
+    let stake_pool_res = global<stake::StakePool>(pool_address);
+    aborts_if allow_validator_set_change && (stake_pool_res.active.value + stake_pool_res.pending_active.value + stake_pool_res.pending_inactive.value) > MAX_U64;
+    aborts_if !exists<stake::StakePool>(pool_address);
+    aborts_if !allow_validator_set_change && !exists<stake::ValidatorSet>(@aptos_framework);
+    aborts_if !allow_validator_set_change && stake::spec_is_current_epoch_validator(pool_address) && stake_pool_res.active.value + stake_pool_res.pending_inactive.value > MAX_U64;
+}
+
+ + + + + +### Function `create_proposal` + + +
public entry fun create_proposal(proposer: &signer, stake_pool: address, execution_hash: vector<u8>, metadata_location: vector<u8>, metadata_hash: vector<u8>)
+
+ + +The same as spec of create_proposal_v2(). + + +
pragma verify_duration_estimate = 60;
+requires chain_status::is_operating();
+include CreateProposalAbortsIf;
+
+ + + + + +### Function `create_proposal_v2` + + +
public entry fun create_proposal_v2(proposer: &signer, stake_pool: address, execution_hash: vector<u8>, metadata_location: vector<u8>, metadata_hash: vector<u8>, is_multi_step_proposal: bool)
+
+ + + + +
pragma verify_duration_estimate = 60;
+requires chain_status::is_operating();
+include CreateProposalAbortsIf;
+
+ + + + + +### Function `create_proposal_v2_impl` + + +
public fun create_proposal_v2_impl(proposer: &signer, stake_pool: address, execution_hash: vector<u8>, metadata_location: vector<u8>, metadata_hash: vector<u8>, is_multi_step_proposal: bool): u64
+
+ + + + +
pragma verify_duration_estimate = 60;
+requires chain_status::is_operating();
+include CreateProposalAbortsIf;
+
+ + + + + +### Function `batch_vote` + + +
public entry fun batch_vote(voter: &signer, stake_pools: vector<address>, proposal_id: u64, should_pass: bool)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `batch_partial_vote` + + +
public entry fun batch_partial_vote(voter: &signer, stake_pools: vector<address>, proposal_id: u64, voting_power: u64, should_pass: bool)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `vote` + + +
public entry fun vote(voter: &signer, stake_pool: address, proposal_id: u64, should_pass: bool)
+
+ + +A StakePool resource must exist at stake_pool. +The delegated voter under the resource StakePool of the stake_pool must be the voter address. +VotingRecords and GovernanceProposal must exist under address @aptos_framework. + + +
pragma verify_duration_estimate = 60;
+requires chain_status::is_operating();
+include VoteAbortIf  {
+    voting_power: MAX_U64
+};
+
+ + + + + +### Function `partial_vote` + + +
public entry fun partial_vote(voter: &signer, stake_pool: address, proposal_id: u64, voting_power: u64, should_pass: bool)
+
+ + +A StakePool resource must exist at the stake_pool address. +The delegated voter recorded in that StakePool must be the voter's address. +The VotingRecords resource and the GovernanceProposal voting forum must exist under @aptos_framework. +The VotingRecordsV2 resource must also exist under @aptos_framework if the partial_governance_voting feature flag is enabled. + + +
pragma verify_duration_estimate = 60;
+requires chain_status::is_operating();
+include VoteAbortIf;
+
+ + + + + +### Function `vote_internal` + + +
fun vote_internal(voter: &signer, stake_pool: address, proposal_id: u64, voting_power: u64, should_pass: bool)
+
+ + +stake_pool must exist StakePool. +The delegated voter under the resource StakePool of the stake_pool must be the voter address. +Address @aptos_framework must exist VotingRecords and GovernanceProposal. +Address @aptos_framework must exist VotingRecordsV2 if partial_governance_voting flag is enabled. + + +
pragma verify_duration_estimate = 60;
+requires chain_status::is_operating();
+include VoteAbortIf;
+
+ + + + + + + +
schema VoteAbortIf {
+    voter: &signer;
+    stake_pool: address;
+    proposal_id: u64;
+    should_pass: bool;
+    voting_power: u64;
+    include VotingGetDelegatedVoterAbortsIf { sign: voter };
+    aborts_if spec_proposal_expiration <= locked_until && !exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+    let spec_proposal_expiration = voting::spec_get_proposal_expiration_secs<GovernanceProposal>(@aptos_framework, proposal_id);
+    let locked_until = global<stake::StakePool>(stake_pool).locked_until_secs;
+    // Remaining voting power is forced to zero when the proposal outlives the
+    // pool's lockup or the proposal has already expired.
+    let remain_zero_1_cond = (spec_proposal_expiration > locked_until || timestamp::spec_now_seconds() > spec_proposal_expiration);
+    let record_key = RecordKey {
+        stake_pool,
+        proposal_id,
+    };
+    let entirely_voted = spec_has_entirely_voted(stake_pool, proposal_id, record_key);
+    aborts_if !remain_zero_1_cond && !exists<VotingRecords>(@aptos_framework);
+    include !remain_zero_1_cond && !entirely_voted ==> GetVotingPowerAbortsIf {
+        pool_address: stake_pool
+    };
+    let staking_config = global<staking_config::StakingConfig>(@aptos_framework);
+    let spec_voting_power = spec_get_voting_power(stake_pool, staking_config);
+    let voting_records_v2 = borrow_global<VotingRecordsV2>(@aptos_framework);
+    // Voting power already spent under this (stake_pool, proposal_id) key, if any.
+    let used_voting_power = if (smart_table::spec_contains(voting_records_v2.votes, record_key)) {
+        smart_table::spec_get(voting_records_v2.votes, record_key)
+    } else {
+        0
+    };
+    aborts_if !remain_zero_1_cond && !entirely_voted && features::spec_partial_governance_voting_enabled() &&
+        used_voting_power > 0 && spec_voting_power < used_voting_power;
+    let remaining_power = spec_get_remaining_voting_power(stake_pool, proposal_id);
+    let real_voting_power =  min(voting_power, remaining_power);
+    // The effective (capped) voting power must be strictly positive.
+    aborts_if !(real_voting_power > 0);
+    aborts_if !exists<VotingRecords>(@aptos_framework);
+    let voting_records = global<VotingRecords>(@aptos_framework);
+    let allow_validator_set_change = global<staking_config::StakingConfig>(@aptos_framework).allow_validator_set_change;
+    let stake_pool_res = global<stake::StakePool>(stake_pool);
+    aborts_if !exists<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
+    let voting_forum = global<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
+    let proposal = table::spec_get(voting_forum.proposals, proposal_id);
+    aborts_if !table::spec_contains(voting_forum.proposals, proposal_id);
+    let proposal_expiration = proposal.expiration_secs;
+    let locked_until_secs = global<stake::StakePool>(stake_pool).locked_until_secs;
+    aborts_if proposal_expiration > locked_until_secs;
+    aborts_if timestamp::now_seconds() > proposal_expiration;
+    aborts_if proposal.is_resolved;
+    aborts_if !string::spec_internal_check_utf8(voting::IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY);
+    let execution_key = utf8(voting::IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY);
+    // Voting on a multi-step proposal that is mid-execution is not allowed.
+    aborts_if simple_map::spec_contains_key(proposal.metadata, execution_key) &&
+              simple_map::spec_get(proposal.metadata, execution_key) != std::bcs::to_bytes(false);
+    aborts_if
+        if (should_pass) { proposal.yes_votes + real_voting_power > MAX_U128 } else { proposal.no_votes + real_voting_power > MAX_U128 };
+    let post post_voting_forum = global<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
+    let post post_proposal = table::spec_get(post_voting_forum.proposals, proposal_id);
+    aborts_if !string::spec_internal_check_utf8(voting::RESOLVABLE_TIME_METADATA_KEY);
+    let key = utf8(voting::RESOLVABLE_TIME_METADATA_KEY);
+    // The vote stamps the proposal's resolvable-time metadata with now_seconds.
+    ensures simple_map::spec_contains_key(post_proposal.metadata, key);
+    ensures simple_map::spec_get(post_proposal.metadata, key) == std::bcs::to_bytes(timestamp::now_seconds());
+    aborts_if features::spec_partial_governance_voting_enabled() && used_voting_power + real_voting_power > MAX_U64;
+    aborts_if !features::spec_partial_governance_voting_enabled() && table::spec_contains(voting_records.votes, record_key);
+    aborts_if !exists<GovernanceEvents>(@aptos_framework);
+    let early_resolution_threshold = option::spec_borrow(proposal.early_resolution_vote_threshold);
+    let is_voting_period_over = timestamp::spec_now_seconds() > proposal_expiration;
+    // Cases _0.._3 precompute post-vote success for the yes (_0, _2) and
+    // no (_1, _3) branches, used by the two aborts_if blocks below.
+    let new_proposal_yes_votes_0 = proposal.yes_votes + real_voting_power;
+    let can_be_resolved_early_0 = option::spec_is_some(proposal.early_resolution_vote_threshold) &&
+                                (new_proposal_yes_votes_0 >= early_resolution_threshold ||
+                                 proposal.no_votes >= early_resolution_threshold);
+    let is_voting_closed_0 = is_voting_period_over || can_be_resolved_early_0;
+    let proposal_state_successed_0 = is_voting_closed_0 && new_proposal_yes_votes_0 > proposal.no_votes &&
+                                     new_proposal_yes_votes_0 + proposal.no_votes >= proposal.min_vote_threshold;
+    let new_proposal_no_votes_0 = proposal.no_votes + real_voting_power;
+    let can_be_resolved_early_1 = option::spec_is_some(proposal.early_resolution_vote_threshold) &&
+                                (proposal.yes_votes >= early_resolution_threshold ||
+                                 new_proposal_no_votes_0 >= early_resolution_threshold);
+    let is_voting_closed_1 = is_voting_period_over || can_be_resolved_early_1;
+    let proposal_state_successed_1 = is_voting_closed_1 && proposal.yes_votes > new_proposal_no_votes_0 &&
+                                     proposal.yes_votes + new_proposal_no_votes_0 >= proposal.min_vote_threshold;
+    let new_proposal_yes_votes_1 = proposal.yes_votes + real_voting_power;
+    let can_be_resolved_early_2 = option::spec_is_some(proposal.early_resolution_vote_threshold) &&
+                                (new_proposal_yes_votes_1 >= early_resolution_threshold ||
+                                 proposal.no_votes >= early_resolution_threshold);
+    let is_voting_closed_2 = is_voting_period_over || can_be_resolved_early_2;
+    let proposal_state_successed_2 = is_voting_closed_2 && new_proposal_yes_votes_1 > proposal.no_votes &&
+                                     new_proposal_yes_votes_1 + proposal.no_votes >= proposal.min_vote_threshold;
+    let new_proposal_no_votes_1 = proposal.no_votes + real_voting_power;
+    let can_be_resolved_early_3 = option::spec_is_some(proposal.early_resolution_vote_threshold) &&
+                                (proposal.yes_votes >= early_resolution_threshold ||
+                                 new_proposal_no_votes_1 >= early_resolution_threshold);
+    let is_voting_closed_3 = is_voting_period_over || can_be_resolved_early_3;
+    let proposal_state_successed_3 = is_voting_closed_3 && proposal.yes_votes > new_proposal_no_votes_1 &&
+                                     proposal.yes_votes + new_proposal_no_votes_1 >= proposal.min_vote_threshold;
+    // Post-state success, computed from the updated proposal tallies.
+    let post can_be_resolved_early = option::spec_is_some(proposal.early_resolution_vote_threshold) &&
+                                (post_proposal.yes_votes >= early_resolution_threshold ||
+                                 post_proposal.no_votes >= early_resolution_threshold);
+    let post is_voting_closed = is_voting_period_over || can_be_resolved_early;
+    let post proposal_state_successed = is_voting_closed && post_proposal.yes_votes > post_proposal.no_votes &&
+                                     post_proposal.yes_votes + post_proposal.no_votes >= proposal.min_vote_threshold;
+    let execution_hash = proposal.execution_hash;
+    let post post_approved_hashes = global<ApprovedExecutionHashes>(@aptos_framework);
+    // This enforces high-level requirement 3:
+    aborts_if
+        if (should_pass) {
+            proposal_state_successed_0 && !exists<ApprovedExecutionHashes>(@aptos_framework)
+        } else {
+            proposal_state_successed_1 && !exists<ApprovedExecutionHashes>(@aptos_framework)
+        };
+    aborts_if
+        if (should_pass) {
+            proposal_state_successed_2 && !exists<ApprovedExecutionHashes>(@aptos_framework)
+        } else {
+            proposal_state_successed_3 && !exists<ApprovedExecutionHashes>(@aptos_framework)
+        };
+    // A successful vote publishes the execution hash into the approved set.
+    ensures proposal_state_successed ==> simple_map::spec_contains_key(post_approved_hashes.hashes, proposal_id) &&
+                                         simple_map::spec_get(post_approved_hashes.hashes, proposal_id) == execution_hash;
+    aborts_if features::spec_partial_governance_voting_enabled() && !exists<VotingRecordsV2>(@aptos_framework);
+}
+
+ + + + + +### Function `add_approved_script_hash_script` + + +
public entry fun add_approved_script_hash_script(proposal_id: u64)
+
+ + + + +
requires chain_status::is_operating();
+include AddApprovedScriptHash;
+
+ + + + + + + +
schema AddApprovedScriptHash {
+    proposal_id: u64;
+    aborts_if !exists<ApprovedExecutionHashes>(@aptos_framework);
+    aborts_if !exists<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
+    let voting_forum = global<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
+    let proposal = table::spec_get(voting_forum.proposals, proposal_id);
+    aborts_if !table::spec_contains(voting_forum.proposals, proposal_id);
+    let early_resolution_threshold = option::spec_borrow(proposal.early_resolution_vote_threshold);
+    // Aborts while voting is still open: not yet expired and no early-resolution
+    // threshold reached by either side.
+    aborts_if timestamp::now_seconds() <= proposal.expiration_secs &&
+        (option::spec_is_none(proposal.early_resolution_vote_threshold) ||
+        proposal.yes_votes < early_resolution_threshold && proposal.no_votes < early_resolution_threshold);
+    // Aborts when voting is closed but the proposal failed (yes does not beat no,
+    // or total votes fall short of the minimum threshold).
+    aborts_if (timestamp::now_seconds() > proposal.expiration_secs ||
+        option::spec_is_some(proposal.early_resolution_vote_threshold) && (proposal.yes_votes >= early_resolution_threshold ||
+                                                                           proposal.no_votes >= early_resolution_threshold)) &&
+        (proposal.yes_votes <= proposal.no_votes || proposal.yes_votes + proposal.no_votes < proposal.min_vote_threshold);
+    let post post_approved_hashes = global<ApprovedExecutionHashes>(@aptos_framework);
+    // This enforces high-level requirement 4:
+    ensures simple_map::spec_contains_key(post_approved_hashes.hashes, proposal_id) &&
+        simple_map::spec_get(post_approved_hashes.hashes, proposal_id) == proposal.execution_hash;
+}
+
+ + + + + +### Function `add_approved_script_hash` + + +
public fun add_approved_script_hash(proposal_id: u64)
+
+ + + + +
requires chain_status::is_operating();
+include AddApprovedScriptHash;
+
+ + + + + +### Function `resolve` + + +
public fun resolve(proposal_id: u64, signer_address: address): signer
+
+ + +The ApprovedExecutionHashes resource, the GovernanceProposal voting forum, and the GovernanceResponsbility resource must exist under @aptos_framework. + + +
requires chain_status::is_operating();
+include VotingIsProposalResolvableAbortsif;
+let voting_forum = global<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
+let proposal = table::spec_get(voting_forum.proposals, proposal_id);
+let multi_step_key = utf8(voting::IS_MULTI_STEP_PROPOSAL_KEY);
+let has_multi_step_key = simple_map::spec_contains_key(proposal.metadata, multi_step_key);
+let is_multi_step_proposal = aptos_std::from_bcs::deserialize<bool>(simple_map::spec_get(proposal.metadata, multi_step_key));
+aborts_if has_multi_step_key && !aptos_std::from_bcs::deserializable<bool>(simple_map::spec_get(proposal.metadata, multi_step_key));
+aborts_if !string::spec_internal_check_utf8(voting::IS_MULTI_STEP_PROPOSAL_KEY);
+aborts_if has_multi_step_key && is_multi_step_proposal;
+let post post_voting_forum = global<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
+let post post_proposal = table::spec_get(post_voting_forum.proposals, proposal_id);
+ensures post_proposal.is_resolved == true && post_proposal.resolution_time_secs == timestamp::now_seconds();
+aborts_if option::spec_is_none(proposal.execution_content);
+aborts_if !exists<ApprovedExecutionHashes>(@aptos_framework);
+let post post_approved_hashes = global<ApprovedExecutionHashes>(@aptos_framework).hashes;
+ensures !simple_map::spec_contains_key(post_approved_hashes, proposal_id);
+include GetSignerAbortsIf;
+let governance_responsibility = global<GovernanceResponsbility>(@aptos_framework);
+let signer_cap = simple_map::spec_get(governance_responsibility.signer_caps, signer_address);
+let addr = signer_cap.account;
+ensures signer::address_of(result) == addr;
+
+ + + + + +### Function `resolve_multi_step_proposal` + + +
public fun resolve_multi_step_proposal(proposal_id: u64, signer_address: address, next_execution_hash: vector<u8>): signer
+
+ + + + +
requires chain_status::is_operating();
+pragma verify_duration_estimate = 120;
+include VotingIsProposalResolvableAbortsif;
+let voting_forum = global<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
+let proposal = table::spec_get(voting_forum.proposals, proposal_id);
+let post post_voting_forum = global<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
+let post post_proposal = table::spec_get(post_voting_forum.proposals, proposal_id);
+aborts_if !string::spec_internal_check_utf8(voting::IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY);
+let multi_step_in_execution_key = utf8(voting::IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY);
+let post is_multi_step_proposal_in_execution_value = simple_map::spec_get(post_proposal.metadata, multi_step_in_execution_key);
+aborts_if !string::spec_internal_check_utf8(voting::IS_MULTI_STEP_PROPOSAL_KEY);
+let multi_step_key = utf8(voting::IS_MULTI_STEP_PROPOSAL_KEY);
+aborts_if simple_map::spec_contains_key(proposal.metadata, multi_step_key) &&
+    !aptos_std::from_bcs::deserializable<bool>(simple_map::spec_get(proposal.metadata, multi_step_key));
+let is_multi_step = simple_map::spec_contains_key(proposal.metadata, multi_step_key) &&
+                    aptos_std::from_bcs::deserialize<bool>(simple_map::spec_get(proposal.metadata, multi_step_key));
+let next_execution_hash_is_empty = len(next_execution_hash) == 0;
+aborts_if !is_multi_step && !next_execution_hash_is_empty;
+aborts_if next_execution_hash_is_empty && is_multi_step && !simple_map::spec_contains_key(proposal.metadata, multi_step_in_execution_key);
+ensures next_execution_hash_is_empty ==> post_proposal.is_resolved == true && post_proposal.resolution_time_secs == timestamp::spec_now_seconds() &&
+    if (is_multi_step) {
+        is_multi_step_proposal_in_execution_value == std::bcs::serialize(false)
+    } else {
+        simple_map::spec_contains_key(proposal.metadata, multi_step_in_execution_key) ==>
+            is_multi_step_proposal_in_execution_value == std::bcs::serialize(true)
+    };
+ensures !next_execution_hash_is_empty ==> post_proposal.execution_hash == next_execution_hash;
+aborts_if !exists<ApprovedExecutionHashes>(@aptos_framework);
+let post post_approved_hashes = global<ApprovedExecutionHashes>(@aptos_framework).hashes;
+ensures next_execution_hash_is_empty ==> !simple_map::spec_contains_key(post_approved_hashes, proposal_id);
+ensures !next_execution_hash_is_empty ==>
+    simple_map::spec_get(post_approved_hashes, proposal_id) == next_execution_hash;
+include GetSignerAbortsIf;
+let governance_responsibility = global<GovernanceResponsbility>(@aptos_framework);
+let signer_cap = simple_map::spec_get(governance_responsibility.signer_caps, signer_address);
+let addr = signer_cap.account;
+ensures signer::address_of(result) == addr;
+
+ + + + + + + +
schema VotingIsProposalResolvableAbortsif {
+    proposal_id: u64;
+    aborts_if !exists<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
+    let voting_forum = global<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
+    let proposal = table::spec_get(voting_forum.proposals, proposal_id);
+    aborts_if !table::spec_contains(voting_forum.proposals, proposal_id);
+    let early_resolution_threshold = option::spec_borrow(proposal.early_resolution_vote_threshold);
+    let voting_period_over = timestamp::now_seconds() > proposal.expiration_secs;
+    let be_resolved_early = option::spec_is_some(proposal.early_resolution_vote_threshold) &&
+                                (proposal.yes_votes >= early_resolution_threshold ||
+                                 proposal.no_votes >= early_resolution_threshold);
+    let voting_closed = voting_period_over || be_resolved_early;
+    // The proposal must be closed, passed, and not yet resolved.
+    aborts_if voting_closed && (proposal.yes_votes <= proposal.no_votes || proposal.yes_votes + proposal.no_votes < proposal.min_vote_threshold);
+    aborts_if !voting_closed;
+    aborts_if proposal.is_resolved;
+    aborts_if !string::spec_internal_check_utf8(voting::RESOLVABLE_TIME_METADATA_KEY);
+    aborts_if !simple_map::spec_contains_key(proposal.metadata, utf8(voting::RESOLVABLE_TIME_METADATA_KEY));
+    // The resolvable-time stamp written at vote time must already be in the past.
+    let resolvable_time = aptos_std::from_bcs::deserialize<u64>(simple_map::spec_get(proposal.metadata, utf8(voting::RESOLVABLE_TIME_METADATA_KEY)));
+    aborts_if !aptos_std::from_bcs::deserializable<u64>(simple_map::spec_get(proposal.metadata, utf8(voting::RESOLVABLE_TIME_METADATA_KEY)));
+    aborts_if timestamp::now_seconds() <= resolvable_time;
+    // The resolving transaction's script hash must match the approved one.
+    aborts_if aptos_framework::transaction_context::spec_get_script_hash() != proposal.execution_hash;
+}
+
+ + + + + +### Function `remove_approved_hash` + + +
public fun remove_approved_hash(proposal_id: u64)
+
+ + +The ApprovedExecutionHashes resource and the GovernanceProposal voting forum must exist under @aptos_framework. + + +
aborts_if !exists<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
+aborts_if !exists<ApprovedExecutionHashes>(@aptos_framework);
+let voting_forum = global<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
+aborts_if !table::spec_contains(voting_forum.proposals, proposal_id);
+aborts_if !exists<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
+let proposal = table::spec_get(voting_forum.proposals, proposal_id);
+aborts_if !proposal.is_resolved;
+let post approved_hashes = global<ApprovedExecutionHashes>(@aptos_framework).hashes;
+ensures !simple_map::spec_contains_key(approved_hashes, proposal_id);
+
+ + + + + +### Function `reconfigure` + + +
public entry fun reconfigure(aptos_framework: &signer)
+
+ + + + +
pragma verify = false;
+aborts_if !system_addresses::is_aptos_framework_address(signer::address_of(aptos_framework));
+include reconfiguration_with_dkg::FinishRequirement {
+    framework: aptos_framework
+};
+include stake::GetReconfigStartTimeRequirement;
+include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply;
+requires chain_status::is_operating();
+requires exists<stake::ValidatorFees>(@aptos_framework);
+requires exists<CoinInfo<AptosCoin>>(@aptos_framework);
+requires exists<staking_config::StakingRewardsConfig>(@aptos_framework);
+include staking_config::StakingRewardsConfigRequirement;
+
+ + + + + +### Function `force_end_epoch` + + +
public entry fun force_end_epoch(aptos_framework: &signer)
+
+ + + + +
pragma verify = false;
+let address = signer::address_of(aptos_framework);
+include reconfiguration_with_dkg::FinishRequirement {
+    framework: aptos_framework
+};
+
+ + + + + + + +
schema VotingInitializationAbortIfs {
+    // With partial governance voting enabled, VotingRecordsV2 must already be
+    // initialized under @aptos_framework.
+    aborts_if features::spec_partial_governance_voting_enabled() && !exists<VotingRecordsV2>(@aptos_framework);
+}
+
+ + + + + +### Function `force_end_epoch_test_only` + + +
public entry fun force_end_epoch_test_only(aptos_framework: &signer)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `toggle_features` + + +
public fun toggle_features(aptos_framework: &signer, enable: vector<u64>, disable: vector<u64>)
+
+ + +Signer address must be @aptos_framework. +Address @aptos_framework must exist GovernanceConfig and GovernanceEvents. + + +
pragma verify = false;
+let addr = signer::address_of(aptos_framework);
+aborts_if addr != @aptos_framework;
+include reconfiguration_with_dkg::FinishRequirement {
+    framework: aptos_framework
+};
+include stake::GetReconfigStartTimeRequirement;
+include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply;
+requires chain_status::is_operating();
+requires exists<stake::ValidatorFees>(@aptos_framework);
+requires exists<CoinInfo<AptosCoin>>(@aptos_framework);
+requires exists<staking_config::StakingRewardsConfig>(@aptos_framework);
+include staking_config::StakingRewardsConfigRequirement;
+
+ + + + + +### Function `get_signer_testnet_only` + + +
public fun get_signer_testnet_only(core_resources: &signer, signer_address: address): signer
+
+ + +The signer's address must be @core_resources. +A MintCapStore resource must exist under the signer's address. +The GovernanceResponsbility resource must exist under @aptos_framework. + + +
aborts_if signer::address_of(core_resources) != @core_resources;
+aborts_if !exists<aptos_coin::MintCapStore>(signer::address_of(core_resources));
+include GetSignerAbortsIf;
+
+ + + + + +### Function `get_voting_power` + + +
#[view]
+public fun get_voting_power(pool_address: address): u64
+
+ + +The StakingConfig resource must exist under @aptos_framework. +The summed stake balances must not overflow. +A StakePool resource must exist at pool_address. + + +
include GetVotingPowerAbortsIf;
+let staking_config = global<staking_config::StakingConfig>(@aptos_framework);
+let allow_validator_set_change = staking_config.allow_validator_set_change;
+let stake_pool_res = global<stake::StakePool>(pool_address);
+ensures allow_validator_set_change ==> result == stake_pool_res.active.value + stake_pool_res.pending_active.value + stake_pool_res.pending_inactive.value;
+ensures !allow_validator_set_change ==> if (stake::spec_is_current_epoch_validator(pool_address)) {
+    result == stake_pool_res.active.value + stake_pool_res.pending_inactive.value
+} else {
+    result == 0
+};
+ensures result == spec_get_voting_power(pool_address, staking_config);
+
+ + + + + + + +
fun spec_get_voting_power(pool_address: address, staking_config: staking_config::StakingConfig): u64 {
+   // Mirrors get_voting_power (see the ensures clause above): when validator-set
+   // changes are allowed, all three stake buckets count; otherwise only a
+   // current-epoch validator may vote, with active + pending_inactive.
+   let allow_validator_set_change = staking_config.allow_validator_set_change;
+   let stake_pool_res = global<stake::StakePool>(pool_address);
+   if (allow_validator_set_change) {
+       stake_pool_res.active.value + stake_pool_res.pending_active.value + stake_pool_res.pending_inactive.value
+   } else if (!allow_validator_set_change && (stake::spec_is_current_epoch_validator(pool_address))) {
+       stake_pool_res.active.value + stake_pool_res.pending_inactive.value
+   } else {
+       0
+   }
+}
+
+ + + + + +### Function `get_signer` + + +
fun get_signer(signer_address: address): signer
+
+ + + + +
include GetSignerAbortsIf;
+
+ + + + + + + +
schema GetSignerAbortsIf {
+    signer_address: address;
+    aborts_if !exists<GovernanceResponsbility>(@aptos_framework);
+    let cap_map = global<GovernanceResponsbility>(@aptos_framework).signer_caps;
+    // A signer capability must have been registered for the requested address.
+    aborts_if !simple_map::spec_contains_key(cap_map, signer_address);
+}
+
+ + + + + +### Function `create_proposal_metadata` + + +
fun create_proposal_metadata(metadata_location: vector<u8>, metadata_hash: vector<u8>): simple_map::SimpleMap<string::String, vector<u8>>
+
+ + + + +
include CreateProposalMetadataAbortsIf;
+
+ + + + + + + +
schema CreateProposalMetadataAbortsIf {
+    metadata_location: vector<u8>;
+    metadata_hash: vector<u8>;
+    // Location and hash must be valid UTF-8 and at most 256 characters long,
+    // and the metadata map key constants must themselves be valid UTF-8.
+    aborts_if string::length(utf8(metadata_location)) > 256;
+    aborts_if string::length(utf8(metadata_hash)) > 256;
+    aborts_if !string::spec_internal_check_utf8(metadata_location);
+    aborts_if !string::spec_internal_check_utf8(metadata_hash);
+    aborts_if !string::spec_internal_check_utf8(METADATA_LOCATION_KEY);
+    aborts_if !string::spec_internal_check_utf8(METADATA_HASH_KEY);
+}
+
+ + + + + +### Function `assert_voting_initialization` + + +
fun assert_voting_initialization()
+
+ + + + +
include VotingInitializationAbortIfs;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/block.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/block.md new file mode 100644 index 0000000000000..5e56a8eac0dc7 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/block.md @@ -0,0 +1,1302 @@ + + + +# Module `0x1::block` + +This module defines a struct storing the metadata of the block and new block events. + + +- [Resource `BlockResource`](#0x1_block_BlockResource) +- [Resource `CommitHistory`](#0x1_block_CommitHistory) +- [Struct `NewBlockEvent`](#0x1_block_NewBlockEvent) +- [Struct `UpdateEpochIntervalEvent`](#0x1_block_UpdateEpochIntervalEvent) +- [Struct `NewBlock`](#0x1_block_NewBlock) +- [Struct `UpdateEpochInterval`](#0x1_block_UpdateEpochInterval) +- [Constants](#@Constants_0) +- [Function `initialize`](#0x1_block_initialize) +- [Function `initialize_commit_history`](#0x1_block_initialize_commit_history) +- [Function `update_epoch_interval_microsecs`](#0x1_block_update_epoch_interval_microsecs) +- [Function `get_epoch_interval_secs`](#0x1_block_get_epoch_interval_secs) +- [Function `block_prologue_common`](#0x1_block_block_prologue_common) +- [Function `block_prologue`](#0x1_block_block_prologue) +- [Function `block_prologue_ext`](#0x1_block_block_prologue_ext) +- [Function `get_current_block_height`](#0x1_block_get_current_block_height) +- [Function `emit_new_block_event`](#0x1_block_emit_new_block_event) +- [Function `emit_genesis_block_event`](#0x1_block_emit_genesis_block_event) +- [Function `emit_writeset_block_event`](#0x1_block_emit_writeset_block_event) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Resource `BlockResource`](#@Specification_1_BlockResource) + - [Resource `CommitHistory`](#@Specification_1_CommitHistory) + - [Function `initialize`](#@Specification_1_initialize) + - 
[Function `update_epoch_interval_microsecs`](#@Specification_1_update_epoch_interval_microsecs) + - [Function `get_epoch_interval_secs`](#@Specification_1_get_epoch_interval_secs) + - [Function `block_prologue_common`](#@Specification_1_block_prologue_common) + - [Function `block_prologue`](#@Specification_1_block_prologue) + - [Function `block_prologue_ext`](#@Specification_1_block_prologue_ext) + - [Function `get_current_block_height`](#@Specification_1_get_current_block_height) + - [Function `emit_new_block_event`](#@Specification_1_emit_new_block_event) + - [Function `emit_genesis_block_event`](#@Specification_1_emit_genesis_block_event) + - [Function `emit_writeset_block_event`](#@Specification_1_emit_writeset_block_event) + + +
use 0x1::account;
+use 0x1::error;
+use 0x1::event;
+use 0x1::features;
+use 0x1::option;
+use 0x1::randomness;
+use 0x1::reconfiguration;
+use 0x1::reconfiguration_with_dkg;
+use 0x1::stake;
+use 0x1::state_storage;
+use 0x1::system_addresses;
+use 0x1::table_with_length;
+use 0x1::timestamp;
+use 0x1::transaction_fee;
+
+ + + + + +## Resource `BlockResource` + +Should be in-sync with BlockResource rust struct in new_block.rs + + +
struct BlockResource has key
+
+ + + +
+Fields + + +
+
+height: u64 +
+
+ Height of the current block +
+
+epoch_interval: u64 +
+
+ Time period between epochs. +
+
+new_block_events: event::EventHandle<block::NewBlockEvent> +
+
+ Handle where events with the time of new blocks are emitted +
+
+update_epoch_interval_events: event::EventHandle<block::UpdateEpochIntervalEvent> +
+
+ +
+
+ + +
+ + + +## Resource `CommitHistory` + +Store new block events as a move resource, internally using a circular buffer. + + +
struct CommitHistory has key
+
+ + + +
+Fields + + +
+
+max_capacity: u32 +
+
+ +
+
+next_idx: u32 +
+
+ +
+
+table: table_with_length::TableWithLength<u32, block::NewBlockEvent> +
+
+ +
+
+ + +
+ + + +## Struct `NewBlockEvent` + +Should be in-sync with NewBlockEvent rust struct in new_block.rs + + +
struct NewBlockEvent has copy, drop, store
+
+ + + +
+Fields + + +
+
+hash: address +
+
+ +
+
+epoch: u64 +
+
+ +
+
+round: u64 +
+
+ +
+
+height: u64 +
+
+ +
+
+previous_block_votes_bitvec: vector<u8> +
+
+ +
+
+proposer: address +
+
+ +
+
+failed_proposer_indices: vector<u64> +
+
+ +
+
+time_microseconds: u64 +
+
+ On-chain time during the block at the given height +
+
+ + +
+ + + +## Struct `UpdateEpochIntervalEvent` + +Event emitted when a proposal is created. + + +
struct UpdateEpochIntervalEvent has drop, store
+
+ + + +
+Fields + + +
+
+old_epoch_interval: u64 +
+
+ +
+
+new_epoch_interval: u64 +
+
+ +
+
+ + +
+ + + +## Struct `NewBlock` + +Should be in-sync with NewBlockEvent rust struct in new_block.rs + + +
#[event]
+struct NewBlock has drop, store
+
+ + + +
+Fields + + +
+
+hash: address +
+
+ +
+
+epoch: u64 +
+
+ +
+
+round: u64 +
+
+ +
+
+height: u64 +
+
+ +
+
+previous_block_votes_bitvec: vector<u8> +
+
+ +
+
+proposer: address +
+
+ +
+
+failed_proposer_indices: vector<u64> +
+
+ +
+
+time_microseconds: u64 +
+
+ On-chain time during the block at the given height +
+
+ + +
+ + + +## Struct `UpdateEpochInterval` + +Event emitted when a proposal is created. + + +
#[event]
+struct UpdateEpochInterval has drop, store
+
+ + + +
+Fields + + +
+
+old_epoch_interval: u64 +
+
+ +
+
+new_epoch_interval: u64 +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const MAX_U64: u64 = 18446744073709551615;
+
+ + + + + +An invalid proposer was provided. Expected the proposer to be the VM or an active validator. + + +
const EINVALID_PROPOSER: u64 = 2;
+
+ + + + + +The number of new block events does not equal the current block height. + + +
const ENUM_NEW_BLOCK_EVENTS_DOES_NOT_MATCH_BLOCK_HEIGHT: u64 = 1;
+
+ + + + + +Epoch interval cannot be 0. + + +
const EZERO_EPOCH_INTERVAL: u64 = 3;
+
+ + + + + +The maximum capacity of the commit history cannot be 0. + + +
const EZERO_MAX_CAPACITY: u64 = 3;
+
+ + + + + +## Function `initialize` + +This can only be called during Genesis. + + +
public(friend) fun initialize(aptos_framework: &signer, epoch_interval_microsecs: u64)
+
+ + + +
+Implementation + + +
public(friend) fun initialize(aptos_framework: &signer, epoch_interval_microsecs: u64) {
+    // Genesis-only setup: caller must be the framework account and the epoch
+    // interval must be non-zero.
+    system_addresses::assert_aptos_framework(aptos_framework);
+    assert!(epoch_interval_microsecs > 0, error::invalid_argument(EZERO_EPOCH_INTERVAL));
+
+    // Circular-buffer history of recent block events; capacity is hard-coded to
+    // 2000 here (initialize_commit_history accepts a caller-chosen capacity).
+    move_to<CommitHistory>(aptos_framework, CommitHistory {
+        max_capacity: 2000,
+        next_idx: 0,
+        table: table_with_length::new(),
+    });
+
+    // Block state starts at height 0 with fresh event handles.
+    move_to<BlockResource>(
+        aptos_framework,
+        BlockResource {
+            height: 0,
+            epoch_interval: epoch_interval_microsecs,
+            new_block_events: account::new_event_handle<NewBlockEvent>(aptos_framework),
+            update_epoch_interval_events: account::new_event_handle<UpdateEpochIntervalEvent>(aptos_framework),
+        }
+    );
+}
+
+ + + +
+ + + +## Function `initialize_commit_history` + +Initialize the commit history resource if it's not in genesis. + + +
public fun initialize_commit_history(fx: &signer, max_capacity: u32)
+
+ + + +
+Implementation + + +
public fun initialize_commit_history(fx: &signer, max_capacity: u32) {
+    assert!(max_capacity > 0, error::invalid_argument(EZERO_MAX_CAPACITY));
+    move_to<CommitHistory>(fx, CommitHistory {
+        max_capacity,
+        next_idx: 0,
+        table: table_with_length::new(),
+    });
+}
+
+ + + +
+ + + +## Function `update_epoch_interval_microsecs` + +Update the epoch interval. +Can only be called as part of the Aptos governance proposal process established by the AptosGovernance module. + + +
public fun update_epoch_interval_microsecs(aptos_framework: &signer, new_epoch_interval: u64)
+
+ + + +
+Implementation + + +
public fun update_epoch_interval_microsecs(
+    aptos_framework: &signer,
+    new_epoch_interval: u64,
+) acquires BlockResource {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    assert!(new_epoch_interval > 0, error::invalid_argument(EZERO_EPOCH_INTERVAL));
+
+    let block_resource = borrow_global_mut<BlockResource>(@aptos_framework);
+    let old_epoch_interval = block_resource.epoch_interval;
+    block_resource.epoch_interval = new_epoch_interval;
+
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            UpdateEpochInterval { old_epoch_interval, new_epoch_interval },
+        );
+    };
+    event::emit_event<UpdateEpochIntervalEvent>(
+        &mut block_resource.update_epoch_interval_events,
+        UpdateEpochIntervalEvent { old_epoch_interval, new_epoch_interval },
+    );
+}
+
+ + + +
+ + + +## Function `get_epoch_interval_secs` + +Return epoch interval in seconds. + + +
#[view]
+public fun get_epoch_interval_secs(): u64
+
+ + + +
+Implementation + + +
public fun get_epoch_interval_secs(): u64 acquires BlockResource {
+    borrow_global<BlockResource>(@aptos_framework).epoch_interval / 1000000
+}
+
+ + + +
+ + + +## Function `block_prologue_common` + + + +
fun block_prologue_common(vm: &signer, hash: address, epoch: u64, round: u64, proposer: address, failed_proposer_indices: vector<u64>, previous_block_votes_bitvec: vector<u8>, timestamp: u64): u64
+
+ + + +
+Implementation + + +
fun block_prologue_common(
+    vm: &signer,
+    hash: address,
+    epoch: u64,
+    round: u64,
+    proposer: address,
+    failed_proposer_indices: vector<u64>,
+    previous_block_votes_bitvec: vector<u8>,
+    timestamp: u64
+): u64 acquires BlockResource, CommitHistory {
+    // Operational constraint: can only be invoked by the VM.
+    system_addresses::assert_vm(vm);
+
+    // Blocks can only be produced by a valid proposer or by the VM itself for Nil blocks (no user txs).
+    assert!(
+        proposer == @vm_reserved || stake::is_current_epoch_validator(proposer),
+        error::permission_denied(EINVALID_PROPOSER),
+    );
+
+    let proposer_index = option::none();
+    if (proposer != @vm_reserved) {
+        proposer_index = option::some(stake::get_validator_index(proposer));
+    };
+
+    let block_metadata_ref = borrow_global_mut<BlockResource>(@aptos_framework);
+    block_metadata_ref.height = event::counter(&block_metadata_ref.new_block_events);
+
+    // Emit both event v1 and v2 for compatibility. Eventually only module events will be kept.
+    let new_block_event = NewBlockEvent {
+        hash,
+        epoch,
+        round,
+        height: block_metadata_ref.height,
+        previous_block_votes_bitvec,
+        proposer,
+        failed_proposer_indices,
+        time_microseconds: timestamp,
+    };
+    let new_block_event_v2 = NewBlock {
+        hash,
+        epoch,
+        round,
+        height: block_metadata_ref.height,
+        previous_block_votes_bitvec,
+        proposer,
+        failed_proposer_indices,
+        time_microseconds: timestamp,
+    };
+    emit_new_block_event(vm, &mut block_metadata_ref.new_block_events, new_block_event, new_block_event_v2);
+
+    if (features::collect_and_distribute_gas_fees()) {
+        // Assign the fees collected from the previous block to the previous block proposer.
+        // If for any reason the fees cannot be assigned, this function burns the collected coins.
+        transaction_fee::process_collected_fees();
+        // Set the proposer of this block as the receiver of the fees, so that the fees for this
+        // block are assigned to the right account.
+        transaction_fee::register_proposer_for_fee_collection(proposer);
+    };
+
+    // Performance scores have to be updated before the epoch transition as the transaction that triggers the
+    // transition is the last block in the previous epoch.
+    stake::update_performance_statistics(proposer_index, failed_proposer_indices);
+    state_storage::on_new_block(reconfiguration::current_epoch());
+
+    block_metadata_ref.epoch_interval
+}
+
+ + + +
+ + + +## Function `block_prologue` + +Set the metadata for the current block. +The runtime always runs this before executing the transactions in a block. + + +
fun block_prologue(vm: signer, hash: address, epoch: u64, round: u64, proposer: address, failed_proposer_indices: vector<u64>, previous_block_votes_bitvec: vector<u8>, timestamp: u64)
+
+ + + +
+Implementation + + +
fun block_prologue(
+    vm: signer,
+    hash: address,
+    epoch: u64,
+    round: u64,
+    proposer: address,
+    failed_proposer_indices: vector<u64>,
+    previous_block_votes_bitvec: vector<u8>,
+    timestamp: u64
+) acquires BlockResource, CommitHistory {
+    let epoch_interval = block_prologue_common(&vm, hash, epoch, round, proposer, failed_proposer_indices, previous_block_votes_bitvec, timestamp);
+    randomness::on_new_block(&vm, epoch, round, option::none());
+    if (timestamp - reconfiguration::last_reconfiguration_time() >= epoch_interval) {
+        reconfiguration::reconfigure();
+    };
+}
+
+ + + +
+ + + +## Function `block_prologue_ext` + +block_prologue() but trigger reconfiguration with DKG after epoch timed out. + + +
fun block_prologue_ext(vm: signer, hash: address, epoch: u64, round: u64, proposer: address, failed_proposer_indices: vector<u64>, previous_block_votes_bitvec: vector<u8>, timestamp: u64, randomness_seed: option::Option<vector<u8>>)
+
+ + + +
+Implementation + + +
fun block_prologue_ext(
+    vm: signer,
+    hash: address,
+    epoch: u64,
+    round: u64,
+    proposer: address,
+    failed_proposer_indices: vector<u64>,
+    previous_block_votes_bitvec: vector<u8>,
+    timestamp: u64,
+    randomness_seed: Option<vector<u8>>,
+) acquires BlockResource, CommitHistory {
+    let epoch_interval = block_prologue_common(
+        &vm,
+        hash,
+        epoch,
+        round,
+        proposer,
+        failed_proposer_indices,
+        previous_block_votes_bitvec,
+        timestamp
+    );
+    randomness::on_new_block(&vm, epoch, round, randomness_seed);
+
+    if (timestamp - reconfiguration::last_reconfiguration_time() >= epoch_interval) {
+        reconfiguration_with_dkg::try_start();
+    };
+}
+
+ + + +
+ + + +## Function `get_current_block_height` + +Get the current block height + + +
#[view]
+public fun get_current_block_height(): u64
+
+ + + +
+Implementation + + +
public fun get_current_block_height(): u64 acquires BlockResource {
+    borrow_global<BlockResource>(@aptos_framework).height
+}
+
+ + + +
+ + + +## Function `emit_new_block_event` + +Emit the event and update height and global timestamp + + +
fun emit_new_block_event(vm: &signer, event_handle: &mut event::EventHandle<block::NewBlockEvent>, new_block_event: block::NewBlockEvent, new_block_event_v2: block::NewBlock)
+
+ + + +
+Implementation + + +
fun emit_new_block_event(
+    vm: &signer,
+    event_handle: &mut EventHandle<NewBlockEvent>,
+    new_block_event: NewBlockEvent,
+    new_block_event_v2: NewBlock
+) acquires CommitHistory {
+    if (exists<CommitHistory>(@aptos_framework)) {
+        let commit_history_ref = borrow_global_mut<CommitHistory>(@aptos_framework);
+        let idx = commit_history_ref.next_idx;
+        if (table_with_length::contains(&commit_history_ref.table, idx)) {
+            table_with_length::remove(&mut commit_history_ref.table, idx);
+        };
+        table_with_length::add(&mut commit_history_ref.table, idx, copy new_block_event);
+        spec {
+            assume idx + 1 <= MAX_U32;
+        };
+        commit_history_ref.next_idx = (idx + 1) % commit_history_ref.max_capacity;
+    };
+    timestamp::update_global_time(vm, new_block_event.proposer, new_block_event.time_microseconds);
+    assert!(
+        event::counter(event_handle) == new_block_event.height,
+        error::invalid_argument(ENUM_NEW_BLOCK_EVENTS_DOES_NOT_MATCH_BLOCK_HEIGHT),
+    );
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(new_block_event_v2);
+    };
+    event::emit_event<NewBlockEvent>(event_handle, new_block_event);
+}
+
+ + + +
+ + + +## Function `emit_genesis_block_event` + +Emit a NewBlockEvent event. This function will be invoked by genesis directly to generate the very first +reconfiguration event. + + +
fun emit_genesis_block_event(vm: signer)
+
+ + + +
+Implementation + + +
fun emit_genesis_block_event(vm: signer) acquires BlockResource, CommitHistory {
+    let block_metadata_ref = borrow_global_mut<BlockResource>(@aptos_framework);
+    let genesis_id = @0x0;
+    emit_new_block_event(
+        &vm,
+        &mut block_metadata_ref.new_block_events,
+        NewBlockEvent {
+            hash: genesis_id,
+            epoch: 0,
+            round: 0,
+            height: 0,
+            previous_block_votes_bitvec: vector::empty(),
+            proposer: @vm_reserved,
+            failed_proposer_indices: vector::empty(),
+            time_microseconds: 0,
+        },
+        NewBlock {
+            hash: genesis_id,
+            epoch: 0,
+            round: 0,
+            height: 0,
+            previous_block_votes_bitvec: vector::empty(),
+            proposer: @vm_reserved,
+            failed_proposer_indices: vector::empty(),
+            time_microseconds: 0,
+        }
+    );
+}
+
+ + + +
+ + + +## Function `emit_writeset_block_event` + +Emit a NewBlockEvent event. This function will be invoked by write set script directly to generate the +new block event for WriteSetPayload. + + +
public fun emit_writeset_block_event(vm_signer: &signer, fake_block_hash: address)
+
+ + + +
+Implementation + + +
public fun emit_writeset_block_event(vm_signer: &signer, fake_block_hash: address) acquires BlockResource, CommitHistory {
+    system_addresses::assert_vm(vm_signer);
+    let block_metadata_ref = borrow_global_mut<BlockResource>(@aptos_framework);
+    block_metadata_ref.height = event::counter(&block_metadata_ref.new_block_events);
+
+    emit_new_block_event(
+        vm_signer,
+        &mut block_metadata_ref.new_block_events,
+        NewBlockEvent {
+            hash: fake_block_hash,
+            epoch: reconfiguration::current_epoch(),
+            round: MAX_U64,
+            height: block_metadata_ref.height,
+            previous_block_votes_bitvec: vector::empty(),
+            proposer: @vm_reserved,
+            failed_proposer_indices: vector::empty(),
+            time_microseconds: timestamp::now_microseconds(),
+        },
+        NewBlock {
+            hash: fake_block_hash,
+            epoch: reconfiguration::current_epoch(),
+            round: MAX_U64,
+            height: block_metadata_ref.height,
+            previous_block_votes_bitvec: vector::empty(),
+            proposer: @vm_reserved,
+            failed_proposer_indices: vector::empty(),
+            time_microseconds: timestamp::now_microseconds(),
+        }
+    );
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1During the module's initialization, it guarantees that the BlockResource resource moves under the Aptos framework account with initial values.HighThe initialize function is responsible for setting up the initial state of the module, ensuring that the following conditions are met (1) the BlockResource resource is created, indicating its existence within the module's context, and moved under the Aptos framework account, (2) the block height is set to zero during initialization, and (3) the epoch interval is greater than zero.Formally Verified via Initialize.
2Only the Aptos framework address may execute the following functionalities: (1) initialize BlockResource, and (2) update the epoch interval.CriticalThe initialize and update_epoch_interval_microsecs functions ensure that only aptos_framework can call them.Formally Verified via Initialize and update_epoch_interval_microsecs.
3When updating the epoch interval, its value must be greater than zero and BlockResource must exist.HighThe update_epoch_interval_microsecs function asserts that new_epoch_interval is greater than zero and updates BlockResource's state.Formally verified via UpdateEpochIntervalMicrosecs and epoch_interval.
4Only a valid proposer or the virtual machine is authorized to produce blocks.CriticalDuring the execution of the block_prologue function, the validity of the proposer address is verified when setting the metadata for the current block.Formally Verified via block_prologue.
5While emitting a new block event, the number of them is equal to the current block height.MediumThe emit_new_block_event function asserts that the number of new block events equals the current block height.Formally Verified via emit_new_block_event.
+ + + + + + +### Module-level Specification + + +
invariant [suspendable] chain_status::is_operating() ==> exists<BlockResource>(@aptos_framework);
+invariant [suspendable] chain_status::is_operating() ==> exists<CommitHistory>(@aptos_framework);
+
+ + + + + +### Resource `BlockResource` + + +
struct BlockResource has key
+
+ + + +
+
+height: u64 +
+
+ Height of the current block +
+
+epoch_interval: u64 +
+
+ Time period between epochs. +
+
+new_block_events: event::EventHandle<block::NewBlockEvent> +
+
+ Handle where events with the time of new blocks are emitted +
+
+update_epoch_interval_events: event::EventHandle<block::UpdateEpochIntervalEvent> +
+
+ +
+
+ + + +
// This enforces high-level requirement 3:
+invariant epoch_interval > 0;
+
+ + + + + +### Resource `CommitHistory` + + +
struct CommitHistory has key
+
+ + + +
+
+max_capacity: u32 +
+
+ +
+
+next_idx: u32 +
+
+ +
+
+table: table_with_length::TableWithLength<u32, block::NewBlockEvent> +
+
+ +
+
+ + + +
invariant max_capacity > 0;
+
+ + + + + +### Function `initialize` + + +
public(friend) fun initialize(aptos_framework: &signer, epoch_interval_microsecs: u64)
+
+ + +The caller is aptos_framework. +The epoch_interval_microsecs must be greater than 0. +The BlockResource is not under the caller before initializing. +The Account is not under the caller until the BlockResource is created for the caller. +Ensure that the BlockResource exists under the caller after initialization. +The number of new events created does not exceed MAX_U64. + + +
// This enforces high-level requirement 1:
+include Initialize;
+include NewEventHandle;
+let addr = signer::address_of(aptos_framework);
+let account = global<account::Account>(addr);
+aborts_if account.guid_creation_num + 2 >= account::MAX_GUID_CREATION_NUM;
+
+ + + + + +### Function `update_epoch_interval_microsecs` + + +
public fun update_epoch_interval_microsecs(aptos_framework: &signer, new_epoch_interval: u64)
+
+ + +The caller is @aptos_framework. +The new_epoch_interval must be greater than 0. +The BlockResource exists under @aptos_framework. + + +
// This enforces high-level requirement 3:
+include UpdateEpochIntervalMicrosecs;
+
+ + + + + + + +
schema UpdateEpochIntervalMicrosecs {
+    aptos_framework: signer;
+    new_epoch_interval: u64;
+    let addr = signer::address_of(aptos_framework);
+    // This enforces high-level requirement 2:
+    aborts_if addr != @aptos_framework;
+    aborts_if new_epoch_interval == 0;
+    aborts_if !exists<BlockResource>(addr);
+    let post block_resource = global<BlockResource>(addr);
+    ensures block_resource.epoch_interval == new_epoch_interval;
+}
+
+ + + + + +### Function `get_epoch_interval_secs` + + +
#[view]
+public fun get_epoch_interval_secs(): u64
+
+ + + + +
aborts_if !exists<BlockResource>(@aptos_framework);
+
+ + + + + +### Function `block_prologue_common` + + +
fun block_prologue_common(vm: &signer, hash: address, epoch: u64, round: u64, proposer: address, failed_proposer_indices: vector<u64>, previous_block_votes_bitvec: vector<u8>, timestamp: u64): u64
+
+ + + + +
pragma verify_duration_estimate = 1000;
+include BlockRequirement;
+aborts_if false;
+
+ + + + + +### Function `block_prologue` + + +
fun block_prologue(vm: signer, hash: address, epoch: u64, round: u64, proposer: address, failed_proposer_indices: vector<u64>, previous_block_votes_bitvec: vector<u8>, timestamp: u64)
+
+ + + + +
pragma verify_duration_estimate = 1000;
+requires timestamp >= reconfiguration::last_reconfiguration_time();
+include BlockRequirement;
+aborts_if false;
+
+ + + + + +### Function `block_prologue_ext` + + +
fun block_prologue_ext(vm: signer, hash: address, epoch: u64, round: u64, proposer: address, failed_proposer_indices: vector<u64>, previous_block_votes_bitvec: vector<u8>, timestamp: u64, randomness_seed: option::Option<vector<u8>>)
+
+ + + + +
pragma verify_duration_estimate = 1000;
+requires timestamp >= reconfiguration::last_reconfiguration_time();
+include BlockRequirement;
+include stake::ResourceRequirement;
+include stake::GetReconfigStartTimeRequirement;
+aborts_if false;
+
+ + + + + +### Function `get_current_block_height` + + +
#[view]
+public fun get_current_block_height(): u64
+
+ + + + +
aborts_if !exists<BlockResource>(@aptos_framework);
+
+ + + + + +### Function `emit_new_block_event` + + +
fun emit_new_block_event(vm: &signer, event_handle: &mut event::EventHandle<block::NewBlockEvent>, new_block_event: block::NewBlockEvent, new_block_event_v2: block::NewBlock)
+
+ + + + +
let proposer = new_block_event.proposer;
+let timestamp = new_block_event.time_microseconds;
+requires chain_status::is_operating();
+requires system_addresses::is_vm(vm);
+requires (proposer == @vm_reserved) ==> (timestamp::spec_now_microseconds() == timestamp);
+requires (proposer != @vm_reserved) ==> (timestamp::spec_now_microseconds() < timestamp);
+// This enforces high-level requirement 5:
+requires event::counter(event_handle) == new_block_event.height;
+aborts_if false;
+
+ + + + + +### Function `emit_genesis_block_event` + + +
fun emit_genesis_block_event(vm: signer)
+
+ + + + +
requires chain_status::is_operating();
+requires system_addresses::is_vm(vm);
+requires event::counter(global<BlockResource>(@aptos_framework).new_block_events) == 0;
+requires (timestamp::spec_now_microseconds() == 0);
+aborts_if false;
+
+ + + + + +### Function `emit_writeset_block_event` + + +
public fun emit_writeset_block_event(vm_signer: &signer, fake_block_hash: address)
+
+ + +The caller is @vm_reserved. +The BlockResource exists under @aptos_framework. +The Configuration exists under @aptos_framework. +The CurrentTimeMicroseconds exists under @aptos_framework. + + +
requires chain_status::is_operating();
+include EmitWritesetBlockEvent;
+
+ + + + + + + +
schema EmitWritesetBlockEvent {
+    vm_signer: signer;
+    let addr = signer::address_of(vm_signer);
+    aborts_if addr != @vm_reserved;
+    aborts_if !exists<BlockResource>(@aptos_framework);
+    aborts_if !exists<reconfiguration::Configuration>(@aptos_framework);
+    aborts_if !exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+}
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/chain_id.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/chain_id.md new file mode 100644 index 0000000000000..7a40b807b0418 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/chain_id.md @@ -0,0 +1,192 @@ + + + +# Module `0x1::chain_id` + +The chain id distinguishes between different chains (e.g., testnet and the main network). +One important role is to prevent transactions intended for one chain from being executed on another. +This code provides a container for storing a chain id and functions to initialize and get it. + + +- [Resource `ChainId`](#0x1_chain_id_ChainId) +- [Function `initialize`](#0x1_chain_id_initialize) +- [Function `get`](#0x1_chain_id_get) +- [Specification](#@Specification_0) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `initialize`](#@Specification_0_initialize) + - [Function `get`](#@Specification_0_get) + + +
use 0x1::system_addresses;
+
+ + + + + +## Resource `ChainId` + + + +
struct ChainId has key
+
+ + + +
+Fields + + +
+
+id: u8 +
+
+ +
+
+ + +
+ + + +## Function `initialize` + +Only called during genesis. +Publish the chain ID id of this instance under the SystemAddresses address + + +
public(friend) fun initialize(aptos_framework: &signer, id: u8)
+
+ + + +
+Implementation + + +
public(friend) fun initialize(aptos_framework: &signer, id: u8) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    move_to(aptos_framework, ChainId { id })
+}
+
+ + + +
+ + + +## Function `get` + +Return the chain ID of this instance. + + +
#[view]
+public fun get(): u8
+
+ + + +
+Implementation + + +
public fun get(): u8 acquires ChainId {
+    borrow_global<ChainId>(@aptos_framework).id
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1During genesis, the ChainId resource should be created and moved under the Aptos framework account with the specified chain id.MediumThe chain_id::initialize function is responsible for generating the ChainId resource and then storing it under the aptos_framework account.Formally verified via initialize.
2The chain id can only be fetched if the chain id resource exists under the Aptos framework account.LowThe chain_id::get function fetches the chain id by borrowing the ChainId resource from the aptos_framework account.Formally verified via get.
+ + + + + + +### Module-level Specification + + +
pragma verify = true;
+pragma aborts_if_is_strict;
+
+ + + + + +### Function `initialize` + + +
public(friend) fun initialize(aptos_framework: &signer, id: u8)
+
+ + + + +
let addr = signer::address_of(aptos_framework);
+aborts_if addr != @aptos_framework;
+aborts_if exists<ChainId>(@aptos_framework);
+// This enforces high-level requirement 1:
+ensures exists<ChainId>(addr);
+ensures global<ChainId>(addr).id == id;
+
+ + + + + +### Function `get` + + +
#[view]
+public fun get(): u8
+
+ + + + +
// This enforces high-level requirement 2:
+aborts_if !exists<ChainId>(@aptos_framework);
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/chain_status.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/chain_status.md new file mode 100644 index 0000000000000..a8230b3f81b99 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/chain_status.md @@ -0,0 +1,339 @@ + + + +# Module `0x1::chain_status` + +This module code to assert that it is running in genesis (Self::assert_genesis) or after +genesis (Self::assert_operating). These are essentially distinct states of the system. Specifically, +if Self::assert_operating succeeds, assumptions about invariants over the global state can be made +which reflect that the system has been successfully initialized. + + +- [Resource `GenesisEndMarker`](#0x1_chain_status_GenesisEndMarker) +- [Constants](#@Constants_0) +- [Function `set_genesis_end`](#0x1_chain_status_set_genesis_end) +- [Function `is_genesis`](#0x1_chain_status_is_genesis) +- [Function `is_operating`](#0x1_chain_status_is_operating) +- [Function `assert_operating`](#0x1_chain_status_assert_operating) +- [Function `assert_genesis`](#0x1_chain_status_assert_genesis) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `set_genesis_end`](#@Specification_1_set_genesis_end) + - [Function `assert_operating`](#@Specification_1_assert_operating) + - [Function `assert_genesis`](#@Specification_1_assert_genesis) + + +
use 0x1::error;
+use 0x1::system_addresses;
+
+ + + + + +## Resource `GenesisEndMarker` + +Marker to publish at the end of genesis. + + +
struct GenesisEndMarker has key
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Constants + + + + +The blockchain is not in the genesis status. + + +
const ENOT_GENESIS: u64 = 2;
+
+ + + + + +The blockchain is not in the operating status. + + +
const ENOT_OPERATING: u64 = 1;
+
+ + + + + +## Function `set_genesis_end` + +Marks that genesis has finished. + + +
public(friend) fun set_genesis_end(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
public(friend) fun set_genesis_end(aptos_framework: &signer) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    move_to(aptos_framework, GenesisEndMarker {});
+}
+
+ + + +
+ + + +## Function `is_genesis` + +Helper function to determine if Aptos is in genesis state. + + +
#[view]
+public fun is_genesis(): bool
+
+ + + +
+Implementation + + +
public fun is_genesis(): bool {
+    !exists<GenesisEndMarker>(@aptos_framework)
+}
+
+ + + +
+ + + +## Function `is_operating` + +Helper function to determine if Aptos is operating. This is +the same as !is_genesis() and is provided for convenience. +Testing is_operating() is more frequent than is_genesis(). + + +
#[view]
+public fun is_operating(): bool
+
+ + + +
+Implementation + + +
public fun is_operating(): bool {
+    exists<GenesisEndMarker>(@aptos_framework)
+}
+
+ + + +
+ + + +## Function `assert_operating` + +Helper function to assert operating (not genesis) state. + + +
public fun assert_operating()
+
+ + + +
+Implementation + + +
public fun assert_operating() {
+    assert!(is_operating(), error::invalid_state(ENOT_OPERATING));
+}
+
+ + + +
+ + + +## Function `assert_genesis` + +Helper function to assert genesis state. + + +
public fun assert_genesis()
+
+ + + +
+Implementation + + +
public fun assert_genesis() {
+    assert!(is_genesis(), error::invalid_state(ENOT_OPERATING));
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1The end of genesis mark should persist throughout the entire life of the chain.MediumThe Aptos framework account should never drop the GenesisEndMarker resource.Audited that GenesisEndMarker is published at the end of genesis and never removed. Formally verified via set_genesis_end that GenesisEndMarker is published.
2The status of the chain should never be genesis and operating at the same time.LowThe status of the chain is determined by the GenesisEndMarker resource.Formally verified via global invariant.
3The status of the chain should only be changed once, from genesis to operating.LowAttempting to assign a resource type more than once will abort.Formally verified via set_genesis_end.
+ + + + + + +### Module-level Specification + + +
pragma verify = true;
+pragma aborts_if_is_strict;
+// This enforces high-level requirement 2:
+invariant is_genesis() == !is_operating();
+
+ + + + + +### Function `set_genesis_end` + + +
public(friend) fun set_genesis_end(aptos_framework: &signer)
+
+ + + + +
pragma verify = true;
+pragma delegate_invariants_to_caller;
+let addr = signer::address_of(aptos_framework);
+aborts_if addr != @aptos_framework;
+// This enforces high-level requirement 3:
+aborts_if exists<GenesisEndMarker>(@aptos_framework);
+// This enforces high-level requirement 1:
+ensures global<GenesisEndMarker>(@aptos_framework) == GenesisEndMarker {};
+
+ + + + + + + +
schema RequiresIsOperating {
+    requires is_operating();
+}
+
+ + + + + +### Function `assert_operating` + + +
public fun assert_operating()
+
+ + + + +
aborts_if !is_operating();
+
+ + + + + +### Function `assert_genesis` + + +
public fun assert_genesis()
+
+ + + + +
aborts_if !is_genesis();
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/code.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/code.md new file mode 100644 index 0000000000000..2f51dad607837 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/code.md @@ -0,0 +1,1256 @@ + + + +# Module `0x1::code` + +This module supports functionality related to code management. + + +- [Resource `PackageRegistry`](#0x1_code_PackageRegistry) +- [Struct `PackageMetadata`](#0x1_code_PackageMetadata) +- [Struct `PackageDep`](#0x1_code_PackageDep) +- [Struct `ModuleMetadata`](#0x1_code_ModuleMetadata) +- [Struct `UpgradePolicy`](#0x1_code_UpgradePolicy) +- [Struct `PublishPackage`](#0x1_code_PublishPackage) +- [Struct `AllowedDep`](#0x1_code_AllowedDep) +- [Constants](#@Constants_0) +- [Function `upgrade_policy_arbitrary`](#0x1_code_upgrade_policy_arbitrary) +- [Function `upgrade_policy_compat`](#0x1_code_upgrade_policy_compat) +- [Function `upgrade_policy_immutable`](#0x1_code_upgrade_policy_immutable) +- [Function `can_change_upgrade_policy_to`](#0x1_code_can_change_upgrade_policy_to) +- [Function `initialize`](#0x1_code_initialize) +- [Function `publish_package`](#0x1_code_publish_package) +- [Function `freeze_code_object`](#0x1_code_freeze_code_object) +- [Function `publish_package_txn`](#0x1_code_publish_package_txn) +- [Function `check_upgradability`](#0x1_code_check_upgradability) +- [Function `check_coexistence`](#0x1_code_check_coexistence) +- [Function `check_dependencies`](#0x1_code_check_dependencies) +- [Function `is_policy_exempted_address`](#0x1_code_is_policy_exempted_address) +- [Function `get_module_names`](#0x1_code_get_module_names) +- [Function `request_publish`](#0x1_code_request_publish) +- [Function `request_publish_with_allowed_deps`](#0x1_code_request_publish_with_allowed_deps) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - 
[Module-level Specification](#module-level-spec) + - [Function `initialize`](#@Specification_1_initialize) + - [Function `publish_package`](#@Specification_1_publish_package) + - [Function `freeze_code_object`](#@Specification_1_freeze_code_object) + - [Function `publish_package_txn`](#@Specification_1_publish_package_txn) + - [Function `check_upgradability`](#@Specification_1_check_upgradability) + - [Function `check_coexistence`](#@Specification_1_check_coexistence) + - [Function `check_dependencies`](#@Specification_1_check_dependencies) + - [Function `get_module_names`](#@Specification_1_get_module_names) + - [Function `request_publish`](#@Specification_1_request_publish) + - [Function `request_publish_with_allowed_deps`](#@Specification_1_request_publish_with_allowed_deps) + + +
use 0x1::copyable_any;
+use 0x1::error;
+use 0x1::event;
+use 0x1::features;
+use 0x1::object;
+use 0x1::option;
+use 0x1::signer;
+use 0x1::string;
+use 0x1::system_addresses;
+use 0x1::util;
+use 0x1::vector;
+
+ + + + + +## Resource `PackageRegistry` + +The package registry at the given address. + + +
struct PackageRegistry has drop, store, key
+
+ + + +
+Fields + + +
+
+packages: vector<code::PackageMetadata> +
+
+ Packages installed at this address. +
+
+ + +
+ + + +## Struct `PackageMetadata` + +Metadata for a package. All byte blobs are represented as base64-of-gzipped-bytes + + +
struct PackageMetadata has copy, drop, store
+
+ + + +
+Fields + + +
+
+name: string::String +
+
+ Name of this package. +
+
+upgrade_policy: code::UpgradePolicy +
+
+ The upgrade policy of this package. +
+
+upgrade_number: u64 +
+
+ The number of times this module has been upgraded. Also serves as the on-chain version. + This field will be automatically assigned on successful upgrade. +
+
+source_digest: string::String +
+
+ The source digest of the sources in the package. This is constructed by first building the + sha256 of each individual source, then sorting them alphabetically, and sha256 them again. +
+
+manifest: vector<u8> +
+
+ The package manifest, in the Move.toml format. Gzipped text. +
+
+modules: vector<code::ModuleMetadata> +
+
+ The list of modules installed by this package. +
+
+deps: vector<code::PackageDep> +
+
+ Holds PackageDeps. +
+
+extension: option::Option<copyable_any::Any> +
+
+ For future extension +
+
+ + +
+ + + +## Struct `PackageDep` + +A dependency to a package published at address + + +
struct PackageDep has copy, drop, store
+
+ + + +
+Fields + + +
+
+account: address +
+
+ +
+
+package_name: string::String +
+
+ +
+
+ + +
+ + + +## Struct `ModuleMetadata` + +Metadata about a module in a package. + + +
struct ModuleMetadata has copy, drop, store
+
+ + + +
+Fields + + +
+
+name: string::String +
+
+ Name of the module. +
+
+source: vector<u8> +
+
+ Source text, gzipped String. Empty if not provided. +
+
+source_map: vector<u8> +
+
+ Source map, in compressed BCS. Empty if not provided. +
+
+extension: option::Option<copyable_any::Any> +
+
+ For future extensions. +
+
+ + +
+ + + +## Struct `UpgradePolicy` + +Describes an upgrade policy + + +
struct UpgradePolicy has copy, drop, store
+
+ + + +
+Fields + + +
+
+policy: u8 +
+
+ +
+
+ + +
+ + + +## Struct `PublishPackage` + +Event emitted when code is published to an address. + + +
#[event]
+struct PublishPackage has drop, store
+
+ + + +
+Fields + + +
+
+code_address: address +
+
+ +
+
+is_upgrade: bool +
+
+ +
+
+ + +
+ + + +## Struct `AllowedDep` + +A helper type for request_publish_with_allowed_deps + + +
struct AllowedDep has drop
+
+ + + +
+Fields + + +
+
+account: address +
+
+ Address of the module. +
+
+module_name: string::String +
+
+ Name of the module. If this is the empty string, then this serves as a wildcard for + all modules from this address. This is used for speeding up dependency checking for packages from + well-known framework addresses, where we can assume that there are no malicious packages. +
+
+ + +
+ + + +## Constants + + + + +code_object does not exist. + + +
const ECODE_OBJECT_DOES_NOT_EXIST: u64 = 10;
+
+ + + + + +A dependency to an arbitrary package must be on the same address. + + +
const EDEP_ARBITRARY_NOT_SAME_ADDRESS: u64 = 7;
+
+ + + + + +A dependency cannot have a weaker upgrade policy. + + +
const EDEP_WEAKER_POLICY: u64 = 6;
+
+ + + + + +Creating a package with incompatible upgrade policy is disabled. + + +
const EINCOMPATIBLE_POLICY_DISABLED: u64 = 8;
+
+ + + + + +Cannot delete a module that was published in the same package + + +
const EMODULE_MISSING: u64 = 4;
+
+ + + + + +Package contains duplicate module names with existing modules published in other packages on this address + + +
const EMODULE_NAME_CLASH: u64 = 1;
+
+ + + + + +Not the owner of the package registry. + + +
const ENOT_PACKAGE_OWNER: u64 = 9;
+
+ + + + + +Dependency could not be resolved to any published package. + + +
const EPACKAGE_DEP_MISSING: u64 = 5;
+
+ + + + + +Cannot upgrade an immutable package + + +
const EUPGRADE_IMMUTABLE: u64 = 2;
+
+ + + + + +Cannot downgrade a package's upgradability policy + + +
const EUPGRADE_WEAKER_POLICY: u64 = 3;
+
+ + + + + +## Function `upgrade_policy_arbitrary` + +Whether unconditional code upgrade with no compatibility check is allowed. This +publication mode should only be used for modules which aren't shared with other users. +The developer is responsible for not breaking memory layout of any resources they already +stored on chain. + + +
public fun upgrade_policy_arbitrary(): code::UpgradePolicy
+
+ + + +
+Implementation + + +
public fun upgrade_policy_arbitrary(): UpgradePolicy {
+    UpgradePolicy { policy: 0 }
+}
+
+ + + +
+ + + +## Function `upgrade_policy_compat` + +Whether a compatibility check should be performed for upgrades. The check only passes if +a new module has (a) the same public functions (b) for existing resources, no layout change. + + +
public fun upgrade_policy_compat(): code::UpgradePolicy
+
+ + + +
+Implementation + + +
public fun upgrade_policy_compat(): UpgradePolicy {
+    UpgradePolicy { policy: 1 }
+}
+
+ + + +
+ + + +## Function `upgrade_policy_immutable` + +Whether the modules in the package are immutable and cannot be upgraded. + + +
public fun upgrade_policy_immutable(): code::UpgradePolicy
+
+ + + +
+Implementation + + +
public fun upgrade_policy_immutable(): UpgradePolicy {
+    UpgradePolicy { policy: 2 }
+}
+
+ + + +
+ + + +## Function `can_change_upgrade_policy_to` + +Whether the upgrade policy can be changed. In general, the policy can be only +strengthened but not weakened. + + +
public fun can_change_upgrade_policy_to(from: code::UpgradePolicy, to: code::UpgradePolicy): bool
+
+ + + +
+Implementation + + +
public fun can_change_upgrade_policy_to(from: UpgradePolicy, to: UpgradePolicy): bool {
+    from.policy <= to.policy
+}
+
+ + + +
+ + + +## Function `initialize` + +Initialize package metadata for Genesis. + + +
fun initialize(aptos_framework: &signer, package_owner: &signer, metadata: code::PackageMetadata)
+
+ + + +
+Implementation + + +
fun initialize(aptos_framework: &signer, package_owner: &signer, metadata: PackageMetadata)
+acquires PackageRegistry {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    let addr = signer::address_of(package_owner);
+    if (!exists<PackageRegistry>(addr)) {
+        move_to(package_owner, PackageRegistry { packages: vector[metadata] })
+    } else {
+        vector::push_back(&mut borrow_global_mut<PackageRegistry>(addr).packages, metadata)
+    }
+}
+
+ + + +
+ + + +## Function `publish_package` + +Publishes a package at the given signer's address. The caller must provide package metadata describing the +package. + + +
public fun publish_package(owner: &signer, pack: code::PackageMetadata, code: vector<vector<u8>>)
+
+ + + +
+Implementation + + +
public fun publish_package(owner: &signer, pack: PackageMetadata, code: vector<vector<u8>>) acquires PackageRegistry {
+    // Disallow incompatible upgrade mode. Governance can decide later if this should be reconsidered.
+    assert!(
+        pack.upgrade_policy.policy > upgrade_policy_arbitrary().policy,
+        error::invalid_argument(EINCOMPATIBLE_POLICY_DISABLED),
+    );
+
+    let addr = signer::address_of(owner);
+    if (!exists<PackageRegistry>(addr)) {
+        move_to(owner, PackageRegistry { packages: vector::empty() })
+    };
+
+    // Checks for valid dependencies to other packages
+    let allowed_deps = check_dependencies(addr, &pack);
+
+    // Check package against conflicts
+    // To avoid prover compiler error on spec
+    // the package need to be an immutable variable
+    let module_names = get_module_names(&pack);
+    let package_immutable = &borrow_global<PackageRegistry>(addr).packages;
+    let len = vector::length(package_immutable);
+    let index = len;
+    let upgrade_number = 0;
+    vector::enumerate_ref(package_immutable
+    , |i, old| {
+        let old: &PackageMetadata = old;
+        if (old.name == pack.name) {
+            upgrade_number = old.upgrade_number + 1;
+            check_upgradability(old, &pack, &module_names);
+            index = i;
+        } else {
+            check_coexistence(old, &module_names)
+        };
+    });
+
+    // Assign the upgrade counter.
+    pack.upgrade_number = upgrade_number;
+
+    let packages = &mut borrow_global_mut<PackageRegistry>(addr).packages;
+    // Update registry
+    let policy = pack.upgrade_policy;
+    if (index < len) {
+        *vector::borrow_mut(packages, index) = pack
+    } else {
+        vector::push_back(packages, pack)
+    };
+
+    event::emit(PublishPackage {
+        code_address: addr,
+        is_upgrade: upgrade_number > 0
+    });
+
+    // Request publish
+    if (features::code_dependency_check_enabled())
+        request_publish_with_allowed_deps(addr, module_names, allowed_deps, code, policy.policy)
+    else
+    // The new `request_publish_with_allowed_deps` has not yet rolled out, so call downwards
+    // compatible code.
+        request_publish(addr, module_names, code, policy.policy)
+}
+
+ + + +
+ + + +## Function `freeze_code_object` + + + +
public fun freeze_code_object(publisher: &signer, code_object: object::Object<code::PackageRegistry>)
+
+ + + +
+Implementation + + +
public fun freeze_code_object(publisher: &signer, code_object: Object<PackageRegistry>) acquires PackageRegistry {
+    let code_object_addr = object::object_address(&code_object);
+    assert!(exists<PackageRegistry>(code_object_addr), error::not_found(ECODE_OBJECT_DOES_NOT_EXIST));
+    assert!(
+        object::is_owner(code_object, signer::address_of(publisher)),
+        error::permission_denied(ENOT_PACKAGE_OWNER)
+    );
+
+    let registry = borrow_global_mut<PackageRegistry>(code_object_addr);
+    vector::for_each_mut(&mut registry.packages, |pack| {
+        let package: &mut PackageMetadata = pack;
+        package.upgrade_policy = upgrade_policy_immutable();
+    });
+
+    // We unfortunately have to make a copy of each package to avoid borrow checker issues as check_dependencies
+    // needs to borrow PackageRegistry from the dependency packages.
+    // This would increase the amount of gas used, but this is a rare operation and it's rare to have many packages
+    // in a single code object.
+    vector::for_each(registry.packages, |pack| {
+        check_dependencies(code_object_addr, &pack);
+    });
+}
+
+ + + +
+ + + +## Function `publish_package_txn` + +Same as publish_package but as an entry function which can be called as a transaction. Because +of current restrictions for txn parameters, the metadata needs to be passed in serialized form. + + +
public entry fun publish_package_txn(owner: &signer, metadata_serialized: vector<u8>, code: vector<vector<u8>>)
+
+ + + +
+Implementation + + +
public entry fun publish_package_txn(owner: &signer, metadata_serialized: vector<u8>, code: vector<vector<u8>>)
+acquires PackageRegistry {
+    publish_package(owner, util::from_bytes<PackageMetadata>(metadata_serialized), code)
+}
+
+ + + +
+ + + +## Function `check_upgradability` + +Checks whether the given package is upgradable, and aborts if it is not. + + +
fun check_upgradability(old_pack: &code::PackageMetadata, new_pack: &code::PackageMetadata, new_modules: &vector<string::String>)
+
+ + + +
+Implementation + + +
fun check_upgradability(
+    old_pack: &PackageMetadata, new_pack: &PackageMetadata, new_modules: &vector<String>) {
+    assert!(old_pack.upgrade_policy.policy < upgrade_policy_immutable().policy,
+        error::invalid_argument(EUPGRADE_IMMUTABLE));
+    assert!(can_change_upgrade_policy_to(old_pack.upgrade_policy, new_pack.upgrade_policy),
+        error::invalid_argument(EUPGRADE_WEAKER_POLICY));
+    let old_modules = get_module_names(old_pack);
+
+    vector::for_each_ref(&old_modules, |old_module| {
+        assert!(
+            vector::contains(new_modules, old_module),
+            EMODULE_MISSING
+        );
+    });
+}
+
+ + + +
+ + + +## Function `check_coexistence` + +Checks whether a new package with given names can co-exist with old package. + + +
fun check_coexistence(old_pack: &code::PackageMetadata, new_modules: &vector<string::String>)
+
+ + + +
+Implementation + + +
fun check_coexistence(old_pack: &PackageMetadata, new_modules: &vector<String>) {
+    // The modules introduced by each package must not overlap with `names`.
+    vector::for_each_ref(&old_pack.modules, |old_mod| {
+        let old_mod: &ModuleMetadata = old_mod;
+        let j = 0;
+        while (j < vector::length(new_modules)) {
+            let name = vector::borrow(new_modules, j);
+            assert!(&old_mod.name != name, error::already_exists(EMODULE_NAME_CLASH));
+            j = j + 1;
+        };
+    });
+}
+
+ + + +
+ + + +## Function `check_dependencies` + +Check that the upgrade policies of all packages are equal or higher quality than this package. Also +compute the list of module dependencies which are allowed by the package metadata. The latter +is passed on to the native layer to verify that bytecode dependencies are actually what is claimed here. + + +
fun check_dependencies(publish_address: address, pack: &code::PackageMetadata): vector<code::AllowedDep>
+
+ + + +
+Implementation + + +
fun check_dependencies(publish_address: address, pack: &PackageMetadata): vector<AllowedDep>
+acquires PackageRegistry {
+    let allowed_module_deps = vector::empty();
+    let deps = &pack.deps;
+    vector::for_each_ref(deps, |dep| {
+        let dep: &PackageDep = dep;
+        assert!(exists<PackageRegistry>(dep.account), error::not_found(EPACKAGE_DEP_MISSING));
+        if (is_policy_exempted_address(dep.account)) {
+            // Allow all modules from this address, by using "" as a wildcard in the AllowedDep
+            let account: address = dep.account;
+            let module_name = string::utf8(b"");
+            vector::push_back(&mut allowed_module_deps, AllowedDep { account, module_name });
+        } else {
+            let registry = borrow_global<PackageRegistry>(dep.account);
+            let found = vector::any(&registry.packages, |dep_pack| {
+                let dep_pack: &PackageMetadata = dep_pack;
+                if (dep_pack.name == dep.package_name) {
+                    // Check policy
+                    assert!(
+                        dep_pack.upgrade_policy.policy >= pack.upgrade_policy.policy,
+                        error::invalid_argument(EDEP_WEAKER_POLICY)
+                    );
+                    if (dep_pack.upgrade_policy == upgrade_policy_arbitrary()) {
+                        assert!(
+                            dep.account == publish_address,
+                            error::invalid_argument(EDEP_ARBITRARY_NOT_SAME_ADDRESS)
+                        )
+                    };
+                    // Add allowed deps
+                    let account = dep.account;
+                    let k = 0;
+                    let r = vector::length(&dep_pack.modules);
+                    while (k < r) {
+                        let module_name = vector::borrow(&dep_pack.modules, k).name;
+                        vector::push_back(&mut allowed_module_deps, AllowedDep { account, module_name });
+                        k = k + 1;
+                    };
+                    true
+                } else {
+                    false
+                }
+            });
+            assert!(found, error::not_found(EPACKAGE_DEP_MISSING));
+        };
+    });
+    allowed_module_deps
+}
+
+ + + +
+ + + +## Function `is_policy_exempted_address` + +Core addresses which are exempted from the check that their policy matches the referring package. Without +this exemption, it would not be possible to define an immutable package based on the core system, which +requires to be upgradable for maintenance and evolution, and is configured to be compatible. + + +
fun is_policy_exempted_address(addr: address): bool
+
+ + + +
+Implementation + + +
fun is_policy_exempted_address(addr: address): bool {
+    addr == @1 || addr == @2 || addr == @3 || addr == @4 || addr == @5 ||
+        addr == @6 || addr == @7 || addr == @8 || addr == @9 || addr == @10
+}
+
+ + + +
+ + + +## Function `get_module_names` + +Get the names of the modules in a package. + + +
fun get_module_names(pack: &code::PackageMetadata): vector<string::String>
+
+ + + +
+Implementation + + +
fun get_module_names(pack: &PackageMetadata): vector<String> {
+    let module_names = vector::empty();
+    vector::for_each_ref(&pack.modules, |pack_module| {
+        let pack_module: &ModuleMetadata = pack_module;
+        vector::push_back(&mut module_names, pack_module.name);
+    });
+    module_names
+}
+
+ + + +
+ + + +## Function `request_publish` + +Native function to initiate module loading + + +
fun request_publish(owner: address, expected_modules: vector<string::String>, bundle: vector<vector<u8>>, policy: u8)
+
+ + + +
+Implementation + + +
native fun request_publish(
+    owner: address,
+    expected_modules: vector<String>,
+    bundle: vector<vector<u8>>,
+    policy: u8
+);
+
+ + + +
+ + + +## Function `request_publish_with_allowed_deps` + +Native function to initiate module loading, including a list of allowed dependencies. + + +
fun request_publish_with_allowed_deps(owner: address, expected_modules: vector<string::String>, allowed_deps: vector<code::AllowedDep>, bundle: vector<vector<u8>>, policy: u8)
+
+ + + +
+Implementation + + +
native fun request_publish_with_allowed_deps(
+    owner: address,
+    expected_modules: vector<String>,
+    allowed_deps: vector<AllowedDep>,
+    bundle: vector<vector<u8>>,
+    policy: u8
+);
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1Updating a package should fail if the user is not the owner of it.CriticalThe publish_package function may only be able to update the package if the signer is the actual owner of the package.The Aptos upgrade native functions have been manually audited.
2The arbitrary upgrade policy should never be used.CriticalThere should never be a pass of an arbitrary upgrade policy to the request_publish native function.Manually audited that it aborts if package.upgrade_policy.policy == 0.
3Should perform accurate compatibility checks when the policy indicates compatibility, ensuring it meets the required conditions.CriticalSpecifies if it should perform compatibility checks for upgrades. The check only passes if a new module has (a) the same public functions, and (b) for existing resources, no layout change.The Move upgradability patterns have been manually audited.
4Package upgrades should abide by policy change rules. In particular, The new upgrade policy must be equal to or stricter when compared to the old one. The original upgrade policy must not be immutable. The new package must contain all modules contained in the old package.MediumA package may only be updated using the publish_package function when the check_upgradability function returns true.This is audited by a manual review of the check_upgradability patterns.
5The upgrade policy of a package must not exceed the strictness level imposed by its dependencies.MediumThe upgrade_policy of a package may only be less than its dependencies throughout the upgrades. In addition, the native code properly restricts the use of dependencies outside the passed-in metadata.This has been manually audited.
6The extension for package metadata is currently unused.MediumThe extension field in PackageMetadata should be unused.Data invariant on the extension field has been manually audited.
7The upgrade number of a package increases incrementally in a monotonic manner with each subsequent upgrade.LowOn each upgrade of a particular package, the publish_package function updates the upgrade_number for that package.Post condition on upgrade_number has been manually audited.
+ + + + + + +### Module-level Specification + + +
pragma verify = true;
+pragma aborts_if_is_strict;
+
+ + + + + +### Function `initialize` + + +
fun initialize(aptos_framework: &signer, package_owner: &signer, metadata: code::PackageMetadata)
+
+ + + + +
let aptos_addr = signer::address_of(aptos_framework);
+let owner_addr = signer::address_of(package_owner);
+aborts_if !system_addresses::is_aptos_framework_address(aptos_addr);
+ensures exists<PackageRegistry>(owner_addr);
+
+ + + + + +### Function `publish_package` + + +
public fun publish_package(owner: &signer, pack: code::PackageMetadata, code: vector<vector<u8>>)
+
+ + + + +
pragma aborts_if_is_partial;
+let addr = signer::address_of(owner);
+modifies global<PackageRegistry>(addr);
+aborts_if pack.upgrade_policy.policy <= upgrade_policy_arbitrary().policy;
+
+ + + + + +### Function `freeze_code_object` + + +
public fun freeze_code_object(publisher: &signer, code_object: object::Object<code::PackageRegistry>)
+
+ + + + +
pragma aborts_if_is_partial;
+let code_object_addr = code_object.inner;
+aborts_if !exists<object::ObjectCore>(code_object_addr);
+aborts_if !exists<PackageRegistry>(code_object_addr);
+aborts_if !object::is_owner(code_object, signer::address_of(publisher));
+modifies global<PackageRegistry>(code_object_addr);
+
+ + + + + +### Function `publish_package_txn` + + +
public entry fun publish_package_txn(owner: &signer, metadata_serialized: vector<u8>, code: vector<vector<u8>>)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `check_upgradability` + + +
fun check_upgradability(old_pack: &code::PackageMetadata, new_pack: &code::PackageMetadata, new_modules: &vector<string::String>)
+
+ + + + +
pragma aborts_if_is_partial;
+aborts_if old_pack.upgrade_policy.policy >= upgrade_policy_immutable().policy;
+aborts_if !can_change_upgrade_policy_to(old_pack.upgrade_policy, new_pack.upgrade_policy);
+
+ + + + + +### Function `check_coexistence` + + +
fun check_coexistence(old_pack: &code::PackageMetadata, new_modules: &vector<string::String>)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `check_dependencies` + + +
fun check_dependencies(publish_address: address, pack: &code::PackageMetadata): vector<code::AllowedDep>
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `get_module_names` + + +
fun get_module_names(pack: &code::PackageMetadata): vector<string::String>
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] len(result) == len(pack.modules);
+ensures [abstract] forall i in 0..len(result): result[i] == pack.modules[i].name;
+
+ + + + + +### Function `request_publish` + + +
fun request_publish(owner: address, expected_modules: vector<string::String>, bundle: vector<vector<u8>>, policy: u8)
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `request_publish_with_allowed_deps` + + +
fun request_publish_with_allowed_deps(owner: address, expected_modules: vector<string::String>, allowed_deps: vector<code::AllowedDep>, bundle: vector<vector<u8>>, policy: u8)
+
+ + + + +
pragma opaque;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/coin.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/coin.md new file mode 100644 index 0000000000000..f232014d2925e --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/coin.md @@ -0,0 +1,4830 @@ + + + +# Module `0x1::coin` + +This module provides the foundation for typesafe Coins. + + +- [Struct `Coin`](#0x1_coin_Coin) +- [Struct `AggregatableCoin`](#0x1_coin_AggregatableCoin) +- [Resource `CoinStore`](#0x1_coin_CoinStore) +- [Resource `SupplyConfig`](#0x1_coin_SupplyConfig) +- [Resource `CoinInfo`](#0x1_coin_CoinInfo) +- [Struct `CoinDeposit`](#0x1_coin_CoinDeposit) +- [Struct `CoinWithdraw`](#0x1_coin_CoinWithdraw) +- [Struct `Deposit`](#0x1_coin_Deposit) +- [Struct `Withdraw`](#0x1_coin_Withdraw) +- [Struct `DepositEvent`](#0x1_coin_DepositEvent) +- [Struct `WithdrawEvent`](#0x1_coin_WithdrawEvent) +- [Struct `CoinEventHandleDeletion`](#0x1_coin_CoinEventHandleDeletion) +- [Struct `PairCreation`](#0x1_coin_PairCreation) +- [Resource `MigrationFlag`](#0x1_coin_MigrationFlag) +- [Struct `MintCapability`](#0x1_coin_MintCapability) +- [Struct `FreezeCapability`](#0x1_coin_FreezeCapability) +- [Struct `BurnCapability`](#0x1_coin_BurnCapability) +- [Resource `CoinConversionMap`](#0x1_coin_CoinConversionMap) +- [Resource `PairedCoinType`](#0x1_coin_PairedCoinType) +- [Resource `PairedFungibleAssetRefs`](#0x1_coin_PairedFungibleAssetRefs) +- [Struct `MintRefReceipt`](#0x1_coin_MintRefReceipt) +- [Struct `TransferRefReceipt`](#0x1_coin_TransferRefReceipt) +- [Struct `BurnRefReceipt`](#0x1_coin_BurnRefReceipt) +- [Resource `Ghost$supply`](#0x1_coin_Ghost$supply) +- [Resource `Ghost$aggregate_supply`](#0x1_coin_Ghost$aggregate_supply) +- [Constants](#@Constants_0) +- [Function `paired_metadata`](#0x1_coin_paired_metadata) +- [Function `create_coin_conversion_map`](#0x1_coin_create_coin_conversion_map) +- 
[Function `create_pairing`](#0x1_coin_create_pairing) +- [Function `is_apt`](#0x1_coin_is_apt) +- [Function `create_and_return_paired_metadata_if_not_exist`](#0x1_coin_create_and_return_paired_metadata_if_not_exist) +- [Function `ensure_paired_metadata`](#0x1_coin_ensure_paired_metadata) +- [Function `paired_coin`](#0x1_coin_paired_coin) +- [Function `coin_to_fungible_asset`](#0x1_coin_coin_to_fungible_asset) +- [Function `fungible_asset_to_coin`](#0x1_coin_fungible_asset_to_coin) +- [Function `assert_paired_metadata_exists`](#0x1_coin_assert_paired_metadata_exists) +- [Function `paired_mint_ref_exists`](#0x1_coin_paired_mint_ref_exists) +- [Function `get_paired_mint_ref`](#0x1_coin_get_paired_mint_ref) +- [Function `return_paired_mint_ref`](#0x1_coin_return_paired_mint_ref) +- [Function `paired_transfer_ref_exists`](#0x1_coin_paired_transfer_ref_exists) +- [Function `get_paired_transfer_ref`](#0x1_coin_get_paired_transfer_ref) +- [Function `return_paired_transfer_ref`](#0x1_coin_return_paired_transfer_ref) +- [Function `paired_burn_ref_exists`](#0x1_coin_paired_burn_ref_exists) +- [Function `get_paired_burn_ref`](#0x1_coin_get_paired_burn_ref) +- [Function `convert_and_take_paired_burn_ref`](#0x1_coin_convert_and_take_paired_burn_ref) +- [Function `return_paired_burn_ref`](#0x1_coin_return_paired_burn_ref) +- [Function `borrow_paired_burn_ref`](#0x1_coin_borrow_paired_burn_ref) +- [Function `initialize_supply_config`](#0x1_coin_initialize_supply_config) +- [Function `allow_supply_upgrades`](#0x1_coin_allow_supply_upgrades) +- [Function `initialize_aggregatable_coin`](#0x1_coin_initialize_aggregatable_coin) +- [Function `is_aggregatable_coin_zero`](#0x1_coin_is_aggregatable_coin_zero) +- [Function `drain_aggregatable_coin`](#0x1_coin_drain_aggregatable_coin) +- [Function `merge_aggregatable_coin`](#0x1_coin_merge_aggregatable_coin) +- [Function `collect_into_aggregatable_coin`](#0x1_coin_collect_into_aggregatable_coin) +- [Function 
`calculate_amount_to_withdraw`](#0x1_coin_calculate_amount_to_withdraw) +- [Function `maybe_convert_to_fungible_store`](#0x1_coin_maybe_convert_to_fungible_store) +- [Function `migrate_to_fungible_store`](#0x1_coin_migrate_to_fungible_store) +- [Function `coin_address`](#0x1_coin_coin_address) +- [Function `balance`](#0x1_coin_balance) +- [Function `is_balance_at_least`](#0x1_coin_is_balance_at_least) +- [Function `coin_balance`](#0x1_coin_coin_balance) +- [Function `is_coin_initialized`](#0x1_coin_is_coin_initialized) +- [Function `is_coin_store_frozen`](#0x1_coin_is_coin_store_frozen) +- [Function `is_account_registered`](#0x1_coin_is_account_registered) +- [Function `name`](#0x1_coin_name) +- [Function `symbol`](#0x1_coin_symbol) +- [Function `decimals`](#0x1_coin_decimals) +- [Function `supply`](#0x1_coin_supply) +- [Function `coin_supply`](#0x1_coin_coin_supply) +- [Function `burn`](#0x1_coin_burn) +- [Function `burn_from`](#0x1_coin_burn_from) +- [Function `deposit`](#0x1_coin_deposit) +- [Function `migrated_primary_fungible_store_exists`](#0x1_coin_migrated_primary_fungible_store_exists) +- [Function `force_deposit`](#0x1_coin_force_deposit) +- [Function `destroy_zero`](#0x1_coin_destroy_zero) +- [Function `extract`](#0x1_coin_extract) +- [Function `extract_all`](#0x1_coin_extract_all) +- [Function `freeze_coin_store`](#0x1_coin_freeze_coin_store) +- [Function `unfreeze_coin_store`](#0x1_coin_unfreeze_coin_store) +- [Function `upgrade_supply`](#0x1_coin_upgrade_supply) +- [Function `initialize`](#0x1_coin_initialize) +- [Function `initialize_with_parallelizable_supply`](#0x1_coin_initialize_with_parallelizable_supply) +- [Function `initialize_internal`](#0x1_coin_initialize_internal) +- [Function `merge`](#0x1_coin_merge) +- [Function `mint`](#0x1_coin_mint) +- [Function `register`](#0x1_coin_register) +- [Function `transfer`](#0x1_coin_transfer) +- [Function `value`](#0x1_coin_value) +- [Function `withdraw`](#0x1_coin_withdraw) +- [Function 
`zero`](#0x1_coin_zero) +- [Function `destroy_freeze_cap`](#0x1_coin_destroy_freeze_cap) +- [Function `destroy_mint_cap`](#0x1_coin_destroy_mint_cap) +- [Function `destroy_burn_cap`](#0x1_coin_destroy_burn_cap) +- [Function `mint_internal`](#0x1_coin_mint_internal) +- [Function `burn_internal`](#0x1_coin_burn_internal) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Struct `AggregatableCoin`](#@Specification_1_AggregatableCoin) + - [Function `coin_to_fungible_asset`](#@Specification_1_coin_to_fungible_asset) + - [Function `fungible_asset_to_coin`](#@Specification_1_fungible_asset_to_coin) + - [Function `initialize_supply_config`](#@Specification_1_initialize_supply_config) + - [Function `allow_supply_upgrades`](#@Specification_1_allow_supply_upgrades) + - [Function `initialize_aggregatable_coin`](#@Specification_1_initialize_aggregatable_coin) + - [Function `is_aggregatable_coin_zero`](#@Specification_1_is_aggregatable_coin_zero) + - [Function `drain_aggregatable_coin`](#@Specification_1_drain_aggregatable_coin) + - [Function `merge_aggregatable_coin`](#@Specification_1_merge_aggregatable_coin) + - [Function `collect_into_aggregatable_coin`](#@Specification_1_collect_into_aggregatable_coin) + - [Function `maybe_convert_to_fungible_store`](#@Specification_1_maybe_convert_to_fungible_store) + - [Function `coin_address`](#@Specification_1_coin_address) + - [Function `balance`](#@Specification_1_balance) + - [Function `is_coin_initialized`](#@Specification_1_is_coin_initialized) + - [Function `is_account_registered`](#@Specification_1_is_account_registered) + - [Function `name`](#@Specification_1_name) + - [Function `symbol`](#@Specification_1_symbol) + - [Function `decimals`](#@Specification_1_decimals) + - [Function `supply`](#@Specification_1_supply) + - [Function `coin_supply`](#@Specification_1_coin_supply) + - [Function `burn`](#@Specification_1_burn) + - [Function 
`burn_from`](#@Specification_1_burn_from) + - [Function `deposit`](#@Specification_1_deposit) + - [Function `force_deposit`](#@Specification_1_force_deposit) + - [Function `destroy_zero`](#@Specification_1_destroy_zero) + - [Function `extract`](#@Specification_1_extract) + - [Function `extract_all`](#@Specification_1_extract_all) + - [Function `freeze_coin_store`](#@Specification_1_freeze_coin_store) + - [Function `unfreeze_coin_store`](#@Specification_1_unfreeze_coin_store) + - [Function `upgrade_supply`](#@Specification_1_upgrade_supply) + - [Function `initialize`](#@Specification_1_initialize) + - [Function `initialize_with_parallelizable_supply`](#@Specification_1_initialize_with_parallelizable_supply) + - [Function `initialize_internal`](#@Specification_1_initialize_internal) + - [Function `merge`](#@Specification_1_merge) + - [Function `mint`](#@Specification_1_mint) + - [Function `register`](#@Specification_1_register) + - [Function `transfer`](#@Specification_1_transfer) + - [Function `withdraw`](#@Specification_1_withdraw) + - [Function `mint_internal`](#@Specification_1_mint_internal) + - [Function `burn_internal`](#@Specification_1_burn_internal) + + +
use 0x1::account;
+use 0x1::aggregator;
+use 0x1::aggregator_factory;
+use 0x1::create_signer;
+use 0x1::error;
+use 0x1::event;
+use 0x1::features;
+use 0x1::fungible_asset;
+use 0x1::guid;
+use 0x1::object;
+use 0x1::option;
+use 0x1::optional_aggregator;
+use 0x1::primary_fungible_store;
+use 0x1::signer;
+use 0x1::string;
+use 0x1::system_addresses;
+use 0x1::table;
+use 0x1::type_info;
+
+ + + + + +## Struct `Coin` + +Core data structures +Main structure representing a coin/token in an account's custody. + + +
struct Coin<CoinType> has store
+
+ + + +
+Fields + + +
+
+value: u64 +
+
+ Amount of coin this address has. +
+
+ + +
+ + + +## Struct `AggregatableCoin` + +Represents a coin with aggregator as its value. This allows updating +the coin in every transaction, avoiding read-modify-write conflicts. Only +used for gas fees distribution by Aptos Framework (0x1). + + +
struct AggregatableCoin<CoinType> has store
+
+ + + +
+Fields + + +
+
+value: aggregator::Aggregator +
+
+ Amount of aggregatable coin this address has. +
+
+ + +
+ + + +## Resource `CoinStore` + +A holder of a specific coin type and associated event handles. +These are kept in a single resource to ensure locality of data. + + +
struct CoinStore<CoinType> has key
+
+ + + +
+Fields + + +
+
+coin: coin::Coin<CoinType> +
+
+ +
+
+frozen: bool +
+
+ +
+
+deposit_events: event::EventHandle<coin::DepositEvent> +
+
+ +
+
+withdraw_events: event::EventHandle<coin::WithdrawEvent> +
+
+ +
+
+ + +
+ + + +## Resource `SupplyConfig` + +Configuration that controls the behavior of total coin supply. If the field +is set, coin creators are allowed to upgrade to parallelizable implementations. + + +
struct SupplyConfig has key
+
+ + + +
+Fields + + +
+
+allow_upgrades: bool +
+
+ +
+
+ + +
+ + + +## Resource `CoinInfo` + +Information about a specific coin type. Stored on the creator of the coin's account. + + +
struct CoinInfo<CoinType> has key
+
+ + + +
+Fields + + +
+
+name: string::String +
+
+ +
+
+symbol: string::String +
+
+ Symbol of the coin, usually a shorter version of the name. + For example, Singapore Dollar is SGD. +
+
+decimals: u8 +
+
+ Number of decimals used to get its user representation. + For example, if decimals equals 2, a balance of 505 coins should + be displayed to a user as 5.05 (505 / 10 ** 2). +
+
+supply: option::Option<optional_aggregator::OptionalAggregator> +
+
+ Amount of this coin type in existence. +
+
+ + +
+ + + +## Struct `CoinDeposit` + +Module event emitted when some amount of a coin is deposited into an account. + + +
#[event]
+struct CoinDeposit has drop, store
+
+ + + +
+Fields + + +
+
+coin_type: string::String +
+
+ +
+
+account: address +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `CoinWithdraw` + +Module event emitted when some amount of a coin is withdrawn from an account. + + +
#[event]
+struct CoinWithdraw has drop, store
+
+ + + +
+Fields + + +
+
+coin_type: string::String +
+
+ +
+
+account: address +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `Deposit` + + + +
#[event]
+#[deprecated]
+struct Deposit<CoinType> has drop, store
+
+ + + +
+Fields + + +
+
+account: address +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `Withdraw` + + + +
#[event]
+#[deprecated]
+struct Withdraw<CoinType> has drop, store
+
+ + + +
+Fields + + +
+
+account: address +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `DepositEvent` + +Event emitted when some amount of a coin is deposited into an account. + + +
struct DepositEvent has drop, store
+
+ + + +
+Fields + + +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `WithdrawEvent` + +Event emitted when some amount of a coin is withdrawn from an account. + + +
struct WithdrawEvent has drop, store
+
+ + + +
+Fields + + +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `CoinEventHandleDeletion` + +Module event emitted when the event handles related to coin store is deleted. + + +
#[event]
+struct CoinEventHandleDeletion has drop, store
+
+ + + +
+Fields + + +
+
+event_handle_creation_address: address +
+
+ +
+
+deleted_deposit_event_handle_creation_number: u64 +
+
+ +
+
+deleted_withdraw_event_handle_creation_number: u64 +
+
+ +
+
+ + +
+ + + +## Struct `PairCreation` + +Module event emitted when a new pair of coin and fungible asset is created. + + +
#[event]
+struct PairCreation has drop, store
+
+ + + +
+Fields + + +
+
+coin_type: type_info::TypeInfo +
+
+ +
+
+fungible_asset_metadata_address: address +
+
+ +
+
+ + +
+ + + +## Resource `MigrationFlag` + +The flag the existence of which indicates the primary fungible store is created by the migration from CoinStore. + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct MigrationFlag has key
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `MintCapability` + +Capability required to mint coins. + + +
struct MintCapability<CoinType> has copy, store
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `FreezeCapability` + +Capability required to freeze a coin store. + + +
struct FreezeCapability<CoinType> has copy, store
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `BurnCapability` + +Capability required to burn coins. + + +
struct BurnCapability<CoinType> has copy, store
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Resource `CoinConversionMap` + +The mapping between coin and fungible asset. + + +
struct CoinConversionMap has key
+
+ + + +
+Fields + + +
+
+coin_to_fungible_asset_map: table::Table<type_info::TypeInfo, object::Object<fungible_asset::Metadata>> +
+
+ +
+
+ + +
+ + + +## Resource `PairedCoinType` + +The paired coin type info stored in fungible asset metadata object. + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct PairedCoinType has key
+
+ + + +
+Fields + + +
+
+type: type_info::TypeInfo +
+
+ +
+
+ + +
+ + + +## Resource `PairedFungibleAssetRefs` + +The refs of the paired fungible asset. + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct PairedFungibleAssetRefs has key
+
+ + + +
+Fields + + +
+
+mint_ref_opt: option::Option<fungible_asset::MintRef> +
+
+ +
+
+transfer_ref_opt: option::Option<fungible_asset::TransferRef> +
+
+ +
+
+burn_ref_opt: option::Option<fungible_asset::BurnRef> +
+
+ +
+
+ + +
+ + + +## Struct `MintRefReceipt` + +The hot potato receipt for flash borrowing MintRef. + + +
struct MintRefReceipt
+
+ + + +
+Fields + + +
+
+metadata: object::Object<fungible_asset::Metadata> +
+
+ +
+
+ + +
+ + + +## Struct `TransferRefReceipt` + +The hot potato receipt for flash borrowing TransferRef. + + +
struct TransferRefReceipt
+
+ + + +
+Fields + + +
+
+metadata: object::Object<fungible_asset::Metadata> +
+
+ +
+
+ + +
+ + + +## Struct `BurnRefReceipt` + +The hot potato receipt for flash borrowing BurnRef. + + +
struct BurnRefReceipt
+
+ + + +
+Fields + + +
+
+metadata: object::Object<fungible_asset::Metadata> +
+
+ +
+
+ + +
+ + + +## Resource `Ghost$supply` + + + +
struct Ghost$supply<CoinType> has copy, drop, store, key
+
+ + + +
+Fields + + +
+
+v: num +
+
+ +
+
+ + +
+ + + +## Resource `Ghost$aggregate_supply` + + + +
struct Ghost$aggregate_supply<CoinType> has copy, drop, store, key
+
+ + + +
+Fields + + +
+
+v: num +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Maximum possible aggregatable coin value. + + +
const MAX_U64: u128 = 18446744073709551615;
+
+ + + + + +Maximum possible coin supply. + + +
const MAX_U128: u128 = 340282366920938463463374607431768211455;
+
+ + + + + +Not enough coins to complete transaction + + +
const EINSUFFICIENT_BALANCE: u64 = 6;
+
+ + + + + +The value of aggregatable coin used for transaction fees redistribution does not fit in u64. + + +
const EAGGREGATABLE_COIN_VALUE_TOO_LARGE: u64 = 14;
+
+ + + + + +APT pairing is not enabled yet. + + +
const EAPT_PAIRING_IS_NOT_ENABLED: u64 = 28;
+
+ + + + + +The BurnRef does not exist. + + +
const EBURN_REF_NOT_FOUND: u64 = 25;
+
+ + + + + +The BurnRefReceipt does not match the BurnRef to be returned. + + +
const EBURN_REF_RECEIPT_MISMATCH: u64 = 24;
+
+ + + + + +The coin conversion map is not created yet. + + +
const ECOIN_CONVERSION_MAP_NOT_FOUND: u64 = 27;
+
+ + + + + +Address of account which is used to initialize a coin CoinType doesn't match the deployer of module + + +
const ECOIN_INFO_ADDRESS_MISMATCH: u64 = 1;
+
+ + + + + +CoinType is already initialized as a coin + + +
const ECOIN_INFO_ALREADY_PUBLISHED: u64 = 2;
+
+ + + + + +CoinType hasn't been initialized as a coin + + +
const ECOIN_INFO_NOT_PUBLISHED: u64 = 3;
+
+ + + + + +Name of the coin is too long + + +
const ECOIN_NAME_TOO_LONG: u64 = 12;
+
+ + + + + +Deprecated. Account already has CoinStore registered for CoinType + + +
const ECOIN_STORE_ALREADY_PUBLISHED: u64 = 4;
+
+ + + + + +Account hasn't registered CoinStore for CoinType + + +
const ECOIN_STORE_NOT_PUBLISHED: u64 = 5;
+
+ + + + + +Cannot upgrade the total supply of coins to different implementation. + + +
const ECOIN_SUPPLY_UPGRADE_NOT_SUPPORTED: u64 = 11;
+
+ + + + + +Symbol of the coin is too long + + +
const ECOIN_SYMBOL_TOO_LONG: u64 = 13;
+
+ + + + + +The feature of migration from coin to fungible asset is not enabled. + + +
const ECOIN_TO_FUNGIBLE_ASSET_FEATURE_NOT_ENABLED: u64 = 18;
+
+ + + + + +The coin type from the map does not match the calling function type argument. + + +
const ECOIN_TYPE_MISMATCH: u64 = 17;
+
+ + + + + +Cannot destroy non-zero coins + + +
const EDESTRUCTION_OF_NONZERO_TOKEN: u64 = 7;
+
+ + + + + +CoinStore is frozen. Coins cannot be deposited or withdrawn + + +
const EFROZEN: u64 = 10;
+
+ + + + + +The migration process from coin to fungible asset is not enabled yet. + + +
const EMIGRATION_FRAMEWORK_NOT_ENABLED: u64 = 26;
+
+ + + + + +The MintRef does not exist. + + +
const EMINT_REF_NOT_FOUND: u64 = 21;
+
+ + + + + +The MintRefReceipt does not match the MintRef to be returned. + + +
const EMINT_REF_RECEIPT_MISMATCH: u64 = 20;
+
+ + + + + +Error regarding paired coin type of the fungible asset metadata. + + +
const EPAIRED_COIN: u64 = 15;
+
+ + + + + +Error regarding paired fungible asset metadata of a coin type. + + +
const EPAIRED_FUNGIBLE_ASSET: u64 = 16;
+
+ + + + + +PairedFungibleAssetRefs resource does not exist. + + +
const EPAIRED_FUNGIBLE_ASSET_REFS_NOT_FOUND: u64 = 19;
+
+ + + + + +The TransferRef does not exist. + + +
const ETRANSFER_REF_NOT_FOUND: u64 = 23;
+
+ + + + + +The TransferRefReceipt does not match the TransferRef to be returned. + + +
const ETRANSFER_REF_RECEIPT_MISMATCH: u64 = 22;
+
+ + + + + + + +
const MAX_COIN_NAME_LENGTH: u64 = 32;
+
+ + + + + + + +
const MAX_COIN_SYMBOL_LENGTH: u64 = 10;
+
+ + + + + +## Function `paired_metadata` + +Get the paired fungible asset metadata object of a coin type. If not exist, return option::none(). + + +
#[view]
+public fun paired_metadata<CoinType>(): option::Option<object::Object<fungible_asset::Metadata>>
+
+ + + +
+Implementation + + +
public fun paired_metadata<CoinType>(): Option<Object<Metadata>> acquires CoinConversionMap {
+    if (exists<CoinConversionMap>(@aptos_framework) && features::coin_to_fungible_asset_migration_feature_enabled(
+    )) {
+        let map = &borrow_global<CoinConversionMap>(@aptos_framework).coin_to_fungible_asset_map;
+        let type = type_info::type_of<CoinType>();
+        if (table::contains(map, type)) {
+            return option::some(*table::borrow(map, type))
+        }
+    };
+    option::none()
+}
+
+ + + +
+ + + +## Function `create_coin_conversion_map` + + + +
public entry fun create_coin_conversion_map(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
public entry fun create_coin_conversion_map(aptos_framework: &signer) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    if (!exists<CoinConversionMap>(@aptos_framework)) {
+        move_to(aptos_framework, CoinConversionMap {
+            coin_to_fungible_asset_map: table::new(),
+        })
+    };
+}
+
+ + + +
+ + + +## Function `create_pairing` + +Create APT pairing by passing AptosCoin. + + +
public entry fun create_pairing<CoinType>(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
public entry fun create_pairing<CoinType>(
+    aptos_framework: &signer
+) acquires CoinConversionMap, CoinInfo {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    create_and_return_paired_metadata_if_not_exist<CoinType>(true);
+}
+
+ + + +
+ + + +## Function `is_apt` + + + +
fun is_apt<CoinType>(): bool
+
+ + + +
+Implementation + + +
inline fun is_apt<CoinType>(): bool {
+    type_info::type_name<CoinType>() == string::utf8(b"0x1::aptos_coin::AptosCoin")
+}
+
+ + + +
+ + + +## Function `create_and_return_paired_metadata_if_not_exist` + + + +
fun create_and_return_paired_metadata_if_not_exist<CoinType>(allow_apt_creation: bool): object::Object<fungible_asset::Metadata>
+
+ + + +
+Implementation + + +
inline fun create_and_return_paired_metadata_if_not_exist<CoinType>(allow_apt_creation: bool): Object<Metadata> {
+    assert!(
+        features::coin_to_fungible_asset_migration_feature_enabled(),
+        error::invalid_state(EMIGRATION_FRAMEWORK_NOT_ENABLED)
+    );
+    assert!(exists<CoinConversionMap>(@aptos_framework), error::not_found(ECOIN_CONVERSION_MAP_NOT_FOUND));
+    let map = borrow_global_mut<CoinConversionMap>(@aptos_framework);
+    let type = type_info::type_of<CoinType>();
+    if (!table::contains(&map.coin_to_fungible_asset_map, type)) {
+        let is_apt = is_apt<CoinType>();
+        assert!(!is_apt || allow_apt_creation, error::invalid_state(EAPT_PAIRING_IS_NOT_ENABLED));
+        let metadata_object_cref =
+            if (is_apt) {
+                object::create_sticky_object_at_address(@aptos_framework, @aptos_fungible_asset)
+            } else {
+                object::create_named_object(
+                    &create_signer::create_signer(@aptos_fungible_asset),
+                    *string::bytes(&type_info::type_name<CoinType>())
+                )
+            };
+        primary_fungible_store::create_primary_store_enabled_fungible_asset(
+            &metadata_object_cref,
+            option::none(),
+            name<CoinType>(),
+            symbol<CoinType>(),
+            decimals<CoinType>(),
+            string::utf8(b""),
+            string::utf8(b""),
+        );
+
+        let metadata_object_signer = &object::generate_signer(&metadata_object_cref);
+        let type = type_info::type_of<CoinType>();
+        move_to(metadata_object_signer, PairedCoinType { type });
+        let metadata_obj = object::object_from_constructor_ref(&metadata_object_cref);
+
+        table::add(&mut map.coin_to_fungible_asset_map, type, metadata_obj);
+        event::emit(PairCreation {
+            coin_type: type,
+            fungible_asset_metadata_address: object_address(&metadata_obj)
+        });
+
+        // Generates all three refs
+        let mint_ref = fungible_asset::generate_mint_ref(&metadata_object_cref);
+        let transfer_ref = fungible_asset::generate_transfer_ref(&metadata_object_cref);
+        let burn_ref = fungible_asset::generate_burn_ref(&metadata_object_cref);
+        move_to(metadata_object_signer,
+            PairedFungibleAssetRefs {
+                mint_ref_opt: option::some(mint_ref),
+                transfer_ref_opt: option::some(transfer_ref),
+                burn_ref_opt: option::some(burn_ref),
+            }
+        );
+    };
+    *table::borrow(&map.coin_to_fungible_asset_map, type)
+}
+
+ + + +
+ + + +## Function `ensure_paired_metadata` + +Get the paired fungible asset metadata object of a coin type, create if not exist. + + +
public(friend) fun ensure_paired_metadata<CoinType>(): object::Object<fungible_asset::Metadata>
+
+ + + +
+Implementation + + +
public(friend) fun ensure_paired_metadata<CoinType>(): Object<Metadata> acquires CoinConversionMap, CoinInfo {
+    create_and_return_paired_metadata_if_not_exist<CoinType>(false)
+}
+
+ + + +
+ + + +## Function `paired_coin` + +Get the paired coin type of a fungible asset metadata object. + + +
#[view]
+public fun paired_coin(metadata: object::Object<fungible_asset::Metadata>): option::Option<type_info::TypeInfo>
+
+ + + +
+Implementation + + +
public fun paired_coin(metadata: Object<Metadata>): Option<TypeInfo> acquires PairedCoinType {
+    let metadata_addr = object::object_address(&metadata);
+    if (exists<PairedCoinType>(metadata_addr)) {
+        option::some(borrow_global<PairedCoinType>(metadata_addr).type)
+    } else {
+        option::none()
+    }
+}
+
+ + + +
+ + + +## Function `coin_to_fungible_asset` + +Conversion from coin to fungible asset + + +
public fun coin_to_fungible_asset<CoinType>(coin: coin::Coin<CoinType>): fungible_asset::FungibleAsset
+
+ + + +
+Implementation + + +
public fun coin_to_fungible_asset<CoinType>(
+    coin: Coin<CoinType>
+): FungibleAsset acquires CoinConversionMap, CoinInfo {
+    let metadata = ensure_paired_metadata<CoinType>();
+    let amount = burn_internal(coin);
+    fungible_asset::mint_internal(metadata, amount)
+}
+
+ + + +
+ + + +## Function `fungible_asset_to_coin` + +Conversion from fungible asset to coin. Not public to push the migration to FA. + + +
fun fungible_asset_to_coin<CoinType>(fungible_asset: fungible_asset::FungibleAsset): coin::Coin<CoinType>
+
+ + + +
+Implementation + + +
fun fungible_asset_to_coin<CoinType>(
+    fungible_asset: FungibleAsset
+): Coin<CoinType> acquires CoinInfo, PairedCoinType {
+    let metadata_addr = object::object_address(&fungible_asset::metadata_from_asset(&fungible_asset));
+    assert!(
+        object::object_exists<PairedCoinType>(metadata_addr),
+        error::not_found(EPAIRED_COIN)
+    );
+    let coin_type_info = borrow_global<PairedCoinType>(metadata_addr).type;
+    assert!(coin_type_info == type_info::type_of<CoinType>(), error::invalid_argument(ECOIN_TYPE_MISMATCH));
+    let amount = fungible_asset::burn_internal(fungible_asset);
+    mint_internal<CoinType>(amount)
+}
+
+ + + +
+ + + +## Function `assert_paired_metadata_exists` + + + +
fun assert_paired_metadata_exists<CoinType>(): object::Object<fungible_asset::Metadata>
+
+ + + +
+Implementation + + +
inline fun assert_paired_metadata_exists<CoinType>(): Object<Metadata> {
+    let metadata_opt = paired_metadata<CoinType>();
+    assert!(option::is_some(&metadata_opt), error::not_found(EPAIRED_FUNGIBLE_ASSET));
+    option::destroy_some(metadata_opt)
+}
+
+ + + +
+ + + +## Function `paired_mint_ref_exists` + +Check whether MintRef has not been taken. + + +
#[view]
+public fun paired_mint_ref_exists<CoinType>(): bool
+
+ + + +
+Implementation + + +
public fun paired_mint_ref_exists<CoinType>(): bool acquires CoinConversionMap, PairedFungibleAssetRefs {
+    let metadata = assert_paired_metadata_exists<CoinType>();
+    let metadata_addr = object_address(&metadata);
+    assert!(exists<PairedFungibleAssetRefs>(metadata_addr), error::internal(EPAIRED_FUNGIBLE_ASSET_REFS_NOT_FOUND));
+    option::is_some(&borrow_global<PairedFungibleAssetRefs>(metadata_addr).mint_ref_opt)
+}
+
+ + + +
+ + + +## Function `get_paired_mint_ref` + +Get the MintRef of paired fungible asset of a coin type from MintCapability. + + +
public fun get_paired_mint_ref<CoinType>(_: &coin::MintCapability<CoinType>): (fungible_asset::MintRef, coin::MintRefReceipt)
+
+ + + +
+Implementation + + +
public fun get_paired_mint_ref<CoinType>(
+    _: &MintCapability<CoinType>
+): (MintRef, MintRefReceipt) acquires CoinConversionMap, PairedFungibleAssetRefs {
+    let metadata = assert_paired_metadata_exists<CoinType>();
+    let metadata_addr = object_address(&metadata);
+    assert!(exists<PairedFungibleAssetRefs>(metadata_addr), error::internal(EPAIRED_FUNGIBLE_ASSET_REFS_NOT_FOUND));
+    let mint_ref_opt = &mut borrow_global_mut<PairedFungibleAssetRefs>(metadata_addr).mint_ref_opt;
+    assert!(option::is_some(mint_ref_opt), error::not_found(EMINT_REF_NOT_FOUND));
+    (option::extract(mint_ref_opt), MintRefReceipt { metadata })
+}
+
+ + + +
+ + + +## Function `return_paired_mint_ref` + +Return the MintRef with the hot potato receipt. + + +
public fun return_paired_mint_ref(mint_ref: fungible_asset::MintRef, receipt: coin::MintRefReceipt)
+
+ + + +
+Implementation + + +
public fun return_paired_mint_ref(mint_ref: MintRef, receipt: MintRefReceipt) acquires PairedFungibleAssetRefs {
+    let MintRefReceipt { metadata } = receipt;
+    assert!(
+        fungible_asset::mint_ref_metadata(&mint_ref) == metadata,
+        error::invalid_argument(EMINT_REF_RECEIPT_MISMATCH)
+    );
+    let metadata_addr = object_address(&metadata);
+    let mint_ref_opt = &mut borrow_global_mut<PairedFungibleAssetRefs>(metadata_addr).mint_ref_opt;
+    option::fill(mint_ref_opt, mint_ref);
+}
+
+ + + +
+ + + +## Function `paired_transfer_ref_exists` + +Check whether TransferRef still exists. + + +
#[view]
+public fun paired_transfer_ref_exists<CoinType>(): bool
+
+ + + +
+Implementation + + +
public fun paired_transfer_ref_exists<CoinType>(): bool acquires CoinConversionMap, PairedFungibleAssetRefs {
+    let metadata = assert_paired_metadata_exists<CoinType>();
+    let metadata_addr = object_address(&metadata);
+    assert!(exists<PairedFungibleAssetRefs>(metadata_addr), error::internal(EPAIRED_FUNGIBLE_ASSET_REFS_NOT_FOUND));
+    option::is_some(&borrow_global<PairedFungibleAssetRefs>(metadata_addr).transfer_ref_opt)
+}
+
+ + + +
+ + + +## Function `get_paired_transfer_ref` + +Get the TransferRef of paired fungible asset of a coin type from FreezeCapability. + + +
public fun get_paired_transfer_ref<CoinType>(_: &coin::FreezeCapability<CoinType>): (fungible_asset::TransferRef, coin::TransferRefReceipt)
+
+ + + +
+Implementation + + +
public fun get_paired_transfer_ref<CoinType>(
+    _: &FreezeCapability<CoinType>
+): (TransferRef, TransferRefReceipt) acquires CoinConversionMap, PairedFungibleAssetRefs {
+    let metadata = assert_paired_metadata_exists<CoinType>();
+    let metadata_addr = object_address(&metadata);
+    assert!(exists<PairedFungibleAssetRefs>(metadata_addr), error::internal(EPAIRED_FUNGIBLE_ASSET_REFS_NOT_FOUND));
+    let transfer_ref_opt = &mut borrow_global_mut<PairedFungibleAssetRefs>(metadata_addr).transfer_ref_opt;
+    assert!(option::is_some(transfer_ref_opt), error::not_found(ETRANSFER_REF_NOT_FOUND));
+    (option::extract(transfer_ref_opt), TransferRefReceipt { metadata })
+}
+
+ + + +
+ + + +## Function `return_paired_transfer_ref` + +Return the TransferRef with the hot potato receipt. + + +
public fun return_paired_transfer_ref(transfer_ref: fungible_asset::TransferRef, receipt: coin::TransferRefReceipt)
+
+ + + +
+Implementation + + +
public fun return_paired_transfer_ref(
+    transfer_ref: TransferRef,
+    receipt: TransferRefReceipt
+) acquires PairedFungibleAssetRefs {
+    let TransferRefReceipt { metadata } = receipt;
+    assert!(
+        fungible_asset::transfer_ref_metadata(&transfer_ref) == metadata,
+        error::invalid_argument(ETRANSFER_REF_RECEIPT_MISMATCH)
+    );
+    let metadata_addr = object_address(&metadata);
+    let transfer_ref_opt = &mut borrow_global_mut<PairedFungibleAssetRefs>(metadata_addr).transfer_ref_opt;
+    option::fill(transfer_ref_opt, transfer_ref);
+}
+
+ + + +
+ + + +## Function `paired_burn_ref_exists` + +Check whether BurnRef has not been taken. + + +
#[view]
+public fun paired_burn_ref_exists<CoinType>(): bool
+
+ + + +
+Implementation + + +
public fun paired_burn_ref_exists<CoinType>(): bool acquires CoinConversionMap, PairedFungibleAssetRefs {
+    let metadata = assert_paired_metadata_exists<CoinType>();
+    let metadata_addr = object_address(&metadata);
+    assert!(exists<PairedFungibleAssetRefs>(metadata_addr), error::internal(EPAIRED_FUNGIBLE_ASSET_REFS_NOT_FOUND));
+    option::is_some(&borrow_global<PairedFungibleAssetRefs>(metadata_addr).burn_ref_opt)
+}
+
+ + + +
+ + + +## Function `get_paired_burn_ref` + +Get the BurnRef of paired fungible asset of a coin type from BurnCapability. + + +
public fun get_paired_burn_ref<CoinType>(_: &coin::BurnCapability<CoinType>): (fungible_asset::BurnRef, coin::BurnRefReceipt)
+
+ + + +
+Implementation + + +
public fun get_paired_burn_ref<CoinType>(
+    _: &BurnCapability<CoinType>
+): (BurnRef, BurnRefReceipt) acquires CoinConversionMap, PairedFungibleAssetRefs {
+    let metadata = assert_paired_metadata_exists<CoinType>();
+    let metadata_addr = object_address(&metadata);
+    assert!(exists<PairedFungibleAssetRefs>(metadata_addr), error::internal(EPAIRED_FUNGIBLE_ASSET_REFS_NOT_FOUND));
+    let burn_ref_opt = &mut borrow_global_mut<PairedFungibleAssetRefs>(metadata_addr).burn_ref_opt;
+    assert!(option::is_some(burn_ref_opt), error::not_found(EBURN_REF_NOT_FOUND));
+    (option::extract(burn_ref_opt), BurnRefReceipt { metadata })
+}
+
+ + + +
+ + + +## Function `convert_and_take_paired_burn_ref` + + + +
public fun convert_and_take_paired_burn_ref<CoinType>(burn_cap: coin::BurnCapability<CoinType>): fungible_asset::BurnRef
+
+ + + +
+Implementation + + +
public fun convert_and_take_paired_burn_ref<CoinType>(
+    burn_cap: BurnCapability<CoinType>
+): BurnRef acquires CoinConversionMap, PairedFungibleAssetRefs {
+    destroy_burn_cap(burn_cap);
+    let metadata = assert_paired_metadata_exists<CoinType>();
+    let metadata_addr = object_address(&metadata);
+    assert!(exists<PairedFungibleAssetRefs>(metadata_addr), error::internal(EPAIRED_FUNGIBLE_ASSET_REFS_NOT_FOUND));
+    let burn_ref_opt = &mut borrow_global_mut<PairedFungibleAssetRefs>(metadata_addr).burn_ref_opt;
+    assert!(option::is_some(burn_ref_opt), error::not_found(EBURN_REF_NOT_FOUND));
+    option::extract(burn_ref_opt)
+}
+
+ + + +
+ + + +## Function `return_paired_burn_ref` + +Return the BurnRef with the hot potato receipt. + + +
public fun return_paired_burn_ref(burn_ref: fungible_asset::BurnRef, receipt: coin::BurnRefReceipt)
+
+ + + +
+Implementation + + +
public fun return_paired_burn_ref(
+    burn_ref: BurnRef,
+    receipt: BurnRefReceipt
+) acquires PairedFungibleAssetRefs {
+    let BurnRefReceipt { metadata } = receipt;
+    assert!(
+        fungible_asset::burn_ref_metadata(&burn_ref) == metadata,
+        error::invalid_argument(EBURN_REF_RECEIPT_MISMATCH)
+    );
+    let metadata_addr = object_address(&metadata);
+    let burn_ref_opt = &mut borrow_global_mut<PairedFungibleAssetRefs>(metadata_addr).burn_ref_opt;
+    option::fill(burn_ref_opt, burn_ref);
+}
+
+ + + +
+ + + +## Function `borrow_paired_burn_ref` + + + +
fun borrow_paired_burn_ref<CoinType>(_: &coin::BurnCapability<CoinType>): &fungible_asset::BurnRef
+
+ + + +
+Implementation + + +
inline fun borrow_paired_burn_ref<CoinType>(
+    _: &BurnCapability<CoinType>
+): &BurnRef acquires CoinConversionMap, PairedFungibleAssetRefs {
+    let metadata = assert_paired_metadata_exists<CoinType>();
+    let metadata_addr = object_address(&metadata);
+    assert!(exists<PairedFungibleAssetRefs>(metadata_addr), error::internal(EPAIRED_FUNGIBLE_ASSET_REFS_NOT_FOUND));
+    let burn_ref_opt = &mut borrow_global_mut<PairedFungibleAssetRefs>(metadata_addr).burn_ref_opt;
+    assert!(option::is_some(burn_ref_opt), error::not_found(EBURN_REF_NOT_FOUND));
+    option::borrow(burn_ref_opt)
+}
+
+ + + +
+ + + +## Function `initialize_supply_config` + +Publishes supply configuration. Initially, upgrading is not allowed. + + +
public(friend) fun initialize_supply_config(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
public(friend) fun initialize_supply_config(aptos_framework: &signer) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    move_to(aptos_framework, SupplyConfig { allow_upgrades: false });
+}
+
+ + + +
+ + + +## Function `allow_supply_upgrades` + +This should be called by on-chain governance to update the config and allow +or disallow upgradability of total supply. + + +
public fun allow_supply_upgrades(aptos_framework: &signer, allowed: bool)
+
+ + + +
+Implementation + + +
public fun allow_supply_upgrades(aptos_framework: &signer, allowed: bool) acquires SupplyConfig {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    let allow_upgrades = &mut borrow_global_mut<SupplyConfig>(@aptos_framework).allow_upgrades;
+    *allow_upgrades = allowed;
+}
+
+ + + +
+ + + +## Function `initialize_aggregatable_coin` + +Creates a new aggregatable coin with value overflowing on limit. Note that this function can +only be called by Aptos Framework (0x1) account for now because of create_aggregator. + + +
public(friend) fun initialize_aggregatable_coin<CoinType>(aptos_framework: &signer): coin::AggregatableCoin<CoinType>
+
+ + + +
+Implementation + + +
public(friend) fun initialize_aggregatable_coin<CoinType>(aptos_framework: &signer): AggregatableCoin<CoinType> {
+    let aggregator = aggregator_factory::create_aggregator(aptos_framework, MAX_U64);
+    AggregatableCoin<CoinType> {
+        value: aggregator,
+    }
+}
+
+ + + +
+ + + +## Function `is_aggregatable_coin_zero` + +Returns true if the value of aggregatable coin is zero. + + +
public(friend) fun is_aggregatable_coin_zero<CoinType>(coin: &coin::AggregatableCoin<CoinType>): bool
+
+ + + +
+Implementation + + +
public(friend) fun is_aggregatable_coin_zero<CoinType>(coin: &AggregatableCoin<CoinType>): bool {
+    let amount = aggregator::read(&coin.value);
+    amount == 0
+}
+
+ + + +
+ + + +## Function `drain_aggregatable_coin` + +Drains the aggregatable coin, setting it to zero and returning a standard coin. + + +
public(friend) fun drain_aggregatable_coin<CoinType>(coin: &mut coin::AggregatableCoin<CoinType>): coin::Coin<CoinType>
+
+ + + +
+Implementation + + +
public(friend) fun drain_aggregatable_coin<CoinType>(coin: &mut AggregatableCoin<CoinType>): Coin<CoinType> {
+    spec {
+        // TODO: The data invariant is not properly assumed from CollectedFeesPerBlock.
+        assume aggregator::spec_get_limit(coin.value) == MAX_U64;
+    };
+    let amount = aggregator::read(&coin.value);
+    assert!(amount <= MAX_U64, error::out_of_range(EAGGREGATABLE_COIN_VALUE_TOO_LARGE));
+    spec {
+        update aggregate_supply<CoinType> = aggregate_supply<CoinType> - amount;
+    };
+    aggregator::sub(&mut coin.value, amount);
+    spec {
+        update supply<CoinType> = supply<CoinType> + amount;
+    };
+    Coin<CoinType> {
+        value: (amount as u64),
+    }
+}
+
+ + + +
+ + + +## Function `merge_aggregatable_coin` + +Merges coin into aggregatable coin (dst_coin). + + +
public(friend) fun merge_aggregatable_coin<CoinType>(dst_coin: &mut coin::AggregatableCoin<CoinType>, coin: coin::Coin<CoinType>)
+
+ + + +
+Implementation + + +
public(friend) fun merge_aggregatable_coin<CoinType>(
+    dst_coin: &mut AggregatableCoin<CoinType>,
+    coin: Coin<CoinType>
+) {
+    spec {
+        update supply<CoinType> = supply<CoinType> - coin.value;
+    };
+    let Coin { value } = coin;
+    let amount = (value as u128);
+    spec {
+        update aggregate_supply<CoinType> = aggregate_supply<CoinType> + amount;
+    };
+    aggregator::add(&mut dst_coin.value, amount);
+}
+
+ + + +
+ + + +## Function `collect_into_aggregatable_coin` + +Collects a specified amount of coin from an account into aggregatable coin. + + +
public(friend) fun collect_into_aggregatable_coin<CoinType>(account_addr: address, amount: u64, dst_coin: &mut coin::AggregatableCoin<CoinType>)
+
+ + + +
+Implementation + + +
public(friend) fun collect_into_aggregatable_coin<CoinType>(
+    account_addr: address,
+    amount: u64,
+    dst_coin: &mut AggregatableCoin<CoinType>,
+) acquires CoinStore, CoinConversionMap, CoinInfo, PairedCoinType {
+    // Skip collecting if amount is zero.
+    if (amount == 0) {
+        return
+    };
+
+    let (coin_amount_to_collect, fa_amount_to_collect) = calculate_amount_to_withdraw<CoinType>(
+        account_addr,
+        amount
+    );
+    let coin = if (coin_amount_to_collect > 0) {
+        let coin_store = borrow_global_mut<CoinStore<CoinType>>(account_addr);
+        extract(&mut coin_store.coin, coin_amount_to_collect)
+    } else {
+        zero()
+    };
+    if (fa_amount_to_collect > 0) {
+        let store_addr = primary_fungible_store::primary_store_address(
+            account_addr,
+            option::destroy_some(paired_metadata<CoinType>())
+        );
+        let fa = fungible_asset::withdraw_internal(store_addr, fa_amount_to_collect);
+        merge(&mut coin, fungible_asset_to_coin<CoinType>(fa));
+    };
+    merge_aggregatable_coin(dst_coin, coin);
+}
+
+ + + +
+ + + +## Function `calculate_amount_to_withdraw` + + + +
fun calculate_amount_to_withdraw<CoinType>(account_addr: address, amount: u64): (u64, u64)
+
+ + + +
+Implementation + + +
inline fun calculate_amount_to_withdraw<CoinType>(
+    account_addr: address,
+    amount: u64
+): (u64, u64) {
+    let coin_balance = coin_balance<CoinType>(account_addr);
+    if (coin_balance >= amount) {
+        (amount, 0)
+    } else {
+        let metadata = paired_metadata<CoinType>();
+        if (option::is_some(&metadata) && primary_fungible_store::primary_store_exists(
+            account_addr,
+            option::destroy_some(metadata)
+        ))
+            (coin_balance, amount - coin_balance)
+        else
+            abort error::invalid_argument(EINSUFFICIENT_BALANCE)
+    }
+}
+
+ + + +
+ + + +## Function `maybe_convert_to_fungible_store` + + + +
fun maybe_convert_to_fungible_store<CoinType>(account: address)
+
+ + + +
+Implementation + + +
fun maybe_convert_to_fungible_store<CoinType>(account: address) acquires CoinStore, CoinConversionMap, CoinInfo {
+    if (!features::coin_to_fungible_asset_migration_feature_enabled()) {
+        abort error::unavailable(ECOIN_TO_FUNGIBLE_ASSET_FEATURE_NOT_ENABLED)
+    };
+    assert!(is_coin_initialized<CoinType>(), error::invalid_argument(ECOIN_INFO_NOT_PUBLISHED));
+
+    let metadata = ensure_paired_metadata<CoinType>();
+    let store = primary_fungible_store::ensure_primary_store_exists(account, metadata);
+    let store_address = object::object_address(&store);
+    if (exists<CoinStore<CoinType>>(account)) {
+        let CoinStore<CoinType> { coin, frozen, deposit_events, withdraw_events } = move_from<CoinStore<CoinType>>(
+            account
+        );
+        event::emit(
+            CoinEventHandleDeletion {
+                event_handle_creation_address: guid::creator_address(
+                    event::guid(&deposit_events)
+                ),
+                deleted_deposit_event_handle_creation_number: guid::creation_num(event::guid(&deposit_events)),
+                deleted_withdraw_event_handle_creation_number: guid::creation_num(event::guid(&withdraw_events))
+            }
+        );
+        event::destroy_handle(deposit_events);
+        event::destroy_handle(withdraw_events);
+        if (coin.value == 0) {
+            destroy_zero(coin);
+        } else {
+            fungible_asset::deposit(store, coin_to_fungible_asset(coin));
+        };
+        // Note:
+        // It is possible the primary fungible store may already exist before this function call.
+        // In this case, if the account owns a frozen CoinStore and an unfrozen primary fungible store, this
+        // function would convert and deposit the rest coin into the primary store and freeze it to make the
+        // `frozen` semantic as consistent as possible.
+        if (frozen != fungible_asset::is_frozen(store)) {
+            fungible_asset::set_frozen_flag_internal(store, frozen);
+        }
+    };
+    if (!exists<MigrationFlag>(store_address)) {
+        move_to(&create_signer::create_signer(store_address), MigrationFlag {});
+    }
+}
+
+ + + +
+ + + +## Function `migrate_to_fungible_store` + +Voluntarily migrate to fungible store for CoinType if not yet. + + +
public entry fun migrate_to_fungible_store<CoinType>(account: &signer)
+
+ + + +
+Implementation + + +
public entry fun migrate_to_fungible_store<CoinType>(
+    account: &signer
+) acquires CoinStore, CoinConversionMap, CoinInfo {
+    maybe_convert_to_fungible_store<CoinType>(signer::address_of(account));
+}
+
+ + + +
+ + + +## Function `coin_address` + +A helper function that returns the address of CoinType. + + +
fun coin_address<CoinType>(): address
+
+ + + +
+Implementation + + +
fun coin_address<CoinType>(): address {
+    let type_info = type_info::type_of<CoinType>();
+    type_info::account_address(&type_info)
+}
+
+ + + +
+ + + +## Function `balance` + +Returns the balance of owner for provided CoinType and its paired FA if exists. + + +
#[view]
+public fun balance<CoinType>(owner: address): u64
+
+ + + +
+Implementation + + +
public fun balance<CoinType>(owner: address): u64 acquires CoinConversionMap, CoinStore {
+    let paired_metadata = paired_metadata<CoinType>();
+    coin_balance<CoinType>(owner) + if (option::is_some(&paired_metadata)) {
+        primary_fungible_store::balance(
+            owner,
+            option::extract(&mut paired_metadata)
+        )
+    } else { 0 }
+}
+
+ + + +
+ + + +## Function `is_balance_at_least` + +Returns whether the balance of owner for provided CoinType and its paired FA is >= amount. + + +
#[view]
+public fun is_balance_at_least<CoinType>(owner: address, amount: u64): bool
+
+ + + +
+Implementation + + +
public fun is_balance_at_least<CoinType>(owner: address, amount: u64): bool acquires CoinConversionMap, CoinStore {
+    let coin_balance = coin_balance<CoinType>(owner);
+    if (coin_balance >= amount) {
+        return true
+    };
+
+    let paired_metadata = paired_metadata<CoinType>();
+    let left_amount = amount - coin_balance;
+    if (option::is_some(&paired_metadata)) {
+        primary_fungible_store::is_balance_at_least(
+            owner,
+            option::extract(&mut paired_metadata),
+            left_amount
+        )
+    } else { false }
+}
+
+ + + +
+ + + +## Function `coin_balance` + + + +
fun coin_balance<CoinType>(owner: address): u64
+
+ + + +
+Implementation + + +
inline fun coin_balance<CoinType>(owner: address): u64 {
+    if (exists<CoinStore<CoinType>>(owner)) {
+        borrow_global<CoinStore<CoinType>>(owner).coin.value
+    } else {
+        0
+    }
+}
+
+ + + +
+ + + +## Function `is_coin_initialized` + +Returns true if the type CoinType is an initialized coin. + + +
#[view]
+public fun is_coin_initialized<CoinType>(): bool
+
+ + + +
+Implementation + + +
public fun is_coin_initialized<CoinType>(): bool {
+    exists<CoinInfo<CoinType>>(coin_address<CoinType>())
+}
+
+ + + +
+ + + +## Function `is_coin_store_frozen` + +Returns true if account_addr has frozen the CoinStore or if it's not registered at all + + +
#[view]
+public fun is_coin_store_frozen<CoinType>(account_addr: address): bool
+
+ + + +
+Implementation + + +
public fun is_coin_store_frozen<CoinType>(
+    account_addr: address
+): bool acquires CoinStore, CoinConversionMap {
+    if (!is_account_registered<CoinType>(account_addr)) {
+        return true
+    };
+
+    let coin_store = borrow_global<CoinStore<CoinType>>(account_addr);
+    coin_store.frozen
+}
+
+ + + +
+ + + +## Function `is_account_registered` + +Returns true if account_addr is registered to receive CoinType. + + +
#[view]
+public fun is_account_registered<CoinType>(account_addr: address): bool
+
+ + + +
+Implementation + + +
public fun is_account_registered<CoinType>(account_addr: address): bool acquires CoinConversionMap {
+    assert!(is_coin_initialized<CoinType>(), error::invalid_argument(ECOIN_INFO_NOT_PUBLISHED));
+    if (exists<CoinStore<CoinType>>(account_addr)) {
+        true
+    } else {
+        let paired_metadata_opt = paired_metadata<CoinType>();
+        (option::is_some(
+            &paired_metadata_opt
+        ) && migrated_primary_fungible_store_exists(account_addr, option::destroy_some(paired_metadata_opt)))
+    }
+}
+
+ + + +
+ + + +## Function `name` + +Returns the name of the coin. + + +
#[view]
+public fun name<CoinType>(): string::String
+
+ + + +
+Implementation + + +
public fun name<CoinType>(): string::String acquires CoinInfo {
+    borrow_global<CoinInfo<CoinType>>(coin_address<CoinType>()).name
+}
+
+ + + +
+ + + +## Function `symbol` + +Returns the symbol of the coin, usually a shorter version of the name. + + +
#[view]
+public fun symbol<CoinType>(): string::String
+
+ + + +
+Implementation + + +
public fun symbol<CoinType>(): string::String acquires CoinInfo {
+    borrow_global<CoinInfo<CoinType>>(coin_address<CoinType>()).symbol
+}
+
+ + + +
+ + + +## Function `decimals` + +Returns the number of decimals used to get its user representation. +For example, if decimals equals 2, a balance of 505 coins should +be displayed to a user as 5.05 (505 / 10 ** 2). + + +
#[view]
+public fun decimals<CoinType>(): u8
+
+ + + +
+Implementation + + +
public fun decimals<CoinType>(): u8 acquires CoinInfo {
+    borrow_global<CoinInfo<CoinType>>(coin_address<CoinType>()).decimals
+}
+
+ + + +
+ + + +## Function `supply` + +Returns the amount of coin in existence. + + +
#[view]
+public fun supply<CoinType>(): option::Option<u128>
+
+ + + +
+Implementation + + +
public fun supply<CoinType>(): Option<u128> acquires CoinInfo, CoinConversionMap {
+    let coin_supply = coin_supply<CoinType>();
+    let metadata = paired_metadata<CoinType>();
+    if (option::is_some(&metadata)) {
+        let fungible_asset_supply = fungible_asset::supply(option::extract(&mut metadata));
+        if (option::is_some(&coin_supply)) {
+            let supply = option::borrow_mut(&mut coin_supply);
+            *supply = *supply + option::destroy_some(fungible_asset_supply);
+        };
+    };
+    coin_supply
+}
+
+ + + +
+ + + +## Function `coin_supply` + +Returns the amount of coin in existence. + + +
#[view]
+public fun coin_supply<CoinType>(): option::Option<u128>
+
+ + + +
+Implementation + + +
public fun coin_supply<CoinType>(): Option<u128> acquires CoinInfo {
+    let maybe_supply = &borrow_global<CoinInfo<CoinType>>(coin_address<CoinType>()).supply;
+    if (option::is_some(maybe_supply)) {
+        // We do track supply, in this case read from optional aggregator.
+        let supply = option::borrow(maybe_supply);
+        let value = optional_aggregator::read(supply);
+        option::some(value)
+    } else {
+        option::none()
+    }
+}
+
+ + + +
+ + + +## Function `burn` + +Burn coin with capability. +The capability _cap should be passed as a reference to BurnCapability<CoinType>. + + +
public fun burn<CoinType>(coin: coin::Coin<CoinType>, _cap: &coin::BurnCapability<CoinType>)
+
+ + + +
+Implementation + + +
public fun burn<CoinType>(coin: Coin<CoinType>, _cap: &BurnCapability<CoinType>) acquires CoinInfo {
+    burn_internal(coin);
+}
+
+ + + +
+ + + +## Function `burn_from` + +Burn coin from the specified account with capability. +The capability burn_cap should be passed as a reference to BurnCapability<CoinType>. +This function shouldn't fail as it's called as part of transaction fee burning. + +Note: This bypasses CoinStore::frozen -- coins within a frozen CoinStore can be burned. + + +
public fun burn_from<CoinType>(account_addr: address, amount: u64, burn_cap: &coin::BurnCapability<CoinType>)
+
+ + + +
+Implementation + + +
public fun burn_from<CoinType>(
+    account_addr: address,
+    amount: u64,
+    burn_cap: &BurnCapability<CoinType>,
+) acquires CoinInfo, CoinStore, CoinConversionMap, PairedFungibleAssetRefs {
+    // Skip burning if amount is zero. This shouldn't error out as it's called as part of transaction fee burning.
+    if (amount == 0) {
+        return
+    };
+
+    let (coin_amount_to_burn, fa_amount_to_burn) = calculate_amount_to_withdraw<CoinType>(
+        account_addr,
+        amount
+    );
+    if (coin_amount_to_burn > 0) {
+        let coin_store = borrow_global_mut<CoinStore<CoinType>>(account_addr);
+        let coin_to_burn = extract(&mut coin_store.coin, coin_amount_to_burn);
+        burn(coin_to_burn, burn_cap);
+    };
+    if (fa_amount_to_burn > 0) {
+        fungible_asset::burn_from(
+            borrow_paired_burn_ref(burn_cap),
+            primary_fungible_store::primary_store(account_addr, option::destroy_some(paired_metadata<CoinType>())),
+            fa_amount_to_burn
+        );
+    };
+}
+
+ + + +
+ + + +## Function `deposit` + +Deposit the coin balance into the recipient's account and emit an event. + + +
public fun deposit<CoinType>(account_addr: address, coin: coin::Coin<CoinType>)
+
+ + + +
+Implementation + + +
public fun deposit<CoinType>(
+    account_addr: address,
+    coin: Coin<CoinType>
+) acquires CoinStore, CoinConversionMap, CoinInfo {
+    if (exists<CoinStore<CoinType>>(account_addr)) {
+        let coin_store = borrow_global_mut<CoinStore<CoinType>>(account_addr);
+        assert!(
+            !coin_store.frozen,
+            error::permission_denied(EFROZEN),
+        );
+        if (std::features::module_event_migration_enabled()) {
+            event::emit(
+                CoinDeposit { coin_type: type_name<CoinType>(), account: account_addr, amount: coin.value }
+            );
+        };
+        event::emit_event<DepositEvent>(
+            &mut coin_store.deposit_events,
+            DepositEvent { amount: coin.value },
+        );
+        merge(&mut coin_store.coin, coin);
+    } else {
+        let metadata = paired_metadata<CoinType>();
+        if (option::is_some(&metadata) && migrated_primary_fungible_store_exists(
+            account_addr,
+            option::destroy_some(metadata)
+        )) {
+            primary_fungible_store::deposit(account_addr, coin_to_fungible_asset(coin));
+        } else {
+            abort error::not_found(ECOIN_STORE_NOT_PUBLISHED)
+        };
+    }
+}
+
+ + + +
+ + + +## Function `migrated_primary_fungible_store_exists` + + + +
fun migrated_primary_fungible_store_exists(account_address: address, metadata: object::Object<fungible_asset::Metadata>): bool
+
+ + + +
+Implementation + + +
inline fun migrated_primary_fungible_store_exists(
+    account_address: address,
+    metadata: Object<Metadata>
+): bool {
+    let primary_store_address = primary_fungible_store::primary_store_address<Metadata>(account_address, metadata);
+    fungible_asset::store_exists(primary_store_address) && (
+        // migration flag is needed, until we start defaulting new accounts to APT PFS
+        features::new_accounts_default_to_fa_apt_store_enabled() || exists<MigrationFlag>(primary_store_address)
+    )
+}
+
+ + + +
+ + + +## Function `force_deposit` + +Deposit the coin balance into the recipient's account without checking if the account is frozen. +This is for internal use only and doesn't emit a DepositEvent. + + +
public(friend) fun force_deposit<CoinType>(account_addr: address, coin: coin::Coin<CoinType>)
+
+ + + +
+Implementation + + +
public(friend) fun force_deposit<CoinType>(
+    account_addr: address,
+    coin: Coin<CoinType>
+) acquires CoinStore, CoinConversionMap, CoinInfo {
+    if (exists<CoinStore<CoinType>>(account_addr)) {
+        let coin_store = borrow_global_mut<CoinStore<CoinType>>(account_addr);
+        merge(&mut coin_store.coin, coin);
+    } else {
+        let metadata = paired_metadata<CoinType>();
+        if (option::is_some(&metadata) && migrated_primary_fungible_store_exists(
+            account_addr,
+            option::destroy_some(metadata)
+        )) {
+            let fa = coin_to_fungible_asset(coin);
+            let metadata = fungible_asset::asset_metadata(&fa);
+            let store = primary_fungible_store::primary_store(account_addr, metadata);
+            fungible_asset::deposit_internal(object::object_address(&store), fa);
+        } else {
+            abort error::not_found(ECOIN_STORE_NOT_PUBLISHED)
+        }
+    }
+}
+
+ + + +
+ + + +## Function `destroy_zero` + +Destroys a zero-value coin. Calls will fail if the value in the passed-in token is non-zero +so it is impossible to "burn" any non-zero amount of Coin without having +a BurnCapability for the specific CoinType. + + +
public fun destroy_zero<CoinType>(zero_coin: coin::Coin<CoinType>)
+
+ + + +
+Implementation + + +
public fun destroy_zero<CoinType>(zero_coin: Coin<CoinType>) {
+    spec {
+        update supply<CoinType> = supply<CoinType> - zero_coin.value;
+    };
+    let Coin { value } = zero_coin;
+    assert!(value == 0, error::invalid_argument(EDESTRUCTION_OF_NONZERO_TOKEN))
+}
+
+ + + +
+ + + +## Function `extract` + +Extracts amount from the passed-in coin, where the original token is modified in place. + + +
public fun extract<CoinType>(coin: &mut coin::Coin<CoinType>, amount: u64): coin::Coin<CoinType>
+
+ + + +
+Implementation + + +
public fun extract<CoinType>(coin: &mut Coin<CoinType>, amount: u64): Coin<CoinType> {
+    assert!(coin.value >= amount, error::invalid_argument(EINSUFFICIENT_BALANCE));
+    spec {
+        update supply<CoinType> = supply<CoinType> - amount;
+    };
+    coin.value = coin.value - amount;
+    spec {
+        update supply<CoinType> = supply<CoinType> + amount;
+    };
+    Coin { value: amount }
+}
+
+ + + +
+ + + +## Function `extract_all` + +Extracts the entire amount from the passed-in coin, where the original token is modified in place. + + +
public fun extract_all<CoinType>(coin: &mut coin::Coin<CoinType>): coin::Coin<CoinType>
+
+ + + +
+Implementation + + +
public fun extract_all<CoinType>(coin: &mut Coin<CoinType>): Coin<CoinType> {
+    let total_value = coin.value;
+    spec {
+        update supply<CoinType> = supply<CoinType> - coin.value;
+    };
+    coin.value = 0;
+    spec {
+        update supply<CoinType> = supply<CoinType> + total_value;
+    };
+    Coin { value: total_value }
+}
+
+ + + +
+ + + +## Function `freeze_coin_store` + +Freeze a CoinStore to prevent transfers + + +
#[legacy_entry_fun]
+public entry fun freeze_coin_store<CoinType>(account_addr: address, _freeze_cap: &coin::FreezeCapability<CoinType>)
+
+ + + +
+Implementation + + +
public entry fun freeze_coin_store<CoinType>(
+    account_addr: address,
+    _freeze_cap: &FreezeCapability<CoinType>,
+) acquires CoinStore {
+    let coin_store = borrow_global_mut<CoinStore<CoinType>>(account_addr);
+    coin_store.frozen = true;
+}
+
+ + + +
+ + + +## Function `unfreeze_coin_store` + +Unfreeze a CoinStore to allow transfers + + +
#[legacy_entry_fun]
+public entry fun unfreeze_coin_store<CoinType>(account_addr: address, _freeze_cap: &coin::FreezeCapability<CoinType>)
+
+ + + +
+Implementation + + +
public entry fun unfreeze_coin_store<CoinType>(
+    account_addr: address,
+    _freeze_cap: &FreezeCapability<CoinType>,
+) acquires CoinStore {
+    let coin_store = borrow_global_mut<CoinStore<CoinType>>(account_addr);
+    coin_store.frozen = false;
+}
+
+ + + +
+ + + +## Function `upgrade_supply` + +Upgrade total supply to use a parallelizable implementation if it is +available. + + +
public entry fun upgrade_supply<CoinType>(account: &signer)
+
+ + + +
+Implementation + + +
public entry fun upgrade_supply<CoinType>(account: &signer) acquires CoinInfo, SupplyConfig {
+    let account_addr = signer::address_of(account);
+
+    // Only coin creators can upgrade total supply.
+    assert!(
+        coin_address<CoinType>() == account_addr,
+        error::invalid_argument(ECOIN_INFO_ADDRESS_MISMATCH),
+    );
+
+    // Can only succeed once on-chain governance agreed on the upgrade.
+    assert!(
+        borrow_global_mut<SupplyConfig>(@aptos_framework).allow_upgrades,
+        error::permission_denied(ECOIN_SUPPLY_UPGRADE_NOT_SUPPORTED)
+    );
+
+    let maybe_supply = &mut borrow_global_mut<CoinInfo<CoinType>>(account_addr).supply;
+    if (option::is_some(maybe_supply)) {
+        let supply = option::borrow_mut(maybe_supply);
+
+        // If supply is tracked and the current implementation uses an integer - upgrade.
+        if (!optional_aggregator::is_parallelizable(supply)) {
+            optional_aggregator::switch(supply);
+        }
+    }
+}
+
+ + + +
+ + + +## Function `initialize` + +Creates a new Coin with given CoinType and returns minting/freezing/burning capabilities. +The given signer also becomes the account hosting the information about the coin +(name, supply, etc.). Supply is initialized as non-parallelizable integer. + + +
public fun initialize<CoinType>(account: &signer, name: string::String, symbol: string::String, decimals: u8, monitor_supply: bool): (coin::BurnCapability<CoinType>, coin::FreezeCapability<CoinType>, coin::MintCapability<CoinType>)
+
+ + + +
+Implementation + + +
public fun initialize<CoinType>(
+    account: &signer,
+    name: string::String,
+    symbol: string::String,
+    decimals: u8,
+    monitor_supply: bool,
+): (BurnCapability<CoinType>, FreezeCapability<CoinType>, MintCapability<CoinType>) {
+    initialize_internal(account, name, symbol, decimals, monitor_supply, false)
+}
+
+ + + +
+ + + +## Function `initialize_with_parallelizable_supply` + +Same as initialize but supply can be initialized to parallelizable aggregator. + + +
public(friend) fun initialize_with_parallelizable_supply<CoinType>(account: &signer, name: string::String, symbol: string::String, decimals: u8, monitor_supply: bool): (coin::BurnCapability<CoinType>, coin::FreezeCapability<CoinType>, coin::MintCapability<CoinType>)
+
+ + + +
+Implementation + + +
public(friend) fun initialize_with_parallelizable_supply<CoinType>(
+    account: &signer,
+    name: string::String,
+    symbol: string::String,
+    decimals: u8,
+    monitor_supply: bool,
+): (BurnCapability<CoinType>, FreezeCapability<CoinType>, MintCapability<CoinType>) {
+    system_addresses::assert_aptos_framework(account);
+    initialize_internal(account, name, symbol, decimals, monitor_supply, true)
+}
+
+ + + +
+ + + +## Function `initialize_internal` + + + +
fun initialize_internal<CoinType>(account: &signer, name: string::String, symbol: string::String, decimals: u8, monitor_supply: bool, parallelizable: bool): (coin::BurnCapability<CoinType>, coin::FreezeCapability<CoinType>, coin::MintCapability<CoinType>)
+
+ + + +
+Implementation + + +
fun initialize_internal<CoinType>(
+    account: &signer,
+    name: string::String,
+    symbol: string::String,
+    decimals: u8,
+    monitor_supply: bool,
+    parallelizable: bool,
+): (BurnCapability<CoinType>, FreezeCapability<CoinType>, MintCapability<CoinType>) {
+    let account_addr = signer::address_of(account);
+
+    assert!(
+        coin_address<CoinType>() == account_addr,
+        error::invalid_argument(ECOIN_INFO_ADDRESS_MISMATCH),
+    );
+
+    assert!(
+        !exists<CoinInfo<CoinType>>(account_addr),
+        error::already_exists(ECOIN_INFO_ALREADY_PUBLISHED),
+    );
+
+    assert!(string::length(&name) <= MAX_COIN_NAME_LENGTH, error::invalid_argument(ECOIN_NAME_TOO_LONG));
+    assert!(string::length(&symbol) <= MAX_COIN_SYMBOL_LENGTH, error::invalid_argument(ECOIN_SYMBOL_TOO_LONG));
+
+    let coin_info = CoinInfo<CoinType> {
+        name,
+        symbol,
+        decimals,
+        supply: if (monitor_supply) {
+            option::some(
+                optional_aggregator::new(MAX_U128, parallelizable)
+            )
+        } else { option::none() },
+    };
+    move_to(account, coin_info);
+
+    (BurnCapability<CoinType> {}, FreezeCapability<CoinType> {}, MintCapability<CoinType> {})
+}
+
+ + + +
+ + + +## Function `merge` + +"Merges" the two given coins. The coin passed in as dst_coin will have a value equal +to the sum of the two tokens (dst_coin and source_coin). + + +
public fun merge<CoinType>(dst_coin: &mut coin::Coin<CoinType>, source_coin: coin::Coin<CoinType>)
+
+ + + +
+Implementation + + +
public fun merge<CoinType>(dst_coin: &mut Coin<CoinType>, source_coin: Coin<CoinType>) {
+    spec {
+        assume dst_coin.value + source_coin.value <= MAX_U64;
+    };
+    spec {
+        update supply<CoinType> = supply<CoinType> - source_coin.value;
+    };
+    let Coin { value } = source_coin;
+    spec {
+        update supply<CoinType> = supply<CoinType> + value;
+    };
+    dst_coin.value = dst_coin.value + value;
+}
+
+ + + +
+ + + +## Function `mint` + +Mint new Coin with capability. +The capability _cap should be passed as reference to MintCapability<CoinType>. +Returns minted Coin. + + +
public fun mint<CoinType>(amount: u64, _cap: &coin::MintCapability<CoinType>): coin::Coin<CoinType>
+
+ + + +
+Implementation + + +
public fun mint<CoinType>(
+    amount: u64,
+    _cap: &MintCapability<CoinType>,
+): Coin<CoinType> acquires CoinInfo {
+    mint_internal<CoinType>(amount)
+}
+
+ + + +
+ + + +## Function `register` + + + +
public fun register<CoinType>(account: &signer)
+
+ + + +
+Implementation + + +
public fun register<CoinType>(account: &signer) acquires CoinConversionMap {
+    let account_addr = signer::address_of(account);
+    // Short-circuit and do nothing if account is already registered for CoinType.
+    if (is_account_registered<CoinType>(account_addr)) {
+        return
+    };
+
+    account::register_coin<CoinType>(account_addr);
+    let coin_store = CoinStore<CoinType> {
+        coin: Coin { value: 0 },
+        frozen: false,
+        deposit_events: account::new_event_handle<DepositEvent>(account),
+        withdraw_events: account::new_event_handle<WithdrawEvent>(account),
+    };
+    move_to(account, coin_store);
+}
+
+ + + +
+ + + +## Function `transfer` + +Transfers amount of coins CoinType from from to to. + + +
public entry fun transfer<CoinType>(from: &signer, to: address, amount: u64)
+
+ + + +
+Implementation + + +
public entry fun transfer<CoinType>(
+    from: &signer,
+    to: address,
+    amount: u64,
+) acquires CoinStore, CoinConversionMap, CoinInfo, PairedCoinType {
+    let coin = withdraw<CoinType>(from, amount);
+    deposit(to, coin);
+}
+
+ + + +
+ + + +## Function `value` + +Returns the value passed in coin. + + +
public fun value<CoinType>(coin: &coin::Coin<CoinType>): u64
+
+ + + +
+Implementation + + +
public fun value<CoinType>(coin: &Coin<CoinType>): u64 {
+    coin.value
+}
+
+ + + +
+ + + +## Function `withdraw` + +Withdraw specified amount of coin CoinType from the signing account. + + +
public fun withdraw<CoinType>(account: &signer, amount: u64): coin::Coin<CoinType>
+
+ + + +
+Implementation + + +
public fun withdraw<CoinType>(
+    account: &signer,
+    amount: u64,
+): Coin<CoinType> acquires CoinStore, CoinConversionMap, CoinInfo, PairedCoinType {
+    let account_addr = signer::address_of(account);
+
+    let (coin_amount_to_withdraw, fa_amount_to_withdraw) = calculate_amount_to_withdraw<CoinType>(
+        account_addr,
+        amount
+    );
+    let withdrawn_coin = if (coin_amount_to_withdraw > 0) {
+        let coin_store = borrow_global_mut<CoinStore<CoinType>>(account_addr);
+        assert!(
+            !coin_store.frozen,
+            error::permission_denied(EFROZEN),
+        );
+        if (std::features::module_event_migration_enabled()) {
+            event::emit(
+                CoinWithdraw {
+                    coin_type: type_name<CoinType>(), account: account_addr, amount: coin_amount_to_withdraw
+                }
+            );
+        };
+        event::emit_event<WithdrawEvent>(
+            &mut coin_store.withdraw_events,
+            WithdrawEvent { amount: coin_amount_to_withdraw },
+        );
+        extract(&mut coin_store.coin, coin_amount_to_withdraw)
+    } else {
+        zero()
+    };
+    if (fa_amount_to_withdraw > 0) {
+        let fa = primary_fungible_store::withdraw(
+            account,
+            option::destroy_some(paired_metadata<CoinType>()),
+            fa_amount_to_withdraw
+        );
+        merge(&mut withdrawn_coin, fungible_asset_to_coin(fa));
+    };
+    withdrawn_coin
+}
+
+ + + +
+ + + +## Function `zero` + +Create a new Coin<CoinType> with a value of 0. + + +
public fun zero<CoinType>(): coin::Coin<CoinType>
+
+ + + +
+Implementation + + +
public fun zero<CoinType>(): Coin<CoinType> {
+    spec {
+        update supply<CoinType> = supply<CoinType> + 0;
+    };
+    Coin<CoinType> {
+        value: 0
+    }
+}
+
+ + + +
+ + + +## Function `destroy_freeze_cap` + +Destroy a freeze capability. Freeze capability is dangerous and therefore should be destroyed if not used. + + +
public fun destroy_freeze_cap<CoinType>(freeze_cap: coin::FreezeCapability<CoinType>)
+
+ + + +
+Implementation + + +
public fun destroy_freeze_cap<CoinType>(freeze_cap: FreezeCapability<CoinType>) {
+    let FreezeCapability<CoinType> {} = freeze_cap;
+}
+
+ + + +
+ + + +## Function `destroy_mint_cap` + +Destroy a mint capability. + + +
public fun destroy_mint_cap<CoinType>(mint_cap: coin::MintCapability<CoinType>)
+
+ + + +
+Implementation + + +
public fun destroy_mint_cap<CoinType>(mint_cap: MintCapability<CoinType>) {
+    let MintCapability<CoinType> {} = mint_cap;
+}
+
+ + + +
+ + + +## Function `destroy_burn_cap` + +Destroy a burn capability. + + +
public fun destroy_burn_cap<CoinType>(burn_cap: coin::BurnCapability<CoinType>)
+
+ + + +
+Implementation + + +
public fun destroy_burn_cap<CoinType>(burn_cap: BurnCapability<CoinType>) {
+    let BurnCapability<CoinType> {} = burn_cap;
+}
+
+ + + +
+ + + +## Function `mint_internal` + + + +
fun mint_internal<CoinType>(amount: u64): coin::Coin<CoinType>
+
+ + + +
+Implementation + + +
fun mint_internal<CoinType>(amount: u64): Coin<CoinType> acquires CoinInfo {
+    if (amount == 0) {
+        return Coin<CoinType> {
+            value: 0
+        }
+    };
+
+    let maybe_supply = &mut borrow_global_mut<CoinInfo<CoinType>>(coin_address<CoinType>()).supply;
+    if (option::is_some(maybe_supply)) {
+        let supply = option::borrow_mut(maybe_supply);
+        spec {
+            use aptos_framework::optional_aggregator;
+            use aptos_framework::aggregator;
+            assume optional_aggregator::is_parallelizable(supply) ==> (aggregator::spec_aggregator_get_val(
+                option::borrow(supply.aggregator)
+            )
+                + amount <= aggregator::spec_get_limit(option::borrow(supply.aggregator)));
+            assume !optional_aggregator::is_parallelizable(supply) ==>
+                (option::borrow(supply.integer).value + amount <= option::borrow(supply.integer).limit);
+        };
+        optional_aggregator::add(supply, (amount as u128));
+    };
+    spec {
+        update supply<CoinType> = supply<CoinType> + amount;
+    };
+    Coin<CoinType> { value: amount }
+}
+
+ + + +
+ + + +## Function `burn_internal` + + + +
fun burn_internal<CoinType>(coin: coin::Coin<CoinType>): u64
+
+ + + +
+Implementation + + +
fun burn_internal<CoinType>(coin: Coin<CoinType>): u64 acquires CoinInfo {
+    spec {
+        update supply<CoinType> = supply<CoinType> - coin.value;
+    };
+    let Coin { value: amount } = coin;
+    if (amount != 0) {
+        let maybe_supply = &mut borrow_global_mut<CoinInfo<CoinType>>(coin_address<CoinType>()).supply;
+        if (option::is_some(maybe_supply)) {
+            let supply = option::borrow_mut(maybe_supply);
+            optional_aggregator::sub(supply, (amount as u128));
+        };
+    };
+    amount
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1Only the owner of a coin may mint, burn or freeze coins.CriticalAcquiring capabilities for a particular CoinType may only occur if the caller has a signer for the module declaring that type. The initialize function returns these capabilities to the caller.Formally Verified via upgrade_supply and initialize.
2Each coin may only be created exactly once.MediumThe initialization function may only be called once.Formally Verified via initialize.
3The merging of coins may only be done on coins of the same type.CriticalThe merge function is limited to merging coins of the same type only.Formally Verified via merge.
4The supply of a coin is only affected by burn and mint operations.HighOnly mint and burn operations on a coin alter the total supply of coins.Formally Verified via TotalSupplyNoChange.
5Users may register an account for a coin multiple times idempotently.MediumThe register function should work idempotently. Importantly, it should not abort if the coin is already registered.Formally verified via aborts_if on register.
6Coin operations should fail if the user has not registered for the coin.MediumCoin operations may succeed only on valid user coin registration.Formally Verified via balance, burn_from, freeze, unfreeze, transfer and withdraw.
7It should always be possible to (1) determine if a coin exists, and (2) determine if a user registered an account with a particular coin. If a coin exists, it should always be possible to request the following information of the coin: (1) Name, (2) Symbol, and (3) Supply.LowThe following functions should never abort: (1) is_coin_initialized, and (2) is_account_registered. The following functions should not abort if the coin exists: (1) name, (2) symbol, and (3) supply.Formally Verified in corresponding functions: is_coin_initialized, is_account_registered, name, symbol and supply.
8Coin operations should fail if the user's CoinStore is frozen.MediumIf the CoinStore of an address is frozen, coin operations are disallowed.Formally Verified via withdraw, transfer and deposit.
9Utilizing AggregatableCoins does not violate other critical invariants, such as (4).HighUtilizing AggregatableCoin does not change the real-supply of any token.Formally Verified via TotalSupplyNoChange.
+ + + + + + +### Module-level Specification + + +
pragma verify = true;
+
+global supply<CoinType>: num;
+
+global aggregate_supply<CoinType>: num;
+apply TotalSupplyTracked<CoinType> to *<CoinType> except
+initialize, initialize_internal, initialize_with_parallelizable_supply;
+
+ + + + + + + +
fun spec_fun_supply_tracked<CoinType>(val: u64, supply: Option<OptionalAggregator>): bool {
+   option::spec_is_some(supply) ==> val == optional_aggregator::optional_aggregator_value
+       (option::spec_borrow(supply))
+}
+
+ + + + + + + +
schema TotalSupplyTracked<CoinType> {
+    ensures old(spec_fun_supply_tracked<CoinType>(supply<CoinType> + aggregate_supply<CoinType>,
+        global<CoinInfo<CoinType>>(type_info::type_of<CoinType>().account_address).supply)) ==>
+        spec_fun_supply_tracked<CoinType>(supply<CoinType> + aggregate_supply<CoinType>,
+            global<CoinInfo<CoinType>>(type_info::type_of<CoinType>().account_address).supply);
+}
+
+ + + + + + + +
fun spec_fun_supply_no_change<CoinType>(old_supply: Option<OptionalAggregator>,
+                                            supply: Option<OptionalAggregator>): bool {
+   option::spec_is_some(old_supply) ==> optional_aggregator::optional_aggregator_value
+       (option::spec_borrow(old_supply)) == optional_aggregator::optional_aggregator_value
+       (option::spec_borrow(supply))
+}
+
+ + + + + + + +
schema TotalSupplyNoChange<CoinType> {
+    let old_supply = global<CoinInfo<CoinType>>(type_info::type_of<CoinType>().account_address).supply;
+    let post supply = global<CoinInfo<CoinType>>(type_info::type_of<CoinType>().account_address).supply;
+    ensures spec_fun_supply_no_change<CoinType>(old_supply, supply);
+}
+
+ + + + + + + +
fun spec_is_account_registered<CoinType>(account_addr: address): bool {
+   let paired_metadata_opt = spec_paired_metadata<CoinType>();
+   exists<CoinStore<CoinType>>(account_addr) || (option::spec_is_some(
+       paired_metadata_opt
+   ) && primary_fungible_store::spec_primary_store_exists(account_addr, option::spec_borrow(paired_metadata_opt)))
+}
+
+ + + + + + + +
schema CoinSubAbortsIf<CoinType> {
+    amount: u64;
+    let addr = type_info::type_of<CoinType>().account_address;
+    let maybe_supply = global<CoinInfo<CoinType>>(addr).supply;
+    include (option::is_some(
+        maybe_supply
+    )) ==> optional_aggregator::SubAbortsIf { optional_aggregator: option::borrow(maybe_supply), value: amount };
+}
+
+ + + + + + + +
schema CoinAddAbortsIf<CoinType> {
+    amount: u64;
+    let addr = type_info::type_of<CoinType>().account_address;
+    let maybe_supply = global<CoinInfo<CoinType>>(addr).supply;
+    include (option::is_some(
+        maybe_supply
+    )) ==> optional_aggregator::AddAbortsIf { optional_aggregator: option::borrow(maybe_supply), value: amount };
+}
+
+ + + + + + + +
schema AbortsIfNotExistCoinInfo<CoinType> {
+    let addr = type_info::type_of<CoinType>().account_address;
+    aborts_if !exists<CoinInfo<CoinType>>(addr);
+}
+
+ + + + + +### Struct `AggregatableCoin` + + +
struct AggregatableCoin<CoinType> has store
+
+ + + +
+
+value: aggregator::Aggregator +
+
+ Amount of aggregatable coin this address has. +
+
+ + + +
invariant aggregator::spec_get_limit(value) == MAX_U64;
+
+ + + + + +### Function `coin_to_fungible_asset` + + +
public fun coin_to_fungible_asset<CoinType>(coin: coin::Coin<CoinType>): fungible_asset::FungibleAsset
+
+ + + + +
pragma verify = false;
+let addr = type_info::type_of<CoinType>().account_address;
+modifies global<CoinInfo<CoinType>>(addr);
+
+ + + + + +### Function `fungible_asset_to_coin` + + +
fun fungible_asset_to_coin<CoinType>(fungible_asset: fungible_asset::FungibleAsset): coin::Coin<CoinType>
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `initialize_supply_config` + + +
public(friend) fun initialize_supply_config(aptos_framework: &signer)
+
+ + +Can only be initialized once. +Can only be published by reserved addresses. + + +
let aptos_addr = signer::address_of(aptos_framework);
+aborts_if !system_addresses::is_aptos_framework_address(aptos_addr);
+aborts_if exists<SupplyConfig>(aptos_addr);
+ensures !global<SupplyConfig>(aptos_addr).allow_upgrades;
+ensures exists<SupplyConfig>(aptos_addr);
+
+ + + + + +### Function `allow_supply_upgrades` + + +
public fun allow_supply_upgrades(aptos_framework: &signer, allowed: bool)
+
+ + +Can only be updated by @aptos_framework. + + +
modifies global<SupplyConfig>(@aptos_framework);
+let aptos_addr = signer::address_of(aptos_framework);
+aborts_if !system_addresses::is_aptos_framework_address(aptos_addr);
+aborts_if !exists<SupplyConfig>(aptos_addr);
+let post allow_upgrades_post = global<SupplyConfig>(@aptos_framework);
+ensures allow_upgrades_post.allow_upgrades == allowed;
+
+ + + + + +### Function `initialize_aggregatable_coin` + + +
public(friend) fun initialize_aggregatable_coin<CoinType>(aptos_framework: &signer): coin::AggregatableCoin<CoinType>
+
+ + + + +
include system_addresses::AbortsIfNotAptosFramework { account: aptos_framework };
+include aggregator_factory::CreateAggregatorInternalAbortsIf;
+
+ + + + + +### Function `is_aggregatable_coin_zero` + + +
public(friend) fun is_aggregatable_coin_zero<CoinType>(coin: &coin::AggregatableCoin<CoinType>): bool
+
+ + + + +
aborts_if false;
+ensures result == (aggregator::spec_read(coin.value) == 0);
+
+ + + + + +### Function `drain_aggregatable_coin` + + +
public(friend) fun drain_aggregatable_coin<CoinType>(coin: &mut coin::AggregatableCoin<CoinType>): coin::Coin<CoinType>
+
+ + + + +
aborts_if aggregator::spec_read(coin.value) > MAX_U64;
+ensures result.value == aggregator::spec_aggregator_get_val(old(coin).value);
+
+ + + + + +### Function `merge_aggregatable_coin` + + +
public(friend) fun merge_aggregatable_coin<CoinType>(dst_coin: &mut coin::AggregatableCoin<CoinType>, coin: coin::Coin<CoinType>)
+
+ + + + +
let aggr = dst_coin.value;
+let post p_aggr = dst_coin.value;
+aborts_if aggregator::spec_aggregator_get_val(aggr)
+    + coin.value > aggregator::spec_get_limit(aggr);
+aborts_if aggregator::spec_aggregator_get_val(aggr)
+    + coin.value > MAX_U128;
+ensures aggregator::spec_aggregator_get_val(aggr) + coin.value == aggregator::spec_aggregator_get_val(p_aggr);
+
+ + + + + +### Function `collect_into_aggregatable_coin` + + +
public(friend) fun collect_into_aggregatable_coin<CoinType>(account_addr: address, amount: u64, dst_coin: &mut coin::AggregatableCoin<CoinType>)
+
+ + + + +
pragma verify = false;
+let aggr = dst_coin.value;
+let post p_aggr = dst_coin.value;
+let coin_store = global<CoinStore<CoinType>>(account_addr);
+let post p_coin_store = global<CoinStore<CoinType>>(account_addr);
+aborts_if amount > 0 && !exists<CoinStore<CoinType>>(account_addr);
+aborts_if amount > 0 && coin_store.coin.value < amount;
+aborts_if amount > 0 && aggregator::spec_aggregator_get_val(aggr)
+    + amount > aggregator::spec_get_limit(aggr);
+aborts_if amount > 0 && aggregator::spec_aggregator_get_val(aggr)
+    + amount > MAX_U128;
+ensures aggregator::spec_aggregator_get_val(aggr) + amount == aggregator::spec_aggregator_get_val(p_aggr);
+ensures coin_store.coin.value - amount == p_coin_store.coin.value;
+
+ + + + + +### Function `maybe_convert_to_fungible_store` + + +
fun maybe_convert_to_fungible_store<CoinType>(account: address)
+
+ + + + +
pragma verify = false;
+modifies global<CoinInfo<CoinType>>(account);
+modifies global<CoinStore<CoinType>>(account);
+
+ + + + + + + +
schema DepositAbortsIf<CoinType> {
+    account_addr: address;
+    let coin_store = global<CoinStore<CoinType>>(account_addr);
+    aborts_if !exists<CoinStore<CoinType>>(account_addr);
+    aborts_if coin_store.frozen;
+}
+
+ + + + + +### Function `coin_address` + + +
fun coin_address<CoinType>(): address
+
+ + +Get address by reflection. + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] result == type_info::type_of<CoinType>().account_address;
+
+ + + + + +### Function `balance` + + +
#[view]
+public fun balance<CoinType>(owner: address): u64
+
+ + + + +
pragma verify = false;
+aborts_if !exists<CoinStore<CoinType>>(owner);
+ensures result == global<CoinStore<CoinType>>(owner).coin.value;
+
+ + + + + +### Function `is_coin_initialized` + + +
#[view]
+public fun is_coin_initialized<CoinType>(): bool
+
+ + + + +
// This enforces high-level requirement 7:
+aborts_if false;
+
+ + + + + +### Function `is_account_registered` + + +
#[view]
+public fun is_account_registered<CoinType>(account_addr: address): bool
+
+ + + + +
pragma aborts_if_is_partial;
+aborts_if false;
+
+ + + + + + + +
fun get_coin_supply_opt<CoinType>(): Option<OptionalAggregator> {
+   global<CoinInfo<CoinType>>(type_info::type_of<CoinType>().account_address).supply
+}
+
+ + + + + + + +
fun spec_paired_metadata<CoinType>(): Option<Object<Metadata>> {
+   if (exists<CoinConversionMap>(@aptos_framework)) {
+       let map = global<CoinConversionMap>(@aptos_framework).coin_to_fungible_asset_map;
+       if (table::spec_contains(map, type_info::type_of<CoinType>())) {
+           let metadata = table::spec_get(map, type_info::type_of<CoinType>());
+           option::spec_some(metadata)
+       } else {
+           option::spec_none()
+       }
+   } else {
+       option::spec_none()
+   }
+}
+
+ + + + + +### Function `name` + + +
#[view]
+public fun name<CoinType>(): string::String
+
+ + + + +
// This enforces high-level requirement 7:
+include AbortsIfNotExistCoinInfo<CoinType>;
+
+ + + + + +### Function `symbol` + + +
#[view]
+public fun symbol<CoinType>(): string::String
+
+ + + + +
// This enforces high-level requirement 7:
+include AbortsIfNotExistCoinInfo<CoinType>;
+
+ + + + + +### Function `decimals` + + +
#[view]
+public fun decimals<CoinType>(): u8
+
+ + + + +
include AbortsIfNotExistCoinInfo<CoinType>;
+
+ + + + + +### Function `supply` + + +
#[view]
+public fun supply<CoinType>(): option::Option<u128>
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `coin_supply` + + +
#[view]
+public fun coin_supply<CoinType>(): option::Option<u128>
+
+ + + + +
let coin_addr = type_info::type_of<CoinType>().account_address;
+// This enforces high-level requirement 7:
+aborts_if !exists<CoinInfo<CoinType>>(coin_addr);
+let maybe_supply = global<CoinInfo<CoinType>>(coin_addr).supply;
+let supply = option::spec_borrow(maybe_supply);
+let value = optional_aggregator::optional_aggregator_value(supply);
+ensures if (option::spec_is_some(maybe_supply)) {
+    result == option::spec_some(value)
+} else {
+    option::spec_is_none(result)
+};
+
+ + + + + +### Function `burn` + + +
public fun burn<CoinType>(coin: coin::Coin<CoinType>, _cap: &coin::BurnCapability<CoinType>)
+
+ + + + +
pragma verify = false;
+let addr = type_info::type_of<CoinType>().account_address;
+modifies global<CoinInfo<CoinType>>(addr);
+include AbortsIfNotExistCoinInfo<CoinType>;
+aborts_if coin.value == 0;
+include CoinSubAbortsIf<CoinType> { amount: coin.value };
+ensures supply<CoinType> == old(supply<CoinType>) - coin.value;
+
+ + + + + +### Function `burn_from` + + +
public fun burn_from<CoinType>(account_addr: address, amount: u64, burn_cap: &coin::BurnCapability<CoinType>)
+
+ + + + +
pragma verify = false;
+let addr = type_info::type_of<CoinType>().account_address;
+let coin_store = global<CoinStore<CoinType>>(account_addr);
+let post post_coin_store = global<CoinStore<CoinType>>(account_addr);
+modifies global<CoinInfo<CoinType>>(addr);
+modifies global<CoinStore<CoinType>>(account_addr);
+// This enforces high-level requirement 6:
+aborts_if amount != 0 && !exists<CoinInfo<CoinType>>(addr);
+aborts_if amount != 0 && !exists<CoinStore<CoinType>>(account_addr);
+aborts_if coin_store.coin.value < amount;
+let maybe_supply = global<CoinInfo<CoinType>>(addr).supply;
+let supply_aggr = option::spec_borrow(maybe_supply);
+let value = optional_aggregator::optional_aggregator_value(supply_aggr);
+let post post_maybe_supply = global<CoinInfo<CoinType>>(addr).supply;
+let post post_supply = option::spec_borrow(post_maybe_supply);
+let post post_value = optional_aggregator::optional_aggregator_value(post_supply);
+aborts_if option::spec_is_some(maybe_supply) && value < amount;
+ensures post_coin_store.coin.value == coin_store.coin.value - amount;
+// This enforces high-level requirement 5 of the managed_coin module:
+ensures if (option::spec_is_some(maybe_supply)) {
+    post_value == value - amount
+} else {
+    option::spec_is_none(post_maybe_supply)
+};
+ensures supply<CoinType> == old(supply<CoinType>) - amount;
+
+ + + + + +### Function `deposit` + + +
public fun deposit<CoinType>(account_addr: address, coin: coin::Coin<CoinType>)
+
+ + +account_addr is not frozen. + + +
pragma verify = false;
+modifies global<CoinInfo<CoinType>>(account_addr);
+// This enforces high-level requirement 8:
+include DepositAbortsIf<CoinType>;
+ensures global<CoinStore<CoinType>>(account_addr).coin.value == old(
+    global<CoinStore<CoinType>>(account_addr)
+).coin.value + coin.value;
+
+ + + + + +### Function `force_deposit` + + +
public(friend) fun force_deposit<CoinType>(account_addr: address, coin: coin::Coin<CoinType>)
+
+ + + + +
pragma verify = false;
+modifies global<CoinStore<CoinType>>(account_addr);
+aborts_if !exists<CoinStore<CoinType>>(account_addr);
+ensures global<CoinStore<CoinType>>(account_addr).coin.value == old(
+    global<CoinStore<CoinType>>(account_addr)
+).coin.value + coin.value;
+
+ + + + + +### Function `destroy_zero` + + +
public fun destroy_zero<CoinType>(zero_coin: coin::Coin<CoinType>)
+
+ + +The value of zero_coin must be 0. + + +
aborts_if zero_coin.value > 0;
+
+ + + + + +### Function `extract` + + +
public fun extract<CoinType>(coin: &mut coin::Coin<CoinType>, amount: u64): coin::Coin<CoinType>
+
+ + + + +
aborts_if coin.value < amount;
+ensures result.value == amount;
+ensures coin.value == old(coin.value) - amount;
+
+ + + + + +### Function `extract_all` + + +
public fun extract_all<CoinType>(coin: &mut coin::Coin<CoinType>): coin::Coin<CoinType>
+
+ + + + +
ensures result.value == old(coin).value;
+ensures coin.value == 0;
+
+ + + + + +### Function `freeze_coin_store` + + +
#[legacy_entry_fun]
+public entry fun freeze_coin_store<CoinType>(account_addr: address, _freeze_cap: &coin::FreezeCapability<CoinType>)
+
+ + + + +
pragma verify = false;
+modifies global<CoinStore<CoinType>>(account_addr);
+// This enforces high-level requirement 6:
+aborts_if !exists<CoinStore<CoinType>>(account_addr);
+let post coin_store = global<CoinStore<CoinType>>(account_addr);
+ensures coin_store.frozen;
+
+ + + + + +### Function `unfreeze_coin_store` + + +
#[legacy_entry_fun]
+public entry fun unfreeze_coin_store<CoinType>(account_addr: address, _freeze_cap: &coin::FreezeCapability<CoinType>)
+
+ + + + +
pragma verify = false;
+modifies global<CoinStore<CoinType>>(account_addr);
+// This enforces high-level requirement 6:
+aborts_if !exists<CoinStore<CoinType>>(account_addr);
+let post coin_store = global<CoinStore<CoinType>>(account_addr);
+ensures !coin_store.frozen;
+
+ + + + + +### Function `upgrade_supply` + + +
public entry fun upgrade_supply<CoinType>(account: &signer)
+
+ + +The creator of CoinType must be @aptos_framework. +SupplyConfig allow upgrade. + + +
let account_addr = signer::address_of(account);
+let coin_address = type_info::type_of<CoinType>().account_address;
+aborts_if coin_address != account_addr;
+aborts_if !exists<SupplyConfig>(@aptos_framework);
+// This enforces high-level requirement 1:
+aborts_if !exists<CoinInfo<CoinType>>(account_addr);
+let supply_config = global<SupplyConfig>(@aptos_framework);
+aborts_if !supply_config.allow_upgrades;
+modifies global<CoinInfo<CoinType>>(account_addr);
+let maybe_supply = global<CoinInfo<CoinType>>(account_addr).supply;
+let supply = option::spec_borrow(maybe_supply);
+let value = optional_aggregator::optional_aggregator_value(supply);
+let post post_maybe_supply = global<CoinInfo<CoinType>>(account_addr).supply;
+let post post_supply = option::spec_borrow(post_maybe_supply);
+let post post_value = optional_aggregator::optional_aggregator_value(post_supply);
+let supply_no_parallel = option::spec_is_some(maybe_supply) &&
+    !optional_aggregator::is_parallelizable(supply);
+aborts_if supply_no_parallel && !exists<aggregator_factory::AggregatorFactory>(@aptos_framework);
+ensures supply_no_parallel ==>
+    optional_aggregator::is_parallelizable(post_supply) && post_value == value;
+
+ + + + + +### Function `initialize` + + +
public fun initialize<CoinType>(account: &signer, name: string::String, symbol: string::String, decimals: u8, monitor_supply: bool): (coin::BurnCapability<CoinType>, coin::FreezeCapability<CoinType>, coin::MintCapability<CoinType>)
+
+ + + + +
let account_addr = signer::address_of(account);
+// This enforces high-level requirement 1:
+aborts_if type_info::type_of<CoinType>().account_address != account_addr;
+// This enforces high-level requirement 2:
+aborts_if exists<CoinInfo<CoinType>>(account_addr);
+aborts_if string::length(name) > MAX_COIN_NAME_LENGTH;
+aborts_if string::length(symbol) > MAX_COIN_SYMBOL_LENGTH;
+
+ + + + + +### Function `initialize_with_parallelizable_supply` + + +
public(friend) fun initialize_with_parallelizable_supply<CoinType>(account: &signer, name: string::String, symbol: string::String, decimals: u8, monitor_supply: bool): (coin::BurnCapability<CoinType>, coin::FreezeCapability<CoinType>, coin::MintCapability<CoinType>)
+
+ + + + +
let addr = signer::address_of(account);
+aborts_if addr != @aptos_framework;
+aborts_if monitor_supply && !exists<aggregator_factory::AggregatorFactory>(@aptos_framework);
+include InitializeInternalSchema<CoinType> {
+    name: name.bytes,
+    symbol: symbol.bytes
+};
+ensures exists<CoinInfo<CoinType>>(addr);
+
+ + +Make sure name and symbol are legal length. +Only the creator of CoinType can initialize. + + + + + +
schema InitializeInternalSchema<CoinType> {
+    account: signer;
+    name: vector<u8>;
+    symbol: vector<u8>;
+    let account_addr = signer::address_of(account);
+    let coin_address = type_info::type_of<CoinType>().account_address;
+    aborts_if coin_address != account_addr;
+    aborts_if exists<CoinInfo<CoinType>>(account_addr);
+    aborts_if len(name) > MAX_COIN_NAME_LENGTH;
+    aborts_if len(symbol) > MAX_COIN_SYMBOL_LENGTH;
+}
+
+ + + + + +### Function `initialize_internal` + + +
fun initialize_internal<CoinType>(account: &signer, name: string::String, symbol: string::String, decimals: u8, monitor_supply: bool, parallelizable: bool): (coin::BurnCapability<CoinType>, coin::FreezeCapability<CoinType>, coin::MintCapability<CoinType>)
+
+ + + + +
include InitializeInternalSchema<CoinType> {
+    name: name.bytes,
+    symbol: symbol.bytes
+};
+let account_addr = signer::address_of(account);
+let post coin_info = global<CoinInfo<CoinType>>(account_addr);
+let post supply = option::spec_borrow(coin_info.supply);
+let post value = optional_aggregator::optional_aggregator_value(supply);
+let post limit = optional_aggregator::optional_aggregator_limit(supply);
+modifies global<CoinInfo<CoinType>>(account_addr);
+aborts_if monitor_supply && parallelizable
+    && !exists<aggregator_factory::AggregatorFactory>(@aptos_framework);
+// This enforces high-level requirement 2 of the managed_coin module:
+ensures exists<CoinInfo<CoinType>>(account_addr)
+    && coin_info.name == name
+    && coin_info.symbol == symbol
+    && coin_info.decimals == decimals;
+ensures if (monitor_supply) {
+    value == 0 && limit == MAX_U128
+        && (parallelizable == optional_aggregator::is_parallelizable(supply))
+} else {
+    option::spec_is_none(coin_info.supply)
+};
+ensures result_1 == BurnCapability<CoinType> {};
+ensures result_2 == FreezeCapability<CoinType> {};
+ensures result_3 == MintCapability<CoinType> {};
+
+ + + + + +### Function `merge` + + +
public fun merge<CoinType>(dst_coin: &mut coin::Coin<CoinType>, source_coin: coin::Coin<CoinType>)
+
+ + + + +
// This enforces high-level requirement 3:
+ensures dst_coin.value == old(dst_coin.value) + source_coin.value;
+
+ + + + + +### Function `mint` + + +
public fun mint<CoinType>(amount: u64, _cap: &coin::MintCapability<CoinType>): coin::Coin<CoinType>
+
+ + + + +
let addr = type_info::type_of<CoinType>().account_address;
+modifies global<CoinInfo<CoinType>>(addr);
+
+ + + + + +### Function `register` + + +
public fun register<CoinType>(account: &signer)
+
+ + +An account can only be registered once. +Updating Account.guid_creation_num will not overflow. + + +
pragma verify = false;
+
+ + + + + +### Function `transfer` + + +
public entry fun transfer<CoinType>(from: &signer, to: address, amount: u64)
+
+ + +from and to account not frozen. +from and to not the same address. +from account sufficient balance. + + +
pragma verify = false;
+let account_addr_from = signer::address_of(from);
+let coin_store_from = global<CoinStore<CoinType>>(account_addr_from);
+let post coin_store_post_from = global<CoinStore<CoinType>>(account_addr_from);
+let coin_store_to = global<CoinStore<CoinType>>(to);
+let post coin_store_post_to = global<CoinStore<CoinType>>(to);
+// This enforces high-level requirement 6:
+aborts_if !exists<CoinStore<CoinType>>(account_addr_from);
+aborts_if !exists<CoinStore<CoinType>>(to);
+// This enforces high-level requirement 8:
+aborts_if coin_store_from.frozen;
+aborts_if coin_store_to.frozen;
+aborts_if coin_store_from.coin.value < amount;
+ensures account_addr_from != to ==> coin_store_post_from.coin.value ==
+    coin_store_from.coin.value - amount;
+ensures account_addr_from != to ==> coin_store_post_to.coin.value == coin_store_to.coin.value + amount;
+ensures account_addr_from == to ==> coin_store_post_from.coin.value == coin_store_from.coin.value;
+
+ + + + + +### Function `withdraw` + + +
public fun withdraw<CoinType>(account: &signer, amount: u64): coin::Coin<CoinType>
+
+ + +Account is not frozen and sufficient balance. + + +
pragma verify = false;
+include WithdrawAbortsIf<CoinType>;
+modifies global<CoinStore<CoinType>>(account_addr);
+let account_addr = signer::address_of(account);
+let coin_store = global<CoinStore<CoinType>>(account_addr);
+let balance = coin_store.coin.value;
+let post coin_post = global<CoinStore<CoinType>>(account_addr).coin.value;
+ensures coin_post == balance - amount;
+ensures result == Coin<CoinType> { value: amount };
+
+ + + + + + + +
schema WithdrawAbortsIf<CoinType> {
+    account: &signer;
+    amount: u64;
+    let account_addr = signer::address_of(account);
+    let coin_store = global<CoinStore<CoinType>>(account_addr);
+    let balance = coin_store.coin.value;
+    // This enforces high-level requirement 6:
+    aborts_if !exists<CoinStore<CoinType>>(account_addr);
+    // This enforces high-level requirement 8:
+    aborts_if coin_store.frozen;
+    aborts_if balance < amount;
+}
+
+ + + + + +### Function `mint_internal` + + +
fun mint_internal<CoinType>(amount: u64): coin::Coin<CoinType>
+
+ + + + +
let addr = type_info::type_of<CoinType>().account_address;
+modifies global<CoinInfo<CoinType>>(addr);
+aborts_if (amount != 0) && !exists<CoinInfo<CoinType>>(addr);
+ensures supply<CoinType> == old(supply<CoinType>) + amount;
+ensures result.value == amount;
+
+ + + + + +### Function `burn_internal` + + +
fun burn_internal<CoinType>(coin: coin::Coin<CoinType>): u64
+
+ + + + +
pragma verify = false;
+let addr = type_info::type_of<CoinType>().account_address;
+modifies global<CoinInfo<CoinType>>(addr);
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/config_buffer.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/config_buffer.md new file mode 100644 index 0000000000000..4200950b86cfd --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/config_buffer.md @@ -0,0 +1,342 @@ + + + +# Module `0x1::config_buffer` + +This wrapper helps store an on-chain config for the next epoch. + +Once reconfigure with DKG is introduced, every on-chain config C should do the following. +- Support async update when DKG is enabled. This is typically done by 3 steps below. +- Implement C::set_for_next_epoch() using upsert() function in this module. +- Implement C::on_new_epoch() using extract() function in this module. +- Update 0x1::reconfiguration_with_dkg::finish() to call C::on_new_epoch(). +- Support synchronous update when DKG is disabled. +This is typically done by implementing C::set() to update the config resource directly. + +NOTE: on-chain config 0x1::state::ValidatorSet implemented its own buffer. + + +- [Resource `PendingConfigs`](#0x1_config_buffer_PendingConfigs) +- [Constants](#@Constants_0) +- [Function `initialize`](#0x1_config_buffer_initialize) +- [Function `does_exist`](#0x1_config_buffer_does_exist) +- [Function `upsert`](#0x1_config_buffer_upsert) +- [Function `extract`](#0x1_config_buffer_extract) +- [Specification](#@Specification_1) + - [Function `does_exist`](#@Specification_1_does_exist) + - [Function `upsert`](#@Specification_1_upsert) + - [Function `extract`](#@Specification_1_extract) + + +
use 0x1::any;
+use 0x1::option;
+use 0x1::simple_map;
+use 0x1::string;
+use 0x1::system_addresses;
+use 0x1::type_info;
+
+ + + + + +## Resource `PendingConfigs` + + + +
struct PendingConfigs has key
+
+ + + +
+Fields + + +
+
+configs: simple_map::SimpleMap<string::String, any::Any> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Config buffer operations failed with permission denied. + + +
const ESTD_SIGNER_NEEDED: u64 = 1;
+
+ + + + + +## Function `initialize` + + + +
public fun initialize(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
public fun initialize(aptos_framework: &signer) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    if (!exists<PendingConfigs>(@aptos_framework)) {
+        move_to(aptos_framework, PendingConfigs {
+            configs: simple_map::new(),
+        })
+    }
+}
+
+ + + +
+ + + +## Function `does_exist` + +Check whether there is a pending config payload for T. + + +
public fun does_exist<T: store>(): bool
+
+ + + +
+Implementation + + +
public fun does_exist<T: store>(): bool acquires PendingConfigs {
+    if (exists<PendingConfigs>(@aptos_framework)) {
+        let config = borrow_global<PendingConfigs>(@aptos_framework);
+        simple_map::contains_key(&config.configs, &type_info::type_name<T>())
+    } else {
+        false
+    }
+}
+
+ + + +
+ + + +## Function `upsert` + +Upsert an on-chain config to the buffer for the next epoch. + +Typically used in X::set_for_next_epoch() where X is an on-chain config. + + +
public(friend) fun upsert<T: drop, store>(config: T)
+
+ + + +
+Implementation + + +
public(friend) fun upsert<T: drop + store>(config: T) acquires PendingConfigs {
+    let configs = borrow_global_mut<PendingConfigs>(@aptos_framework);
+    let key = type_info::type_name<T>();
+    let value = any::pack(config);
+    simple_map::upsert(&mut configs.configs, key, value);
+}
+
+ + + +
+ + + +## Function `extract` + +Take the buffered config T out (buffer cleared). Abort if the buffer is empty. +Should only be used at the end of a reconfiguration. + +Typically used in X::on_new_epoch() where X is an on-chain config. + + +
public fun extract<T: store>(): T
+
+ + + +
+Implementation + + +
public fun extract<T: store>(): T acquires PendingConfigs {
+    let configs = borrow_global_mut<PendingConfigs>(@aptos_framework);
+    let key = type_info::type_name<T>();
+    let (_, value_packed) = simple_map::remove(&mut configs.configs, &key);
+    any::unpack(value_packed)
+}
+
+ + + +
+ + + +## Specification + + + +
pragma verify = true;
+
+ + + + + +### Function `does_exist` + + +
public fun does_exist<T: store>(): bool
+
+ + + + +
aborts_if false;
+let type_name = type_info::type_name<T>();
+ensures result == spec_fun_does_exist<T>(type_name);
+
+ + + + + + + +
fun spec_fun_does_exist<T: store>(type_name: String): bool {
+   if (exists<PendingConfigs>(@aptos_framework)) {
+       let config = global<PendingConfigs>(@aptos_framework);
+       simple_map::spec_contains_key(config.configs, type_name)
+   } else {
+       false
+   }
+}
+
+ + + + + +### Function `upsert` + + +
public(friend) fun upsert<T: drop, store>(config: T)
+
+ + + + +
aborts_if !exists<PendingConfigs>(@aptos_framework);
+
+ + + + + +### Function `extract` + + +
public fun extract<T: store>(): T
+
+ + + + +
aborts_if !exists<PendingConfigs>(@aptos_framework);
+include ExtractAbortsIf<T>;
+
+ + + + + + + +
schema ExtractAbortsIf<T> {
+    let configs = global<PendingConfigs>(@aptos_framework);
+    let key = type_info::type_name<T>();
+    aborts_if !simple_map::spec_contains_key(configs.configs, key);
+    include any::UnpackAbortsIf<T> {
+        self: simple_map::spec_get(configs.configs, key)
+    };
+}
+
+ + + + + + + +
schema SetForNextEpochAbortsIf {
+    account: &signer;
+    config: vector<u8>;
+    let account_addr = std::signer::address_of(account);
+    aborts_if account_addr != @aptos_framework;
+    aborts_if len(config) == 0;
+    aborts_if !exists<PendingConfigs>(@aptos_framework);
+}
+
+ + + + + + + +
schema OnNewEpochAbortsIf<T> {
+    let type_name = type_info::type_name<T>();
+    let configs = global<PendingConfigs>(@aptos_framework);
+    include spec_fun_does_exist<T>(type_name) ==> any::UnpackAbortsIf<T> {
+        self: simple_map::spec_get(configs.configs, type_name)
+    };
+}
+
+ + + + + + + +
schema OnNewEpochRequirement<T> {
+    let type_name = type_info::type_name<T>();
+    let configs = global<PendingConfigs>(@aptos_framework);
+    include spec_fun_does_exist<T>(type_name) ==> any::UnpackRequirement<T> {
+        self: simple_map::spec_get(configs.configs, type_name)
+    };
+}
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/consensus_config.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/consensus_config.md new file mode 100644 index 0000000000000..d8f4179edd006 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/consensus_config.md @@ -0,0 +1,445 @@ + + + +# Module `0x1::consensus_config` + +Maintains the consensus config for the blockchain. The config is stored in a +Reconfiguration, and may be updated by root. + + +- [Resource `ConsensusConfig`](#0x1_consensus_config_ConsensusConfig) +- [Constants](#@Constants_0) +- [Function `initialize`](#0x1_consensus_config_initialize) +- [Function `set`](#0x1_consensus_config_set) +- [Function `set_for_next_epoch`](#0x1_consensus_config_set_for_next_epoch) +- [Function `on_new_epoch`](#0x1_consensus_config_on_new_epoch) +- [Function `validator_txn_enabled`](#0x1_consensus_config_validator_txn_enabled) +- [Function `validator_txn_enabled_internal`](#0x1_consensus_config_validator_txn_enabled_internal) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `initialize`](#@Specification_1_initialize) + - [Function `set`](#@Specification_1_set) + - [Function `set_for_next_epoch`](#@Specification_1_set_for_next_epoch) + - [Function `on_new_epoch`](#@Specification_1_on_new_epoch) + - [Function `validator_txn_enabled`](#@Specification_1_validator_txn_enabled) + - [Function `validator_txn_enabled_internal`](#@Specification_1_validator_txn_enabled_internal) + + +
use 0x1::chain_status;
+use 0x1::config_buffer;
+use 0x1::error;
+use 0x1::reconfiguration;
+use 0x1::system_addresses;
+
+ + + + + +## Resource `ConsensusConfig` + + + +
struct ConsensusConfig has drop, store, key
+
+ + + +
+Fields + + +
+
+config: vector<u8> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +The provided on chain config bytes are empty or invalid + + +
const EINVALID_CONFIG: u64 = 1;
+
+ + + + + +## Function `initialize` + +Publishes the ConsensusConfig config. + + +
public(friend) fun initialize(aptos_framework: &signer, config: vector<u8>)
+
+ + + +
+Implementation + + +
public(friend) fun initialize(aptos_framework: &signer, config: vector<u8>) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    assert!(vector::length(&config) > 0, error::invalid_argument(EINVALID_CONFIG));
+    move_to(aptos_framework, ConsensusConfig { config });
+}
+
+ + + +
+ + + +## Function `set` + +Deprecated by set_for_next_epoch(). + +WARNING: calling this while randomness is enabled will trigger a new epoch without randomness! + +TODO: update all the tests that reference this function, then disable this function. + + +
public fun set(account: &signer, config: vector<u8>)
+
+ + + +
+Implementation + + +
public fun set(account: &signer, config: vector<u8>) acquires ConsensusConfig {
+    system_addresses::assert_aptos_framework(account);
+    chain_status::assert_genesis();
+    assert!(vector::length(&config) > 0, error::invalid_argument(EINVALID_CONFIG));
+
+    let config_ref = &mut borrow_global_mut<ConsensusConfig>(@aptos_framework).config;
+    *config_ref = config;
+
+    // Need to trigger reconfiguration so validator nodes can sync on the updated configs.
+    reconfiguration::reconfigure();
+}
+
+ + + +
+ + + +## Function `set_for_next_epoch` + +This can be called by on-chain governance to update on-chain consensus configs for the next epoch. +Example usage: +``` +aptos_framework::consensus_config::set_for_next_epoch(&framework_signer, some_config_bytes); +aptos_framework::aptos_governance::reconfigure(&framework_signer); +``` + + +
public fun set_for_next_epoch(account: &signer, config: vector<u8>)
+
+ + + +
+Implementation + + +
public fun set_for_next_epoch(account: &signer, config: vector<u8>) {
+    system_addresses::assert_aptos_framework(account);
+    assert!(vector::length(&config) > 0, error::invalid_argument(EINVALID_CONFIG));
+    std::config_buffer::upsert<ConsensusConfig>(ConsensusConfig {config});
+}
+
+ + + +
+ + + +## Function `on_new_epoch` + +Only used in reconfigurations to apply the pending ConsensusConfig, if there is any. + + +
public(friend) fun on_new_epoch(framework: &signer)
+
+ + + +
+Implementation + + +
public(friend) fun on_new_epoch(framework: &signer) acquires ConsensusConfig {
+    system_addresses::assert_aptos_framework(framework);
+    if (config_buffer::does_exist<ConsensusConfig>()) {
+        let new_config = config_buffer::extract<ConsensusConfig>();
+        if (exists<ConsensusConfig>(@aptos_framework)) {
+            *borrow_global_mut<ConsensusConfig>(@aptos_framework) = new_config;
+        } else {
+            move_to(framework, new_config);
+        };
+    }
+}
+
+ + + +
+ + + +## Function `validator_txn_enabled` + + + +
public fun validator_txn_enabled(): bool
+
+ + + +
+Implementation + + +
public fun validator_txn_enabled(): bool acquires ConsensusConfig {
+    let config_bytes = borrow_global<ConsensusConfig>(@aptos_framework).config;
+    validator_txn_enabled_internal(config_bytes)
+}
+
+ + + +
+ + + +## Function `validator_txn_enabled_internal` + + + +
fun validator_txn_enabled_internal(config_bytes: vector<u8>): bool
+
+ + + +
+Implementation + + +
native fun validator_txn_enabled_internal(config_bytes: vector<u8>): bool;
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1During genesis, the Aptos framework account should be assigned the consensus config resource.MediumThe consensus_config::initialize function calls the assert_aptos_framework function to ensure that the signer is the aptos_framework and then assigns the ConsensusConfig resource to it.Formally verified via initialize.
2Only aptos framework account is allowed to update the consensus configuration.MediumThe consensus_config::set function ensures that the signer is aptos_framework.Formally verified via set.
3Only a valid configuration can be used during initialization and update.MediumBoth the initialize and set functions validate the config by ensuring its length to be greater than 0.Formally verified via initialize and set.
+ + + + + + +### Module-level Specification + + +
pragma verify = true;
+pragma aborts_if_is_strict;
+invariant [suspendable] chain_status::is_operating() ==> exists<ConsensusConfig>(@aptos_framework);
+
+ + + + + +### Function `initialize` + + +
public(friend) fun initialize(aptos_framework: &signer, config: vector<u8>)
+
+ + +Ensure caller is admin. +Aborts if ConsensusConfig already exists. + + +
let addr = signer::address_of(aptos_framework);
+// This enforces high-level requirement 1:
+aborts_if !system_addresses::is_aptos_framework_address(addr);
+aborts_if exists<ConsensusConfig>(@aptos_framework);
+// This enforces high-level requirement 3:
+aborts_if !(len(config) > 0);
+ensures global<ConsensusConfig>(addr) == ConsensusConfig { config };
+
+ + + + + +### Function `set` + + +
public fun set(account: &signer, config: vector<u8>)
+
+ + +Ensure the caller is admin and that ConsensusConfig exists. +When setting, the current time must be later than last_reconfiguration_time. + + +
pragma verify_duration_estimate = 600;
+include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply;
+include staking_config::StakingRewardsConfigRequirement;
+let addr = signer::address_of(account);
+// This enforces high-level requirement 2:
+aborts_if !system_addresses::is_aptos_framework_address(addr);
+aborts_if !exists<ConsensusConfig>(@aptos_framework);
+// This enforces high-level requirement 3:
+aborts_if !(len(config) > 0);
+requires chain_status::is_genesis();
+requires timestamp::spec_now_microseconds() >= reconfiguration::last_reconfiguration_time();
+requires exists<stake::ValidatorFees>(@aptos_framework);
+requires exists<CoinInfo<AptosCoin>>(@aptos_framework);
+ensures global<ConsensusConfig>(@aptos_framework).config == config;
+
+ + + + + +### Function `set_for_next_epoch` + + +
public fun set_for_next_epoch(account: &signer, config: vector<u8>)
+
+ + + + +
include config_buffer::SetForNextEpochAbortsIf;
+
+ + + + + +### Function `on_new_epoch` + + +
public(friend) fun on_new_epoch(framework: &signer)
+
+ + + + +
requires @aptos_framework == std::signer::address_of(framework);
+include config_buffer::OnNewEpochRequirement<ConsensusConfig>;
+aborts_if false;
+
+ + + + + +### Function `validator_txn_enabled` + + +
public fun validator_txn_enabled(): bool
+
+ + + + +
pragma opaque;
+aborts_if !exists<ConsensusConfig>(@aptos_framework);
+ensures [abstract] result == spec_validator_txn_enabled_internal(global<ConsensusConfig>(@aptos_framework).config);
+
+ + + + + +### Function `validator_txn_enabled_internal` + + +
fun validator_txn_enabled_internal(config_bytes: vector<u8>): bool
+
+ + + + +
pragma opaque;
+ensures [abstract] result == spec_validator_txn_enabled_internal(config_bytes);
+
+ + + + + + + +
fun spec_validator_txn_enabled_internal(config_bytes: vector<u8>): bool;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/create_signer.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/create_signer.md new file mode 100644 index 0000000000000..4f1bb93d57071 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/create_signer.md @@ -0,0 +1,133 @@ + + + +# Module `0x1::create_signer` + +Provides a common place for exporting create_signer across the Aptos Framework. + +To use create_signer, add the module below, such that: +friend aptos_framework::friend_wants_create_signer +where friend_wants_create_signer is the module that needs create_signer. + +Note, that this is only available within the Aptos Framework. + +This exists to make auditing straight forward and to limit the need to depend +on account to have access to this. + + +- [Function `create_signer`](#0x1_create_signer_create_signer) +- [Specification](#@Specification_0) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `create_signer`](#@Specification_0_create_signer) + + +
+ + + + + +## Function `create_signer` + + + +
public(friend) fun create_signer(addr: address): signer
+
+ + + +
+Implementation + + +
public(friend) native fun create_signer(addr: address): signer;
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1Obtaining a signer for an arbitrary account should only be available within the Aptos Framework.CriticalThe create_signer::create_signer function only allows friend modules to retrieve the signer for an arbitrarily address.Enforced through function visibility.
2The account owner should have the ability to create a signer for their account.MediumBefore an Account resource is created, a signer is created for the specified new_address, and later, the Account resource is assigned to this signer.Enforced by the move vm.
3An account should only be able to create a signer for another account if that account has granted it signing capabilities.CriticalThe Account resource holds a signer_capability_offer field which allows the owner to share the signer capability with other accounts.Formally verified via AccountContainsAddr.
4A signer should be returned for addresses that are not registered as accounts.LowThe signer is just a struct that wraps an address, allows for non-accounts to have a signer.Formally verified via create_signer.
+ + + + + + +### Module-level Specification + + +
pragma verify = true;
+pragma aborts_if_is_strict;
+
+ + + + + +### Function `create_signer` + + +
public(friend) fun create_signer(addr: address): signer
+
+ + +Convert address to signer and return. + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] signer::address_of(result) == addr;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/delegation_pool.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/delegation_pool.md new file mode 100644 index 0000000000000..01029999242fe --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/delegation_pool.md @@ -0,0 +1,5479 @@ + + + +# Module `0x1::delegation_pool` + + +Allow multiple delegators to participate in the same stake pool in order to collect the minimum +stake required to join the validator set. Delegators are rewarded out of the validator rewards +proportionally to their stake and provided the same stake-management API as the stake pool owner. + +The main accounting logic in the delegation pool contract handles the following: +1. Tracks how much stake each delegator owns, privately deposited as well as earned. +Accounting individual delegator stakes is achieved through the shares-based pool defined at +aptos_std::pool_u64, hence delegators own shares rather than absolute stakes into the delegation pool. +2. Tracks rewards earned by the stake pool, implicitly by the delegation one, in the meantime +and distribute them accordingly. +3. Tracks lockup cycles on the stake pool in order to separate inactive stake (not earning rewards) +from pending_inactive stake (earning rewards) and allow its delegators to withdraw the former. +4. Tracks how much commission fee has to be paid to the operator out of incoming rewards before +distributing them to the internal pool_u64 pools. + +In order to distinguish between stakes in different states and route rewards accordingly, +separate pool_u64 pools are used for individual stake states: +1. one of active + pending_active stake +2. one of inactive stake FOR each past observed lockup cycle (OLC) on the stake pool +3. 
one of pending_inactive stake scheduled during this ongoing OLC + +As stake-state transitions and rewards are computed only at the stake pool level, the delegation pool +gets outdated. To mitigate this, at any interaction with the delegation pool, a process of synchronization +to the underlying stake pool is executed before the requested operation itself. + +At synchronization: +- stake deviations between the two pools are actually the rewards produced in the meantime. +- the commission fee is extracted from the rewards, the remaining stake is distributed to the internal +pool_u64 pools and then the commission stake used to buy shares for operator. +- if detecting that the lockup expired on the stake pool, the delegation pool will isolate its +pending_inactive stake (now inactive) and create a new pool_u64 to host future pending_inactive stake +scheduled this newly started lockup. +Detecting a lockup expiration on the stake pool resumes to detecting new inactive stake. + +Accounting main invariants: +- each stake-management operation (add/unlock/reactivate/withdraw) and operator change triggers +the synchronization process before executing its own function. +- each OLC maps to one or more real lockups on the stake pool, but not the opposite. Actually, only a real +lockup with 'activity' (which inactivated some unlocking stake) triggers the creation of a new OLC. +- unlocking and/or unlocked stake originating from different real lockups are never mixed together into +the same pool_u64. This invalidates the accounting of which rewards belong to whom. +- no delegator can have unlocking and/or unlocked stake (pending withdrawals) in different OLCs. This ensures +delegators do not have to keep track of the OLCs when they unlocked. When creating a new pending withdrawal, +the existing one is executed (withdrawn) if is already inactive. +- add_stake fees are always refunded, but only after the epoch when they have been charged ends. 
+- withdrawing pending_inactive stake (when validator had gone inactive before its lockup expired) +does not inactivate any stake additional to the requested one to ensure OLC would not advance indefinitely. +- the pending withdrawal exists at an OLC iff delegator owns some shares within the shares pool of that OLC. + +Example flow: +
    +
  1. A node operator creates a delegation pool by calling +initialize_delegation_pool and sets +its commission fee to 0% (for simplicity). A stake pool is created with no initial stake and owned by +a resource account controlled by the delegation pool.
  2. +
  3. Delegator A adds 100 stake which is converted to 100 shares into the active pool_u64
  4. +
  5. Operator joins the validator set as the stake pool has now the minimum stake
  6. +
  7. The stake pool earned rewards and now has 200 active stake. A's active shares are worth 200 coins as +the commission fee is 0%.
  8. +
  9. +
      +
    1. A requests unlock for 100 stake
    2. +
    3. Synchronization detects 200 - 100 active rewards which are entirely (0% commission) added to the active pool.
    4. +
    5. 100 coins = (100 * 100) / 200 = 50 shares are redeemed from the active pool and exchanged for 100 shares +into the pending_inactive one on A's behalf
    6. +
    +
  10. Delegator B adds 200 stake which is converted to (200 * 50) / 100 = 100 shares into the active pool
  11. +
  12. The stake pool earned rewards and now has 600 active and 200 pending_inactive stake.
  13. +
  14. +
      +
    1. A requests reactivate_stake for 100 stake
    2. +
    3. +Synchronization detects 600 - 300 active and 200 - 100 pending_inactive rewards which are both entirely +distributed to their corresponding pools +
    4. +
    5. +100 coins = (100 * 100) / 200 = 50 shares are redeemed from the pending_inactive pool and exchanged for +(100 * 150) / 600 = 25 shares into the active one on A's behalf +
    6. +
    +
  15. The lockup expires on the stake pool, inactivating the entire pending_inactive stake
  16. +
  17. +
      +
    1. B requests unlock for 100 stake
    2. +
    3. +Synchronization detects no active or pending_inactive rewards, but 0 -> 100 inactive stake on the stake pool, +so it advances the observed lockup cycle and creates a pool_u64 for the new lockup, hence allowing previous +pending_inactive shares to be redeemed
    4. +
    5. +100 coins = (100 * 175) / 700 = 25 shares are redeemed from the active pool and exchanged for 100 shares +into the new pending_inactive one on B's behalf +
    6. +
    +
  18. The stake pool earned rewards and now has some pending_inactive rewards.
  19. +
  20. +
      +
    1. A requests withdraw for its entire inactive stake
    2. +
    3. +Synchronization detects no new inactive stake, but some pending_inactive rewards which are distributed +to the (2nd) pending_inactive pool +
    4. +
    5. +A's 50 shares = (50 * 100) / 50 = 100 coins are redeemed from the (1st) inactive pool and 100 stake is +transferred to A +
    6. +
    +
+ + + +- [Resource `DelegationPoolOwnership`](#0x1_delegation_pool_DelegationPoolOwnership) +- [Struct `ObservedLockupCycle`](#0x1_delegation_pool_ObservedLockupCycle) +- [Resource `DelegationPool`](#0x1_delegation_pool_DelegationPool) +- [Struct `VotingRecordKey`](#0x1_delegation_pool_VotingRecordKey) +- [Struct `VoteDelegation`](#0x1_delegation_pool_VoteDelegation) +- [Struct `DelegatedVotes`](#0x1_delegation_pool_DelegatedVotes) +- [Resource `GovernanceRecords`](#0x1_delegation_pool_GovernanceRecords) +- [Resource `BeneficiaryForOperator`](#0x1_delegation_pool_BeneficiaryForOperator) +- [Resource `NextCommissionPercentage`](#0x1_delegation_pool_NextCommissionPercentage) +- [Resource `DelegationPoolAllowlisting`](#0x1_delegation_pool_DelegationPoolAllowlisting) +- [Struct `AddStake`](#0x1_delegation_pool_AddStake) +- [Struct `AddStakeEvent`](#0x1_delegation_pool_AddStakeEvent) +- [Struct `ReactivateStake`](#0x1_delegation_pool_ReactivateStake) +- [Struct `ReactivateStakeEvent`](#0x1_delegation_pool_ReactivateStakeEvent) +- [Struct `UnlockStake`](#0x1_delegation_pool_UnlockStake) +- [Struct `UnlockStakeEvent`](#0x1_delegation_pool_UnlockStakeEvent) +- [Struct `WithdrawStake`](#0x1_delegation_pool_WithdrawStake) +- [Struct `WithdrawStakeEvent`](#0x1_delegation_pool_WithdrawStakeEvent) +- [Struct `DistributeCommissionEvent`](#0x1_delegation_pool_DistributeCommissionEvent) +- [Struct `DistributeCommission`](#0x1_delegation_pool_DistributeCommission) +- [Struct `Vote`](#0x1_delegation_pool_Vote) +- [Struct `VoteEvent`](#0x1_delegation_pool_VoteEvent) +- [Struct `CreateProposal`](#0x1_delegation_pool_CreateProposal) +- [Struct `CreateProposalEvent`](#0x1_delegation_pool_CreateProposalEvent) +- [Struct `DelegateVotingPower`](#0x1_delegation_pool_DelegateVotingPower) +- [Struct `DelegateVotingPowerEvent`](#0x1_delegation_pool_DelegateVotingPowerEvent) +- [Struct `SetBeneficiaryForOperator`](#0x1_delegation_pool_SetBeneficiaryForOperator) +- [Struct 
`CommissionPercentageChange`](#0x1_delegation_pool_CommissionPercentageChange) +- [Struct `EnableDelegatorsAllowlisting`](#0x1_delegation_pool_EnableDelegatorsAllowlisting) +- [Struct `DisableDelegatorsAllowlisting`](#0x1_delegation_pool_DisableDelegatorsAllowlisting) +- [Struct `AllowlistDelegator`](#0x1_delegation_pool_AllowlistDelegator) +- [Struct `RemoveDelegatorFromAllowlist`](#0x1_delegation_pool_RemoveDelegatorFromAllowlist) +- [Struct `EvictDelegator`](#0x1_delegation_pool_EvictDelegator) +- [Constants](#@Constants_0) +- [Function `owner_cap_exists`](#0x1_delegation_pool_owner_cap_exists) +- [Function `get_owned_pool_address`](#0x1_delegation_pool_get_owned_pool_address) +- [Function `delegation_pool_exists`](#0x1_delegation_pool_delegation_pool_exists) +- [Function `partial_governance_voting_enabled`](#0x1_delegation_pool_partial_governance_voting_enabled) +- [Function `observed_lockup_cycle`](#0x1_delegation_pool_observed_lockup_cycle) +- [Function `is_next_commission_percentage_effective`](#0x1_delegation_pool_is_next_commission_percentage_effective) +- [Function `operator_commission_percentage`](#0x1_delegation_pool_operator_commission_percentage) +- [Function `operator_commission_percentage_next_lockup_cycle`](#0x1_delegation_pool_operator_commission_percentage_next_lockup_cycle) +- [Function `shareholders_count_active_pool`](#0x1_delegation_pool_shareholders_count_active_pool) +- [Function `get_delegation_pool_stake`](#0x1_delegation_pool_get_delegation_pool_stake) +- [Function `get_pending_withdrawal`](#0x1_delegation_pool_get_pending_withdrawal) +- [Function `get_stake`](#0x1_delegation_pool_get_stake) +- [Function `get_add_stake_fee`](#0x1_delegation_pool_get_add_stake_fee) +- [Function `can_withdraw_pending_inactive`](#0x1_delegation_pool_can_withdraw_pending_inactive) +- [Function `calculate_and_update_voter_total_voting_power`](#0x1_delegation_pool_calculate_and_update_voter_total_voting_power) +- [Function 
`calculate_and_update_remaining_voting_power`](#0x1_delegation_pool_calculate_and_update_remaining_voting_power) +- [Function `calculate_and_update_delegator_voter`](#0x1_delegation_pool_calculate_and_update_delegator_voter) +- [Function `calculate_and_update_voting_delegation`](#0x1_delegation_pool_calculate_and_update_voting_delegation) +- [Function `get_expected_stake_pool_address`](#0x1_delegation_pool_get_expected_stake_pool_address) +- [Function `min_remaining_secs_for_commission_change`](#0x1_delegation_pool_min_remaining_secs_for_commission_change) +- [Function `allowlisting_enabled`](#0x1_delegation_pool_allowlisting_enabled) +- [Function `delegator_allowlisted`](#0x1_delegation_pool_delegator_allowlisted) +- [Function `get_delegators_allowlist`](#0x1_delegation_pool_get_delegators_allowlist) +- [Function `initialize_delegation_pool`](#0x1_delegation_pool_initialize_delegation_pool) +- [Function `beneficiary_for_operator`](#0x1_delegation_pool_beneficiary_for_operator) +- [Function `enable_partial_governance_voting`](#0x1_delegation_pool_enable_partial_governance_voting) +- [Function `vote`](#0x1_delegation_pool_vote) +- [Function `create_proposal`](#0x1_delegation_pool_create_proposal) +- [Function `assert_owner_cap_exists`](#0x1_delegation_pool_assert_owner_cap_exists) +- [Function `assert_delegation_pool_exists`](#0x1_delegation_pool_assert_delegation_pool_exists) +- [Function `assert_min_active_balance`](#0x1_delegation_pool_assert_min_active_balance) +- [Function `assert_min_pending_inactive_balance`](#0x1_delegation_pool_assert_min_pending_inactive_balance) +- [Function `assert_partial_governance_voting_enabled`](#0x1_delegation_pool_assert_partial_governance_voting_enabled) +- [Function `assert_allowlisting_enabled`](#0x1_delegation_pool_assert_allowlisting_enabled) +- [Function `assert_delegator_allowlisted`](#0x1_delegation_pool_assert_delegator_allowlisted) +- [Function 
`coins_to_redeem_to_ensure_min_stake`](#0x1_delegation_pool_coins_to_redeem_to_ensure_min_stake) +- [Function `coins_to_transfer_to_ensure_min_stake`](#0x1_delegation_pool_coins_to_transfer_to_ensure_min_stake) +- [Function `retrieve_stake_pool_owner`](#0x1_delegation_pool_retrieve_stake_pool_owner) +- [Function `get_pool_address`](#0x1_delegation_pool_get_pool_address) +- [Function `get_delegator_active_shares`](#0x1_delegation_pool_get_delegator_active_shares) +- [Function `get_delegator_pending_inactive_shares`](#0x1_delegation_pool_get_delegator_pending_inactive_shares) +- [Function `get_used_voting_power`](#0x1_delegation_pool_get_used_voting_power) +- [Function `create_resource_account_seed`](#0x1_delegation_pool_create_resource_account_seed) +- [Function `borrow_mut_used_voting_power`](#0x1_delegation_pool_borrow_mut_used_voting_power) +- [Function `update_and_borrow_mut_delegator_vote_delegation`](#0x1_delegation_pool_update_and_borrow_mut_delegator_vote_delegation) +- [Function `update_and_borrow_mut_delegated_votes`](#0x1_delegation_pool_update_and_borrow_mut_delegated_votes) +- [Function `olc_with_index`](#0x1_delegation_pool_olc_with_index) +- [Function `calculate_total_voting_power`](#0x1_delegation_pool_calculate_total_voting_power) +- [Function `calculate_and_update_delegator_voter_internal`](#0x1_delegation_pool_calculate_and_update_delegator_voter_internal) +- [Function `calculate_and_update_delegated_votes`](#0x1_delegation_pool_calculate_and_update_delegated_votes) +- [Function `borrow_mut_delegators_allowlist`](#0x1_delegation_pool_borrow_mut_delegators_allowlist) +- [Function `set_operator`](#0x1_delegation_pool_set_operator) +- [Function `set_beneficiary_for_operator`](#0x1_delegation_pool_set_beneficiary_for_operator) +- [Function `update_commission_percentage`](#0x1_delegation_pool_update_commission_percentage) +- [Function `set_delegated_voter`](#0x1_delegation_pool_set_delegated_voter) +- [Function 
`delegate_voting_power`](#0x1_delegation_pool_delegate_voting_power) +- [Function `enable_delegators_allowlisting`](#0x1_delegation_pool_enable_delegators_allowlisting) +- [Function `disable_delegators_allowlisting`](#0x1_delegation_pool_disable_delegators_allowlisting) +- [Function `allowlist_delegator`](#0x1_delegation_pool_allowlist_delegator) +- [Function `remove_delegator_from_allowlist`](#0x1_delegation_pool_remove_delegator_from_allowlist) +- [Function `evict_delegator`](#0x1_delegation_pool_evict_delegator) +- [Function `add_stake`](#0x1_delegation_pool_add_stake) +- [Function `unlock`](#0x1_delegation_pool_unlock) +- [Function `unlock_internal`](#0x1_delegation_pool_unlock_internal) +- [Function `reactivate_stake`](#0x1_delegation_pool_reactivate_stake) +- [Function `withdraw`](#0x1_delegation_pool_withdraw) +- [Function `withdraw_internal`](#0x1_delegation_pool_withdraw_internal) +- [Function `pending_withdrawal_exists`](#0x1_delegation_pool_pending_withdrawal_exists) +- [Function `pending_inactive_shares_pool_mut`](#0x1_delegation_pool_pending_inactive_shares_pool_mut) +- [Function `pending_inactive_shares_pool`](#0x1_delegation_pool_pending_inactive_shares_pool) +- [Function `execute_pending_withdrawal`](#0x1_delegation_pool_execute_pending_withdrawal) +- [Function `buy_in_active_shares`](#0x1_delegation_pool_buy_in_active_shares) +- [Function `buy_in_pending_inactive_shares`](#0x1_delegation_pool_buy_in_pending_inactive_shares) +- [Function `amount_to_shares_to_redeem`](#0x1_delegation_pool_amount_to_shares_to_redeem) +- [Function `redeem_active_shares`](#0x1_delegation_pool_redeem_active_shares) +- [Function `redeem_inactive_shares`](#0x1_delegation_pool_redeem_inactive_shares) +- [Function `calculate_stake_pool_drift`](#0x1_delegation_pool_calculate_stake_pool_drift) +- [Function `synchronize_delegation_pool`](#0x1_delegation_pool_synchronize_delegation_pool) +- [Function 
`assert_and_update_proposal_used_voting_power`](#0x1_delegation_pool_assert_and_update_proposal_used_voting_power) +- [Function `update_governance_records_for_buy_in_active_shares`](#0x1_delegation_pool_update_governance_records_for_buy_in_active_shares) +- [Function `update_governance_records_for_buy_in_pending_inactive_shares`](#0x1_delegation_pool_update_governance_records_for_buy_in_pending_inactive_shares) +- [Function `update_governanace_records_for_redeem_active_shares`](#0x1_delegation_pool_update_governanace_records_for_redeem_active_shares) +- [Function `update_governanace_records_for_redeem_pending_inactive_shares`](#0x1_delegation_pool_update_governanace_records_for_redeem_pending_inactive_shares) +- [Function `multiply_then_divide`](#0x1_delegation_pool_multiply_then_divide) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + + +
use 0x1::account;
+use 0x1::aptos_account;
+use 0x1::aptos_coin;
+use 0x1::aptos_governance;
+use 0x1::coin;
+use 0x1::error;
+use 0x1::event;
+use 0x1::features;
+use 0x1::pool_u64_unbound;
+use 0x1::signer;
+use 0x1::smart_table;
+use 0x1::stake;
+use 0x1::staking_config;
+use 0x1::table;
+use 0x1::table_with_length;
+use 0x1::timestamp;
+use 0x1::vector;
+
+ + + + + +## Resource `DelegationPoolOwnership` + +Capability that represents ownership over privileged operations on the underlying stake pool. + + +
struct DelegationPoolOwnership has store, key
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ equal to address of the resource account owning the stake pool +
+
+ + +
+ + + +## Struct `ObservedLockupCycle` + + + +
struct ObservedLockupCycle has copy, drop, store
+
+ + + +
+Fields + + +
+
+index: u64 +
+
+ +
+
+ + +
+ + + +## Resource `DelegationPool` + + + +
struct DelegationPool has key
+
+ + + +
+Fields + + +
+
+active_shares: pool_u64_unbound::Pool +
+
+ +
+
+observed_lockup_cycle: delegation_pool::ObservedLockupCycle +
+
+ +
+
+inactive_shares: table::Table<delegation_pool::ObservedLockupCycle, pool_u64_unbound::Pool> +
+
+ +
+
+pending_withdrawals: table::Table<address, delegation_pool::ObservedLockupCycle> +
+
+ +
+
+stake_pool_signer_cap: account::SignerCapability +
+
+ +
+
+total_coins_inactive: u64 +
+
+ +
+
+operator_commission_percentage: u64 +
+
+ +
+
+add_stake_events: event::EventHandle<delegation_pool::AddStakeEvent> +
+
+ +
+
+reactivate_stake_events: event::EventHandle<delegation_pool::ReactivateStakeEvent> +
+
+ +
+
+unlock_stake_events: event::EventHandle<delegation_pool::UnlockStakeEvent> +
+
+ +
+
+withdraw_stake_events: event::EventHandle<delegation_pool::WithdrawStakeEvent> +
+
+ +
+
+distribute_commission_events: event::EventHandle<delegation_pool::DistributeCommissionEvent> +
+
+ +
+
+ + +
+ + + +## Struct `VotingRecordKey` + + + +
struct VotingRecordKey has copy, drop, store
+
+ + + +
+Fields + + +
+
+voter: address +
+
+ +
+
+proposal_id: u64 +
+
+ +
+
+ + +
+ + + +## Struct `VoteDelegation` + +Track delegated voter of each delegator. + + +
struct VoteDelegation has copy, drop, store
+
+ + + +
+Fields + + +
+
+voter: address +
+
+ +
+
+pending_voter: address +
+
+ +
+
+last_locked_until_secs: u64 +
+
+ +
+
+ + +
+ + + +## Struct `DelegatedVotes` + +Track total voting power of each voter. + + +
struct DelegatedVotes has copy, drop, store
+
+ + + +
+Fields + + +
+
+active_shares: u128 +
+
+ +
+
+pending_inactive_shares: u128 +
+
+ +
+
+active_shares_next_lockup: u128 +
+
+ +
+
+last_locked_until_secs: u64 +
+
+ +
+
+ + +
+ + + +## Resource `GovernanceRecords` + +Track governance information of a delegation (e.g. voter delegation/voting power calculation). +This struct should be stored in the delegation pool resource account. + + +
struct GovernanceRecords has key
+
+ + + +
+Fields + + +
+
+votes: smart_table::SmartTable<delegation_pool::VotingRecordKey, u64> +
+
+ +
+
+votes_per_proposal: smart_table::SmartTable<u64, u64> +
+
+ +
+
+vote_delegation: smart_table::SmartTable<address, delegation_pool::VoteDelegation> +
+
+ +
+
+delegated_votes: smart_table::SmartTable<address, delegation_pool::DelegatedVotes> +
+
+ +
+
+vote_events: event::EventHandle<delegation_pool::VoteEvent> +
+
+ +
+
+create_proposal_events: event::EventHandle<delegation_pool::CreateProposalEvent> +
+
+ +
+
+delegate_voting_power_events: event::EventHandle<delegation_pool::DelegateVotingPowerEvent> +
+
+ +
+
+ + +
+ + + +## Resource `BeneficiaryForOperator` + + + +
struct BeneficiaryForOperator has key
+
+ + + +
+Fields + + +
+
+beneficiary_for_operator: address +
+
+ +
+
+ + +
+ + + +## Resource `NextCommissionPercentage` + + + +
struct NextCommissionPercentage has key
+
+ + + +
+Fields + + +
+
+commission_percentage_next_lockup_cycle: u64 +
+
+ +
+
+effective_after_secs: u64 +
+
+ +
+
+ + +
+ + + +## Resource `DelegationPoolAllowlisting` + +Tracks a delegation pool's allowlist of delegators. +If allowlisting is enabled, existing delegators are not implicitly allowlisted and they can be individually +evicted later by the pool owner. + + +
struct DelegationPoolAllowlisting has key
+
+ + + +
+Fields + + +
+
+allowlist: smart_table::SmartTable<address, bool> +
+
+ +
+
+ + +
+ + + +## Struct `AddStake` + + + +
#[event]
+struct AddStake has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+delegator_address: address +
+
+ +
+
+amount_added: u64 +
+
+ +
+
+add_stake_fee: u64 +
+
+ +
+
+ + +
+ + + +## Struct `AddStakeEvent` + + + +
struct AddStakeEvent has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+delegator_address: address +
+
+ +
+
+amount_added: u64 +
+
+ +
+
+add_stake_fee: u64 +
+
+ +
+
+ + +
+ + + +## Struct `ReactivateStake` + + + +
#[event]
+struct ReactivateStake has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+delegator_address: address +
+
+ +
+
+amount_reactivated: u64 +
+
+ +
+
+ + +
+ + + +## Struct `ReactivateStakeEvent` + + + +
struct ReactivateStakeEvent has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+delegator_address: address +
+
+ +
+
+amount_reactivated: u64 +
+
+ +
+
+ + +
+ + + +## Struct `UnlockStake` + + + +
#[event]
+struct UnlockStake has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+delegator_address: address +
+
+ +
+
+amount_unlocked: u64 +
+
+ +
+
+ + +
+ + + +## Struct `UnlockStakeEvent` + + + +
struct UnlockStakeEvent has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+delegator_address: address +
+
+ +
+
+amount_unlocked: u64 +
+
+ +
+
+ + +
+ + + +## Struct `WithdrawStake` + + + +
#[event]
+struct WithdrawStake has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+delegator_address: address +
+
+ +
+
+amount_withdrawn: u64 +
+
+ +
+
+ + +
+ + + +## Struct `WithdrawStakeEvent` + + + +
struct WithdrawStakeEvent has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+delegator_address: address +
+
+ +
+
+amount_withdrawn: u64 +
+
+ +
+
+ + +
+ + + +## Struct `DistributeCommissionEvent` + + + +
#[event]
+struct DistributeCommissionEvent has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+operator: address +
+
+ +
+
+commission_active: u64 +
+
+ +
+
+commission_pending_inactive: u64 +
+
+ +
+
+ + +
+ + + +## Struct `DistributeCommission` + + + +
#[event]
+struct DistributeCommission has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+operator: address +
+
+ +
+
+beneficiary: address +
+
+ +
+
+commission_active: u64 +
+
+ +
+
+commission_pending_inactive: u64 +
+
+ +
+
+ + +
+ + + +## Struct `Vote` + + + +
#[event]
+struct Vote has drop, store
+
+ + + +
+Fields + + +
+
+voter: address +
+
+ +
+
+proposal_id: u64 +
+
+ +
+
+delegation_pool: address +
+
+ +
+
+num_votes: u64 +
+
+ +
+
+should_pass: bool +
+
+ +
+
+ + +
+ + + +## Struct `VoteEvent` + + + +
struct VoteEvent has drop, store
+
+ + + +
+Fields + + +
+
+voter: address +
+
+ +
+
+proposal_id: u64 +
+
+ +
+
+delegation_pool: address +
+
+ +
+
+num_votes: u64 +
+
+ +
+
+should_pass: bool +
+
+ +
+
+ + +
+ + + +## Struct `CreateProposal` + + + +
#[event]
+struct CreateProposal has drop, store
+
+ + + +
+Fields + + +
+
+proposal_id: u64 +
+
+ +
+
+voter: address +
+
+ +
+
+delegation_pool: address +
+
+ +
+
+ + +
+ + + +## Struct `CreateProposalEvent` + + + +
struct CreateProposalEvent has drop, store
+
+ + + +
+Fields + + +
+
+proposal_id: u64 +
+
+ +
+
+voter: address +
+
+ +
+
+delegation_pool: address +
+
+ +
+
+ + +
+ + + +## Struct `DelegateVotingPower` + + + +
#[event]
+struct DelegateVotingPower has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+delegator: address +
+
+ +
+
+voter: address +
+
+ +
+
+ + +
+ + + +## Struct `DelegateVotingPowerEvent` + + + +
struct DelegateVotingPowerEvent has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+delegator: address +
+
+ +
+
+voter: address +
+
+ +
+
+ + +
+ + + +## Struct `SetBeneficiaryForOperator` + + + +
#[event]
+struct SetBeneficiaryForOperator has drop, store
+
+ + + +
+Fields + + +
+
+operator: address +
+
+ +
+
+old_beneficiary: address +
+
+ +
+
+new_beneficiary: address +
+
+ +
+
+ + +
+ + + +## Struct `CommissionPercentageChange` + + + +
#[event]
+struct CommissionPercentageChange has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+owner: address +
+
+ +
+
+commission_percentage_next_lockup_cycle: u64 +
+
+ +
+
+ + +
+ + + +## Struct `EnableDelegatorsAllowlisting` + + + +
#[event]
+struct EnableDelegatorsAllowlisting has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+ + +
+ + + +## Struct `DisableDelegatorsAllowlisting` + + + +
#[event]
+struct DisableDelegatorsAllowlisting has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+ + +
+ + + +## Struct `AllowlistDelegator` + + + +
#[event]
+struct AllowlistDelegator has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+delegator_address: address +
+
+ +
+
+ + +
+ + + +## Struct `RemoveDelegatorFromAllowlist` + + + +
#[event]
+struct RemoveDelegatorFromAllowlist has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+delegator_address: address +
+
+ +
+
+ + +
+ + + +## Struct `EvictDelegator` + + + +
#[event]
+struct EvictDelegator has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+delegator_address: address +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const MAX_U64: u64 = 18446744073709551615;
+
+ + + + + +Function is deprecated. + + +
const EDEPRECATED_FUNCTION: u64 = 12;
+
+ + + + + +The function is disabled or hasn't been enabled. + + +
const EDISABLED_FUNCTION: u64 = 13;
+
+ + + + + +The account is not the operator of the stake pool. + + +
const ENOT_OPERATOR: u64 = 18;
+
+ + + + + +Account is already owning a delegation pool. + + +
const EOWNER_CAP_ALREADY_EXISTS: u64 = 2;
+
+ + + + + +Delegation pool owner capability does not exist at the provided account. + + +
const EOWNER_CAP_NOT_FOUND: u64 = 1;
+
+ + + + + + + +
const VALIDATOR_STATUS_INACTIVE: u64 = 4;
+
+ + + + + +The voter does not have sufficient stake to create a proposal. + + +
const EINSUFFICIENT_PROPOSER_STAKE: u64 = 15;
+
+ + + + + +The voter does not have any voting power on this proposal. + + +
const ENO_VOTING_POWER: u64 = 16;
+
+ + + + + +The stake pool has already voted on the proposal before enabling partial governance voting on this delegation pool. + + +
const EALREADY_VOTED_BEFORE_ENABLE_PARTIAL_VOTING: u64 = 17;
+
+ + + + + +Cannot evict an allowlisted delegator, should remove them from the allowlist first. + + +
const ECANNOT_EVICT_ALLOWLISTED_DELEGATOR: u64 = 26;
+
+ + + + + +Cannot unlock the accumulated active stake of NULL_SHAREHOLDER(0x0). + + +
const ECANNOT_UNLOCK_NULL_SHAREHOLDER: u64 = 27;
+
+ + + + + +Changing operator commission rate in delegation pool is not supported. + + +
const ECOMMISSION_RATE_CHANGE_NOT_SUPPORTED: u64 = 22;
+
+ + + + + +Creating delegation pools is not enabled yet. + + +
const EDELEGATION_POOLS_DISABLED: u64 = 10;
+
+ + + + + +Delegation pool does not exist at the provided pool address. + + +
const EDELEGATION_POOL_DOES_NOT_EXIST: u64 = 3;
+
+ + + + + +Delegators allowlisting should be enabled to perform this operation. + + +
const EDELEGATORS_ALLOWLISTING_NOT_ENABLED: u64 = 24;
+
+ + + + + +Delegators allowlisting is not supported. + + +
const EDELEGATORS_ALLOWLISTING_NOT_SUPPORTED: u64 = 23;
+
+ + + + + +Delegator's active balance cannot be less than MIN_COINS_ON_SHARES_POOL. + + +
const EDELEGATOR_ACTIVE_BALANCE_TOO_LOW: u64 = 8;
+
+ + + + + +Cannot add/reactivate stake unless being allowlisted by the pool owner. + + +
const EDELEGATOR_NOT_ALLOWLISTED: u64 = 25;
+
+ + + + + +Delegator's pending_inactive balance cannot be less than MIN_COINS_ON_SHARES_POOL. + + +
const EDELEGATOR_PENDING_INACTIVE_BALANCE_TOO_LOW: u64 = 9;
+
+ + + + + +Commission percentage has to be between 0 and MAX_FEE - 100%. + + +
const EINVALID_COMMISSION_PERCENTAGE: u64 = 5;
+
+ + + + + +There is not enough active stake on the stake pool to unlock. + + +
const ENOT_ENOUGH_ACTIVE_STAKE_TO_UNLOCK: u64 = 6;
+
+ + + + + +Changing beneficiaries for operators is not supported. + + +
const EOPERATOR_BENEFICIARY_CHANGE_NOT_SUPPORTED: u64 = 19;
+
+ + + + + +Partial governance voting hasn't been enabled on this delegation pool. + + +
const EPARTIAL_GOVERNANCE_VOTING_NOT_ENABLED: u64 = 14;
+
+ + + + + +There is a pending withdrawal to be executed before unlocking any new stake. + + +
const EPENDING_WITHDRAWAL_EXISTS: u64 = 4;
+
+ + + + + +Slashing (if implemented) should not be applied to already inactive stake. +Not only does it invalidate the accounting of past observed lockup cycles (OLC), +but it is also unfair to delegators whose stake had been inactive before the validator started misbehaving. +Additionally, the inactive stake does not count toward the voting power of the validator. + + +
const ESLASHED_INACTIVE_STAKE_ON_PAST_OLC: u64 = 7;
+
+ + + + + +Commission percentage increase is too large. + + +
const ETOO_LARGE_COMMISSION_INCREASE: u64 = 20;
+
+ + + + + +Commission percentage change is too late in this lockup period, and should be done at least a quarter (1/4) of the lockup duration before the lockup cycle ends. + + +
const ETOO_LATE_COMMISSION_CHANGE: u64 = 21;
+
+ + + + + +Cannot request to withdraw zero stake. + + +
const EWITHDRAW_ZERO_STAKE: u64 = 11;
+
+ + + + + +Maximum commission percentage increase per lockup cycle. 10% is represented as 1000. + + +
const MAX_COMMISSION_INCREASE: u64 = 1000;
+
+ + + + + +Maximum operator percentage fee (of double-digit precision): 22.85% is represented as 2285 + + +
const MAX_FEE: u64 = 10000;
+
+ + + + + +Minimum coins to exist on a shares pool at all times. +Enforced per delegator for both active and pending_inactive pools. +This constraint ensures the share price cannot overly increase and lead to +substantial losses when buying shares (can lose at most 1 share which may +be worth a lot if current share price is high). +This constraint is not enforced on inactive pools as they only allow redeems +(can lose at most 1 coin regardless of current share price). + + +
const MIN_COINS_ON_SHARES_POOL: u64 = 1000000000;
+
+ + + + + + + +
const MODULE_SALT: vector<u8> = [97, 112, 116, 111, 115, 95, 102, 114, 97, 109, 101, 119, 111, 114, 107, 58, 58, 100, 101, 108, 101, 103, 97, 116, 105, 111, 110, 95, 112, 111, 111, 108];
+
+ + + + + +Special shareholder temporarily owning the add_stake fees charged during this epoch. +On each add_stake operation any resulted fee is used to buy active shares for this shareholder. +First synchronization after this epoch ends will distribute accumulated fees to the rest of the pool as refunds. + + +
const NULL_SHAREHOLDER: address = 0x0;
+
+ + + + + +Scaling factor of shares pools used within the delegation pool + + +
const SHARES_SCALING_FACTOR: u64 = 10000000000000000;
+
+ + + + + +## Function `owner_cap_exists` + +Return whether supplied address addr is owner of a delegation pool. + + +
#[view]
+public fun owner_cap_exists(addr: address): bool
+
+ + + +
+Implementation + + +
public fun owner_cap_exists(addr: address): bool {
+    exists<DelegationPoolOwnership>(addr)
+}
+
+ + + +
+ + + +## Function `get_owned_pool_address` + +Return address of the delegation pool owned by owner or fail if there is none. + + +
#[view]
+public fun get_owned_pool_address(owner: address): address
+
+ + + +
+Implementation + + +
public fun get_owned_pool_address(owner: address): address acquires DelegationPoolOwnership {
+    assert_owner_cap_exists(owner);
+    borrow_global<DelegationPoolOwnership>(owner).pool_address
+}
+
+ + + +
+ + + +## Function `delegation_pool_exists` + +Return whether a delegation pool exists at supplied address addr. + + +
#[view]
+public fun delegation_pool_exists(addr: address): bool
+
+ + + +
+Implementation + + +
public fun delegation_pool_exists(addr: address): bool {
+    exists<DelegationPool>(addr)
+}
+
+ + + +
+ + + +## Function `partial_governance_voting_enabled` + +Return whether a delegation pool has already enabled partial governance voting. + + +
#[view]
+public fun partial_governance_voting_enabled(pool_address: address): bool
+
+ + + +
+Implementation + + +
public fun partial_governance_voting_enabled(pool_address: address): bool {
+    exists<GovernanceRecords>(pool_address) && stake::get_delegated_voter(pool_address) == pool_address
+}
+
+ + + +
+ + + +## Function `observed_lockup_cycle` + +Return the index of current observed lockup cycle on delegation pool pool_address. + + +
#[view]
+public fun observed_lockup_cycle(pool_address: address): u64
+
+ + + +
+Implementation + + +
public fun observed_lockup_cycle(pool_address: address): u64 acquires DelegationPool {
+    assert_delegation_pool_exists(pool_address);
+    borrow_global<DelegationPool>(pool_address).observed_lockup_cycle.index
+}
+
+ + + +
+ + + +## Function `is_next_commission_percentage_effective` + +Return whether the commission percentage for the next lockup cycle is effective. + + +
#[view]
+public fun is_next_commission_percentage_effective(pool_address: address): bool
+
+ + + +
+Implementation + + +
public fun is_next_commission_percentage_effective(pool_address: address): bool acquires NextCommissionPercentage {
+    exists<NextCommissionPercentage>(pool_address) &&
+        timestamp::now_seconds() >= borrow_global<NextCommissionPercentage>(pool_address).effective_after_secs
+}
+
+ + + +
+ + + +## Function `operator_commission_percentage` + +Return the operator commission percentage set on the delegation pool pool_address. + + +
#[view]
+public fun operator_commission_percentage(pool_address: address): u64
+
+ + + +
+Implementation + + +
public fun operator_commission_percentage(
+    pool_address: address
+): u64 acquires DelegationPool, NextCommissionPercentage {
+    assert_delegation_pool_exists(pool_address);
+    if (is_next_commission_percentage_effective(pool_address)) {
+        operator_commission_percentage_next_lockup_cycle(pool_address)
+    } else {
+        borrow_global<DelegationPool>(pool_address).operator_commission_percentage
+    }
+}
+
+ + + +
+ + + +## Function `operator_commission_percentage_next_lockup_cycle` + +Return the operator commission percentage for the next lockup cycle. + + +
#[view]
+public fun operator_commission_percentage_next_lockup_cycle(pool_address: address): u64
+
+ + + +
+Implementation + + +
public fun operator_commission_percentage_next_lockup_cycle(
+    pool_address: address
+): u64 acquires DelegationPool, NextCommissionPercentage {
+    assert_delegation_pool_exists(pool_address);
+    if (exists<NextCommissionPercentage>(pool_address)) {
+        borrow_global<NextCommissionPercentage>(pool_address).commission_percentage_next_lockup_cycle
+    } else {
+        borrow_global<DelegationPool>(pool_address).operator_commission_percentage
+    }
+}
+
+ + + +
+ + + +## Function `shareholders_count_active_pool` + +Return the number of delegators owning active stake within pool_address. + + +
#[view]
+public fun shareholders_count_active_pool(pool_address: address): u64
+
+ + + +
+Implementation + + +
public fun shareholders_count_active_pool(pool_address: address): u64 acquires DelegationPool {
+    assert_delegation_pool_exists(pool_address);
+    pool_u64::shareholders_count(&borrow_global<DelegationPool>(pool_address).active_shares)
+}
+
+ + + +
+ + + +## Function `get_delegation_pool_stake` + +Return the stake amounts on pool_address in the different states: +(active,inactive,pending_active,pending_inactive) + + +
#[view]
+public fun get_delegation_pool_stake(pool_address: address): (u64, u64, u64, u64)
+
+ + + +
+Implementation + + +
public fun get_delegation_pool_stake(pool_address: address): (u64, u64, u64, u64) {
+    assert_delegation_pool_exists(pool_address);
+    stake::get_stake(pool_address)
+}
+
+ + + +
+ + + +## Function `get_pending_withdrawal` + +Return whether the given delegator has any withdrawable stake. If they recently requested to unlock +some stake and the stake pool's lockup cycle has not ended, their coins are not withdrawable yet. + + +
#[view]
+public fun get_pending_withdrawal(pool_address: address, delegator_address: address): (bool, u64)
+
+ + + +
+Implementation + + +
public fun get_pending_withdrawal(
+    pool_address: address,
+    delegator_address: address
+): (bool, u64) acquires DelegationPool {
+    assert_delegation_pool_exists(pool_address);
+    let pool = borrow_global<DelegationPool>(pool_address);
+    let (
+        lockup_cycle_ended,
+        _,
+        pending_inactive,
+        _,
+        commission_pending_inactive
+    ) = calculate_stake_pool_drift(pool);
+
+    let (withdrawal_exists, withdrawal_olc) = pending_withdrawal_exists(pool, delegator_address);
+    if (!withdrawal_exists) {
+        // if no pending withdrawal, there is neither inactive nor pending_inactive stake
+        (false, 0)
+    } else {
+        // delegator has either inactive or pending_inactive stake due to automatic withdrawals
+        let inactive_shares = table::borrow(&pool.inactive_shares, withdrawal_olc);
+        if (withdrawal_olc.index < pool.observed_lockup_cycle.index) {
+            // if withdrawal's lockup cycle ended on delegation pool then it is inactive
+            (true, pool_u64::balance(inactive_shares, delegator_address))
+        } else {
+            pending_inactive = pool_u64::shares_to_amount_with_total_coins(
+                inactive_shares,
+                pool_u64::shares(inactive_shares, delegator_address),
+                // exclude operator pending_inactive rewards not converted to shares yet
+                pending_inactive - commission_pending_inactive
+            );
+            // if withdrawal's lockup cycle ended ONLY on stake pool then it is also inactive
+            (lockup_cycle_ended, pending_inactive)
+        }
+    }
+}
+
+ + + +
+ + + +## Function `get_stake` + +Return total stake owned by delegator_address within delegation pool pool_address +in each of its individual states: (active,inactive,pending_inactive) + + +
#[view]
+public fun get_stake(pool_address: address, delegator_address: address): (u64, u64, u64)
+
+ + + +
+Implementation + + +
public fun get_stake(
+    pool_address: address,
+    delegator_address: address
+): (u64, u64, u64) acquires DelegationPool, BeneficiaryForOperator {
+    assert_delegation_pool_exists(pool_address);
+    let pool = borrow_global<DelegationPool>(pool_address);
+    let (
+        lockup_cycle_ended,
+        active,
+        _,
+        commission_active,
+        commission_pending_inactive
+    ) = calculate_stake_pool_drift(pool);
+
+    let total_active_shares = pool_u64::total_shares(&pool.active_shares);
+    let delegator_active_shares = pool_u64::shares(&pool.active_shares, delegator_address);
+
+    let (_, _, pending_active, _) = stake::get_stake(pool_address);
+    if (pending_active == 0) {
+        // zero `pending_active` stake indicates that either there are no `add_stake` fees or
+        // previous epoch has ended and should identify shares owning these fees as released
+        total_active_shares = total_active_shares - pool_u64::shares(&pool.active_shares, NULL_SHAREHOLDER);
+        if (delegator_address == NULL_SHAREHOLDER) {
+            delegator_active_shares = 0
+        }
+    };
+    active = pool_u64::shares_to_amount_with_total_stats(
+        &pool.active_shares,
+        delegator_active_shares,
+        // exclude operator active rewards not converted to shares yet
+        active - commission_active,
+        total_active_shares
+    );
+
+    // get state and stake (0 if there is none) of the pending withdrawal
+    let (withdrawal_inactive, withdrawal_stake) = get_pending_withdrawal(pool_address, delegator_address);
+    // report non-active stakes accordingly to the state of the pending withdrawal
+    let (inactive, pending_inactive) = if (withdrawal_inactive) (withdrawal_stake, 0) else (0, withdrawal_stake);
+
+    // should also include commission rewards in case of the operator account
+    // operator rewards are actually used to buy shares which is introducing
+    // some imprecision (received stake would be slightly less)
+    // but adding rewards onto the existing stake is still a good approximation
+    if (delegator_address == beneficiary_for_operator(get_operator(pool_address))) {
+        active = active + commission_active;
+        // in-flight pending_inactive commission can coexist with already inactive withdrawal
+        if (lockup_cycle_ended) {
+            inactive = inactive + commission_pending_inactive
+        } else {
+            pending_inactive = pending_inactive + commission_pending_inactive
+        }
+    };
+
+    (active, inactive, pending_inactive)
+}
+
+ + + +
+ + + +## Function `get_add_stake_fee` + +Return refundable stake to be extracted from added amount at add_stake operation on pool pool_address. +If the validator produces rewards this epoch, added stake goes directly to pending_active and +does not earn rewards. However, all shares within a pool appreciate uniformly and when this epoch ends: +- either added shares are still pending_active and steal from rewards of existing active stake +- or have moved to pending_inactive and get full rewards (they displaced active stake at unlock) +To mitigate this, some of the added stake is extracted and fed back into the pool as placeholder +for the rewards the remaining stake would have earned if active: +extracted-fee = (amount - extracted-fee) * reward-rate% * (100% - operator-commission%) + + +
#[view]
+public fun get_add_stake_fee(pool_address: address, amount: u64): u64
+
+ + + +
+Implementation + + +
public fun get_add_stake_fee(
+    pool_address: address,
+    amount: u64
+): u64 acquires DelegationPool, NextCommissionPercentage {
+    if (stake::is_current_epoch_validator(pool_address)) {
+        let (rewards_rate, rewards_rate_denominator) = staking_config::get_reward_rate(&staking_config::get());
+        if (rewards_rate_denominator > 0) {
+            assert_delegation_pool_exists(pool_address);
+
+            rewards_rate = rewards_rate * (MAX_FEE - operator_commission_percentage(pool_address));
+            rewards_rate_denominator = rewards_rate_denominator * MAX_FEE;
+            ((((amount as u128) * (rewards_rate as u128)) / ((rewards_rate as u128) + (rewards_rate_denominator as u128))) as u64)
+        } else { 0 }
+    } else { 0 }
+}
+
+ + + +
+ + + +## Function `can_withdraw_pending_inactive` + +Return whether pending_inactive stake can be directly withdrawn from +the delegation pool, implicitly its stake pool, in the special case +the validator had gone inactive before its lockup expired. + + +
#[view]
+public fun can_withdraw_pending_inactive(pool_address: address): bool
+
+ + + +
+Implementation + + +
public fun can_withdraw_pending_inactive(pool_address: address): bool {
+    stake::get_validator_state(pool_address) == VALIDATOR_STATUS_INACTIVE &&
+        timestamp::now_seconds() >= stake::get_lockup_secs(pool_address)
+}
+
+ + + +
+ + + +## Function `calculate_and_update_voter_total_voting_power` + +Return the total voting power of a delegator in a delegation pool. This function syncs DelegationPool to the +latest state. + + +
#[view]
+public fun calculate_and_update_voter_total_voting_power(pool_address: address, voter: address): u64
+
+ + + +
+Implementation + + +
public fun calculate_and_update_voter_total_voting_power(
+    pool_address: address,
+    voter: address
+): u64 acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage {
+    assert_partial_governance_voting_enabled(pool_address);
+    // Delegation pool need to be synced to explain rewards(which could change the coin amount) and
+    // commission(which could cause share transfer).
+    synchronize_delegation_pool(pool_address);
+    let pool = borrow_global<DelegationPool>(pool_address);
+    let governance_records = borrow_global_mut<GovernanceRecords>(pool_address);
+    let latest_delegated_votes = update_and_borrow_mut_delegated_votes(pool, governance_records, voter);
+    calculate_total_voting_power(pool, latest_delegated_votes)
+}
+
+ + + +
+ + + +## Function `calculate_and_update_remaining_voting_power` + +Return the remaining voting power of a delegator in a delegation pool on a proposal. This function syncs DelegationPool to the +latest state. + + +
#[view]
+public fun calculate_and_update_remaining_voting_power(pool_address: address, voter_address: address, proposal_id: u64): u64
+
+ + + +
+Implementation + + +
public fun calculate_and_update_remaining_voting_power(
+    pool_address: address,
+    voter_address: address,
+    proposal_id: u64
+): u64 acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage {
+    assert_partial_governance_voting_enabled(pool_address);
+    // If the whole stake pool has no voting power(e.g. it has already voted before partial
+    // governance voting flag is enabled), the delegator also has no voting power.
+    if (aptos_governance::get_remaining_voting_power(pool_address, proposal_id) == 0) {
+        return 0
+    };
+
+    let total_voting_power = calculate_and_update_voter_total_voting_power(pool_address, voter_address);
+    let governance_records = borrow_global<GovernanceRecords>(pool_address);
+    total_voting_power - get_used_voting_power(governance_records, voter_address, proposal_id)
+}
+
+ + + +
+ + + +## Function `calculate_and_update_delegator_voter` + +Return the latest delegated voter of a delegator in a delegation pool. This function syncs DelegationPool to the +latest state. + + +
#[view]
+public fun calculate_and_update_delegator_voter(pool_address: address, delegator_address: address): address
+
+ + + +
+Implementation + + +
public fun calculate_and_update_delegator_voter(
+    pool_address: address,
+    delegator_address: address
+): address acquires DelegationPool, GovernanceRecords {
+    assert_partial_governance_voting_enabled(pool_address);
+    calculate_and_update_delegator_voter_internal(
+        borrow_global<DelegationPool>(pool_address),
+        borrow_global_mut<GovernanceRecords>(pool_address),
+        delegator_address
+    )
+}
+
+ + + +
+ + + +## Function `calculate_and_update_voting_delegation` + +Return the current state of a voting delegation of a delegator in a delegation pool. + + +
#[view]
+public fun calculate_and_update_voting_delegation(pool_address: address, delegator_address: address): (address, address, u64)
+
+ + + +
+Implementation + + +
public fun calculate_and_update_voting_delegation(
+    pool_address: address,
+    delegator_address: address
+): (address, address, u64) acquires DelegationPool, GovernanceRecords {
+    assert_partial_governance_voting_enabled(pool_address);
+    let vote_delegation = update_and_borrow_mut_delegator_vote_delegation(
+        borrow_global<DelegationPool>(pool_address),
+        borrow_global_mut<GovernanceRecords>(pool_address),
+        delegator_address
+    );
+
+    (vote_delegation.voter, vote_delegation.pending_voter, vote_delegation.last_locked_until_secs)
+}
+
+ + + +
+ + + +## Function `get_expected_stake_pool_address` + +Return the address of the stake pool to be created with the provided owner, and seed. + + +
#[view]
+public fun get_expected_stake_pool_address(owner: address, delegation_pool_creation_seed: vector<u8>): address
+
+ + + +
+Implementation + + +
public fun get_expected_stake_pool_address(owner: address, delegation_pool_creation_seed: vector<u8>
+): address {
+    let seed = create_resource_account_seed(delegation_pool_creation_seed);
+    account::create_resource_address(&owner, seed)
+}
+
+ + + +
+ + + +## Function `min_remaining_secs_for_commission_change` + +Return the minimum remaining time in seconds for commission change, which is one fourth of the lockup duration. + + +
#[view]
+public fun min_remaining_secs_for_commission_change(): u64
+
+ + + +
+Implementation + + +
public fun min_remaining_secs_for_commission_change(): u64 {
+    let config = staking_config::get();
+    staking_config::get_recurring_lockup_duration(&config) / 4
+}
+
+ + + +
+ + + +## Function `allowlisting_enabled` + +Return whether allowlisting is enabled for the provided delegation pool. + + +
#[view]
+public fun allowlisting_enabled(pool_address: address): bool
+
+ + + +
+Implementation + + +
public fun allowlisting_enabled(pool_address: address): bool {
+    assert_delegation_pool_exists(pool_address);
+    exists<DelegationPoolAllowlisting>(pool_address)
+}
+
+ + + +
+ + + +## Function `delegator_allowlisted` + +Return whether the provided delegator is allowlisted. +A delegator is allowlisted if: +- allowlisting is disabled on the pool +- delegator is part of the allowlist + + +
#[view]
+public fun delegator_allowlisted(pool_address: address, delegator_address: address): bool
+
+ + + +
+Implementation + + +
public fun delegator_allowlisted(
+    pool_address: address,
+    delegator_address: address,
+): bool acquires DelegationPoolAllowlisting {
+    if (!allowlisting_enabled(pool_address)) { return true };
+    smart_table::contains(freeze(borrow_mut_delegators_allowlist(pool_address)), delegator_address)
+}
+
+ + + +
+ + + +## Function `get_delegators_allowlist` + +Return allowlist or revert if allowlisting is not enabled for the provided delegation pool. + + +
#[view]
+public fun get_delegators_allowlist(pool_address: address): vector<address>
+
+ + + +
+Implementation + + +
public fun get_delegators_allowlist(
+    pool_address: address,
+): vector<address> acquires DelegationPoolAllowlisting {
+    assert_allowlisting_enabled(pool_address);
+
+    let allowlist = vector[];
+    smart_table::for_each_ref(freeze(borrow_mut_delegators_allowlist(pool_address)), |delegator, _v| {
+        vector::push_back(&mut allowlist, *delegator);
+    });
+    allowlist
+}
+
+ + + +
+ + + +## Function `initialize_delegation_pool` + +Initialize a delegation pool of custom fixed operator_commission_percentage. +A resource account is created from owner signer and its supplied delegation_pool_creation_seed +to host the delegation pool resource and own the underlying stake pool. +Ownership over setting the operator/voter is granted to owner who has both roles initially. + + +
public entry fun initialize_delegation_pool(owner: &signer, operator_commission_percentage: u64, delegation_pool_creation_seed: vector<u8>)
+
+ + + +
+Implementation + + +
public entry fun initialize_delegation_pool(
+    owner: &signer,
+    operator_commission_percentage: u64,
+    delegation_pool_creation_seed: vector<u8>,
+) acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage {
+    assert!(features::delegation_pools_enabled(), error::invalid_state(EDELEGATION_POOLS_DISABLED));
+    let owner_address = signer::address_of(owner);
+    assert!(!owner_cap_exists(owner_address), error::already_exists(EOWNER_CAP_ALREADY_EXISTS));
+    assert!(operator_commission_percentage <= MAX_FEE, error::invalid_argument(EINVALID_COMMISSION_PERCENTAGE));
+
+    // generate a seed to be used to create the resource account hosting the delegation pool
+    let seed = create_resource_account_seed(delegation_pool_creation_seed);
+
+    let (stake_pool_signer, stake_pool_signer_cap) = account::create_resource_account(owner, seed);
+    coin::register<AptosCoin>(&stake_pool_signer);
+
+    // stake_pool_signer will be owner of the stake pool and have its `stake::OwnerCapability`
+    let pool_address = signer::address_of(&stake_pool_signer);
+    stake::initialize_stake_owner(&stake_pool_signer, 0, owner_address, owner_address);
+
+    let inactive_shares = table::new<ObservedLockupCycle, pool_u64::Pool>();
+    table::add(
+        &mut inactive_shares,
+        olc_with_index(0),
+        pool_u64::create_with_scaling_factor(SHARES_SCALING_FACTOR)
+    );
+
+    move_to(&stake_pool_signer, DelegationPool {
+        active_shares: pool_u64::create_with_scaling_factor(SHARES_SCALING_FACTOR),
+        observed_lockup_cycle: olc_with_index(0),
+        inactive_shares,
+        pending_withdrawals: table::new<address, ObservedLockupCycle>(),
+        stake_pool_signer_cap,
+        total_coins_inactive: 0,
+        operator_commission_percentage,
+        add_stake_events: account::new_event_handle<AddStakeEvent>(&stake_pool_signer),
+        reactivate_stake_events: account::new_event_handle<ReactivateStakeEvent>(&stake_pool_signer),
+        unlock_stake_events: account::new_event_handle<UnlockStakeEvent>(&stake_pool_signer),
+        withdraw_stake_events: account::new_event_handle<WithdrawStakeEvent>(&stake_pool_signer),
+        distribute_commission_events: account::new_event_handle<DistributeCommissionEvent>(&stake_pool_signer),
+    });
+
+    // save delegation pool ownership and resource account address (inner stake pool address) on `owner`
+    move_to(owner, DelegationPoolOwnership { pool_address });
+
+    // All delegation pools enable partial governance voting by default once the feature flag is enabled.
+    if (features::partial_governance_voting_enabled(
+    ) && features::delegation_pool_partial_governance_voting_enabled()) {
+        enable_partial_governance_voting(pool_address);
+    }
+}
+
+ + + +
+ + + +## Function `beneficiary_for_operator` + +Return the beneficiary address of the operator. + + +
#[view]
+public fun beneficiary_for_operator(operator: address): address
+
+ + + +
+Implementation + + +
public fun beneficiary_for_operator(operator: address): address acquires BeneficiaryForOperator {
+    if (exists<BeneficiaryForOperator>(operator)) {
+        return borrow_global<BeneficiaryForOperator>(operator).beneficiary_for_operator
+    } else {
+        operator
+    }
+}
+
+ + + +
+ + + +## Function `enable_partial_governance_voting` + +Enable partial governance voting on a stake pool. The voter of this stake pool will be managed by this module. +The existing voter will be replaced. The function is permissionless. + + +
public entry fun enable_partial_governance_voting(pool_address: address)
+
+ + + +
+Implementation + + +
public entry fun enable_partial_governance_voting(
+    pool_address: address,
+) acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage {
+    assert!(features::partial_governance_voting_enabled(), error::invalid_state(EDISABLED_FUNCTION));
+    assert!(
+        features::delegation_pool_partial_governance_voting_enabled(),
+        error::invalid_state(EDISABLED_FUNCTION)
+    );
+    assert_delegation_pool_exists(pool_address);
+    // synchronize delegation and stake pools before any user operation.
+    synchronize_delegation_pool(pool_address);
+
+    let delegation_pool = borrow_global<DelegationPool>(pool_address);
+    let stake_pool_signer = retrieve_stake_pool_owner(delegation_pool);
+    // delegated_voter is managed by the stake pool itself, whose signer capability is managed by DelegationPool.
+    // So voting power of this stake pool can only be used through this module.
+    stake::set_delegated_voter(&stake_pool_signer, signer::address_of(&stake_pool_signer));
+
+    move_to(&stake_pool_signer, GovernanceRecords {
+        votes: smart_table::new(),
+        votes_per_proposal: smart_table::new(),
+        vote_delegation: smart_table::new(),
+        delegated_votes: smart_table::new(),
+        vote_events: account::new_event_handle<VoteEvent>(&stake_pool_signer),
+        create_proposal_events: account::new_event_handle<CreateProposalEvent>(&stake_pool_signer),
+        delegate_voting_power_events: account::new_event_handle<DelegateVotingPowerEvent>(&stake_pool_signer),
+    });
+}
+
+ + + +
+ + + +## Function `vote` + +Vote on a proposal with a voter's voting power. To successfully vote, the following conditions must be met: +1. The voting period of the proposal hasn't ended. +2. The delegation pool's lockup period ends after the voting period of the proposal. +3. The voter still has spare voting power on this proposal. +4. The delegation pool never votes on the proposal before enabling partial governance voting. + + +
public entry fun vote(voter: &signer, pool_address: address, proposal_id: u64, voting_power: u64, should_pass: bool)
+
+ + + +
+Implementation + + +
public entry fun vote(
+    voter: &signer,
+    pool_address: address,
+    proposal_id: u64,
+    voting_power: u64,
+    should_pass: bool
+) acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage {
+    assert_partial_governance_voting_enabled(pool_address);
+    // synchronize delegation and stake pools before any user operation.
+    synchronize_delegation_pool(pool_address);
+
+    let voter_address = signer::address_of(voter);
+    let remaining_voting_power = calculate_and_update_remaining_voting_power(
+        pool_address,
+        voter_address,
+        proposal_id
+    );
+    if (voting_power > remaining_voting_power) {
+        voting_power = remaining_voting_power;
+    };
+    assert!(voting_power > 0, error::invalid_argument(ENO_VOTING_POWER));
+
+    let governance_records = borrow_global_mut<GovernanceRecords>(pool_address);
+    // Check an edge case during the transient period of enabling partial governance voting.
+    assert_and_update_proposal_used_voting_power(governance_records, pool_address, proposal_id, voting_power);
+    let used_voting_power = borrow_mut_used_voting_power(governance_records, voter_address, proposal_id);
+    *used_voting_power = *used_voting_power + voting_power;
+
+    let pool_signer = retrieve_stake_pool_owner(borrow_global<DelegationPool>(pool_address));
+    aptos_governance::partial_vote(&pool_signer, pool_address, proposal_id, voting_power, should_pass);
+
+    if (features::module_event_migration_enabled()) {
+        event::emit(
+            Vote {
+                voter: voter_address,
+                proposal_id,
+                delegation_pool: pool_address,
+                num_votes: voting_power,
+                should_pass,
+            }
+        );
+    };
+
+    event::emit_event(
+        &mut governance_records.vote_events,
+        VoteEvent {
+            voter: voter_address,
+            proposal_id,
+            delegation_pool: pool_address,
+            num_votes: voting_power,
+            should_pass,
+        }
+    );
+}
+
+ + + +
+ + + +## Function `create_proposal` + +A voter could create a governance proposal by this function. To successfully create a proposal, the voter's +voting power in THIS delegation pool must be no less than the minimum required voting power specified in +aptos_governance.move. + + +
public entry fun create_proposal(voter: &signer, pool_address: address, execution_hash: vector<u8>, metadata_location: vector<u8>, metadata_hash: vector<u8>, is_multi_step_proposal: bool)
+
+ + + +
+Implementation + + +
public entry fun create_proposal(
+    voter: &signer,
+    pool_address: address,
+    execution_hash: vector<u8>,
+    metadata_location: vector<u8>,
+    metadata_hash: vector<u8>,
+    is_multi_step_proposal: bool,
+) acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage {
+    assert_partial_governance_voting_enabled(pool_address);
+
+    // synchronize delegation and stake pools before any user operation
+    synchronize_delegation_pool(pool_address);
+
+    let voter_addr = signer::address_of(voter);
+    let pool = borrow_global<DelegationPool>(pool_address);
+    let governance_records = borrow_global_mut<GovernanceRecords>(pool_address);
+    let total_voting_power = calculate_and_update_delegated_votes(pool, governance_records, voter_addr);
+    assert!(
+        total_voting_power >= aptos_governance::get_required_proposer_stake(),
+        error::invalid_argument(EINSUFFICIENT_PROPOSER_STAKE));
+    let pool_signer = retrieve_stake_pool_owner(borrow_global<DelegationPool>(pool_address));
+    let proposal_id = aptos_governance::create_proposal_v2_impl(
+        &pool_signer,
+        pool_address,
+        execution_hash,
+        metadata_location,
+        metadata_hash,
+        is_multi_step_proposal,
+    );
+
+    let governance_records = borrow_global_mut<GovernanceRecords>(pool_address);
+
+    if (features::module_event_migration_enabled()) {
+        event::emit(
+            CreateProposal {
+                proposal_id,
+                voter: voter_addr,
+                delegation_pool: pool_address,
+            }
+        );
+    };
+
+    event::emit_event(
+        &mut governance_records.create_proposal_events,
+        CreateProposalEvent {
+            proposal_id,
+            voter: voter_addr,
+            delegation_pool: pool_address,
+        }
+    );
+}
+
+ + + +
+ + + +## Function `assert_owner_cap_exists` + + + +
fun assert_owner_cap_exists(owner: address)
+
+ + + +
+Implementation + + +
fun assert_owner_cap_exists(owner: address) {
+    assert!(owner_cap_exists(owner), error::not_found(EOWNER_CAP_NOT_FOUND));
+}
+
+ + + +
+ + + +## Function `assert_delegation_pool_exists` + + + +
fun assert_delegation_pool_exists(pool_address: address)
+
+ + + +
+Implementation + + +
fun assert_delegation_pool_exists(pool_address: address) {
+    assert!(delegation_pool_exists(pool_address), error::invalid_argument(EDELEGATION_POOL_DOES_NOT_EXIST));
+}
+
+ + + +
+ + + +## Function `assert_min_active_balance` + + + +
fun assert_min_active_balance(pool: &delegation_pool::DelegationPool, delegator_address: address)
+
+ + + +
+Implementation + + +
fun assert_min_active_balance(pool: &DelegationPool, delegator_address: address) {
+    let balance = pool_u64::balance(&pool.active_shares, delegator_address);
+    assert!(balance >= MIN_COINS_ON_SHARES_POOL, error::invalid_argument(EDELEGATOR_ACTIVE_BALANCE_TOO_LOW));
+}
+
+ + + +
+ + + +## Function `assert_min_pending_inactive_balance` + + + +
fun assert_min_pending_inactive_balance(pool: &delegation_pool::DelegationPool, delegator_address: address)
+
+ + + +
+Implementation + + +
fun assert_min_pending_inactive_balance(pool: &DelegationPool, delegator_address: address) {
+    let balance = pool_u64::balance(pending_inactive_shares_pool(pool), delegator_address);
+    assert!(
+        balance >= MIN_COINS_ON_SHARES_POOL,
+        error::invalid_argument(EDELEGATOR_PENDING_INACTIVE_BALANCE_TOO_LOW)
+    );
+}
+
+ + + +
+ + + +## Function `assert_partial_governance_voting_enabled` + + + +
fun assert_partial_governance_voting_enabled(pool_address: address)
+
+ + + +
+Implementation + + +
fun assert_partial_governance_voting_enabled(pool_address: address) {
+    assert_delegation_pool_exists(pool_address);
+    assert!(
+        partial_governance_voting_enabled(pool_address),
+        error::invalid_state(EPARTIAL_GOVERNANCE_VOTING_NOT_ENABLED)
+    );
+}
+
+ + + +
+ + + +## Function `assert_allowlisting_enabled` + + + +
fun assert_allowlisting_enabled(pool_address: address)
+
+ + + +
+Implementation + + +
fun assert_allowlisting_enabled(pool_address: address) {
+    assert!(allowlisting_enabled(pool_address), error::invalid_state(EDELEGATORS_ALLOWLISTING_NOT_ENABLED));
+}
+
+ + + +
+ + + +## Function `assert_delegator_allowlisted` + + + +
fun assert_delegator_allowlisted(pool_address: address, delegator_address: address)
+
+ + + +
+Implementation + + +
fun assert_delegator_allowlisted(
+    pool_address: address,
+    delegator_address: address,
+) acquires DelegationPoolAllowlisting {
+    assert!(
+        delegator_allowlisted(pool_address, delegator_address),
+        error::permission_denied(EDELEGATOR_NOT_ALLOWLISTED)
+    );
+}
+
+ + + +
+ + + +## Function `coins_to_redeem_to_ensure_min_stake` + + + +
fun coins_to_redeem_to_ensure_min_stake(src_shares_pool: &pool_u64_unbound::Pool, shareholder: address, amount: u64): u64
+
+ + + +
+Implementation + + +
fun coins_to_redeem_to_ensure_min_stake(
+    src_shares_pool: &pool_u64::Pool,
+    shareholder: address,
+    amount: u64,
+): u64 {
+    // find how many coins would be redeemed if supplying `amount`
+    let redeemed_coins = pool_u64::shares_to_amount(
+        src_shares_pool,
+        amount_to_shares_to_redeem(src_shares_pool, shareholder, amount)
+    );
+    // if balance drops under threshold then redeem it entirely
+    let src_balance = pool_u64::balance(src_shares_pool, shareholder);
+    if (src_balance - redeemed_coins < MIN_COINS_ON_SHARES_POOL) {
+        amount = src_balance;
+    };
+    amount
+}
+
+ + + +
+ + + +## Function `coins_to_transfer_to_ensure_min_stake` + + + +
fun coins_to_transfer_to_ensure_min_stake(src_shares_pool: &pool_u64_unbound::Pool, dst_shares_pool: &pool_u64_unbound::Pool, shareholder: address, amount: u64): u64
+
+ + + +
+Implementation + + +
fun coins_to_transfer_to_ensure_min_stake(
+    src_shares_pool: &pool_u64::Pool,
+    dst_shares_pool: &pool_u64::Pool,
+    shareholder: address,
+    amount: u64,
+): u64 {
+    // find how many coins would be redeemed from source if supplying `amount`
+    let redeemed_coins = pool_u64::shares_to_amount(
+        src_shares_pool,
+        amount_to_shares_to_redeem(src_shares_pool, shareholder, amount)
+    );
+    // if balance on destination would be less than threshold then redeem difference to threshold
+    let dst_balance = pool_u64::balance(dst_shares_pool, shareholder);
+    if (dst_balance + redeemed_coins < MIN_COINS_ON_SHARES_POOL) {
+        // `redeemed_coins` >= `amount` - 1 as redeem can lose at most 1 coin
+        amount = MIN_COINS_ON_SHARES_POOL - dst_balance + 1;
+    };
+    // check if new `amount` drops balance on source under threshold and adjust
+    coins_to_redeem_to_ensure_min_stake(src_shares_pool, shareholder, amount)
+}
+
+ + + +
+ + + +## Function `retrieve_stake_pool_owner` + +Retrieves the shared resource account owning the stake pool in order +to forward a stake-management operation to this underlying pool. + + +
fun retrieve_stake_pool_owner(pool: &delegation_pool::DelegationPool): signer
+
+ + + +
+Implementation + + +
fun retrieve_stake_pool_owner(pool: &DelegationPool): signer {
+    account::create_signer_with_capability(&pool.stake_pool_signer_cap)
+}
+
+ + + +
+ + + +## Function `get_pool_address` + +Get the address of the delegation pool referenced by `pool`. + + +
fun get_pool_address(pool: &delegation_pool::DelegationPool): address
+
+ + + +
+Implementation + + +
fun get_pool_address(pool: &DelegationPool): address {
+    account::get_signer_capability_address(&pool.stake_pool_signer_cap)
+}
+
+ + + +
+ + + +## Function `get_delegator_active_shares` + +Get the active share amount of the delegator. + + +
fun get_delegator_active_shares(pool: &delegation_pool::DelegationPool, delegator: address): u128
+
+ + + +
+Implementation + + +
fun get_delegator_active_shares(pool: &DelegationPool, delegator: address): u128 {
+    pool_u64::shares(&pool.active_shares, delegator)
+}
+
+ + + +
+ + + +## Function `get_delegator_pending_inactive_shares` + +Get the pending inactive share amount of the delegator. + + +
fun get_delegator_pending_inactive_shares(pool: &delegation_pool::DelegationPool, delegator: address): u128
+
+ + + +
+Implementation + + +
fun get_delegator_pending_inactive_shares(pool: &DelegationPool, delegator: address): u128 {
+    pool_u64::shares(pending_inactive_shares_pool(pool), delegator)
+}
+
+ + + +
+ + + +## Function `get_used_voting_power` + +Get the used voting power of a voter on a proposal. + + +
fun get_used_voting_power(governance_records: &delegation_pool::GovernanceRecords, voter: address, proposal_id: u64): u64
+
+ + + +
+Implementation + + +
fun get_used_voting_power(governance_records: &GovernanceRecords, voter: address, proposal_id: u64): u64 {
+    let votes = &governance_records.votes;
+    let key = VotingRecordKey {
+        voter,
+        proposal_id,
+    };
+    *smart_table::borrow_with_default(votes, key, &0)
+}
+
+ + + +
+ + + +## Function `create_resource_account_seed` + +Create the seed to derive the resource account address. + + +
fun create_resource_account_seed(delegation_pool_creation_seed: vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
fun create_resource_account_seed(
+    delegation_pool_creation_seed: vector<u8>,
+): vector<u8> {
+    let seed = vector::empty<u8>();
+    // include module salt (before any subseeds) to avoid conflicts with other modules creating resource accounts
+    vector::append(&mut seed, MODULE_SALT);
+    // include an additional salt in case the same resource account has already been created
+    vector::append(&mut seed, delegation_pool_creation_seed);
+    seed
+}
+
+ + + +
+ + + +## Function `borrow_mut_used_voting_power` + +Borrow the mutable used voting power of a voter on a proposal. + + +
fun borrow_mut_used_voting_power(governance_records: &mut delegation_pool::GovernanceRecords, voter: address, proposal_id: u64): &mut u64
+
+ + + +
+Implementation + + +
inline fun borrow_mut_used_voting_power(
+    governance_records: &mut GovernanceRecords,
+    voter: address,
+    proposal_id: u64
+): &mut u64 {
+    let votes = &mut governance_records.votes;
+    let key = VotingRecordKey {
+        proposal_id,
+        voter,
+    };
+    smart_table::borrow_mut_with_default(votes, key, 0)
+}
+
+ + + +
+ + + +## Function `update_and_borrow_mut_delegator_vote_delegation` + +Update VoteDelegation of a delegator to up-to-date then borrow_mut it. + + +
fun update_and_borrow_mut_delegator_vote_delegation(pool: &delegation_pool::DelegationPool, governance_records: &mut delegation_pool::GovernanceRecords, delegator: address): &mut delegation_pool::VoteDelegation
+
+ + + +
+Implementation + + +
fun update_and_borrow_mut_delegator_vote_delegation(
+    pool: &DelegationPool,
+    governance_records: &mut GovernanceRecords,
+    delegator: address
+): &mut VoteDelegation {
+    let pool_address = get_pool_address(pool);
+    let locked_until_secs = stake::get_lockup_secs(pool_address);
+
+    let vote_delegation_table = &mut governance_records.vote_delegation;
+    // By default, a delegator's delegated voter is itself.
+    // TODO: recycle storage when VoteDelegation equals to default value.
+    if (!smart_table::contains(vote_delegation_table, delegator)) {
+        return smart_table::borrow_mut_with_default(vote_delegation_table, delegator, VoteDelegation {
+            voter: delegator,
+            last_locked_until_secs: locked_until_secs,
+            pending_voter: delegator,
+        })
+    };
+
+    let vote_delegation = smart_table::borrow_mut(vote_delegation_table, delegator);
+    // A lockup period has passed since last time `vote_delegation` was updated. Pending voter takes effect.
+    if (vote_delegation.last_locked_until_secs < locked_until_secs) {
+        vote_delegation.voter = vote_delegation.pending_voter;
+        vote_delegation.last_locked_until_secs = locked_until_secs;
+    };
+    vote_delegation
+}
+
+ + + +
+ + + +## Function `update_and_borrow_mut_delegated_votes` + +Update DelegatedVotes of a voter to up-to-date then borrow_mut it. + + +
fun update_and_borrow_mut_delegated_votes(pool: &delegation_pool::DelegationPool, governance_records: &mut delegation_pool::GovernanceRecords, voter: address): &mut delegation_pool::DelegatedVotes
+
+ + + +
+Implementation + + +
fun update_and_borrow_mut_delegated_votes(
+    pool: &DelegationPool,
+    governance_records: &mut GovernanceRecords,
+    voter: address
+): &mut DelegatedVotes {
+    let pool_address = get_pool_address(pool);
+    let locked_until_secs = stake::get_lockup_secs(pool_address);
+
+    let delegated_votes_per_voter = &mut governance_records.delegated_votes;
+    // By default, a delegator's voter is itself.
+    // TODO: recycle storage when DelegatedVotes equals to default value.
+    if (!smart_table::contains(delegated_votes_per_voter, voter)) {
+        let active_shares = get_delegator_active_shares(pool, voter);
+        let inactive_shares = get_delegator_pending_inactive_shares(pool, voter);
+        return smart_table::borrow_mut_with_default(delegated_votes_per_voter, voter, DelegatedVotes {
+            active_shares,
+            pending_inactive_shares: inactive_shares,
+            active_shares_next_lockup: active_shares,
+            last_locked_until_secs: locked_until_secs,
+        })
+    };
+
+    let delegated_votes = smart_table::borrow_mut(delegated_votes_per_voter, voter);
+    // A lockup period has passed since last time `delegated_votes` was updated. Next-lockup active shares take effect and pending-inactive shares expire.
+    if (delegated_votes.last_locked_until_secs < locked_until_secs) {
+        delegated_votes.active_shares = delegated_votes.active_shares_next_lockup;
+        delegated_votes.pending_inactive_shares = 0;
+        delegated_votes.last_locked_until_secs = locked_until_secs;
+    };
+    delegated_votes
+}
+
+ + + +
+ + + +## Function `olc_with_index` + + + +
fun olc_with_index(index: u64): delegation_pool::ObservedLockupCycle
+
+ + + +
+Implementation + + +
fun olc_with_index(index: u64): ObservedLockupCycle {
+    ObservedLockupCycle { index }
+}
+
+ + + +
+ + + +## Function `calculate_total_voting_power` + +Given the amounts of shares in active_shares pool and inactive_shares pool, calculate the total voting +power, which equals to the sum of the coin amounts. + + +
fun calculate_total_voting_power(delegation_pool: &delegation_pool::DelegationPool, latest_delegated_votes: &delegation_pool::DelegatedVotes): u64
+
+ + + +
+Implementation + + +
fun calculate_total_voting_power(delegation_pool: &DelegationPool, latest_delegated_votes: &DelegatedVotes): u64 {
+    let active_amount = pool_u64::shares_to_amount(
+        &delegation_pool.active_shares,
+        latest_delegated_votes.active_shares);
+    let pending_inactive_amount = pool_u64::shares_to_amount(
+        pending_inactive_shares_pool(delegation_pool),
+        latest_delegated_votes.pending_inactive_shares);
+    active_amount + pending_inactive_amount
+}
+
+ + + +
+ + + +## Function `calculate_and_update_delegator_voter_internal` + +Update VoteDelegation of a delegator to up-to-date then return the latest voter. + + +
fun calculate_and_update_delegator_voter_internal(pool: &delegation_pool::DelegationPool, governance_records: &mut delegation_pool::GovernanceRecords, delegator: address): address
+
+ + + +
+Implementation + + +
fun calculate_and_update_delegator_voter_internal(
+    pool: &DelegationPool,
+    governance_records: &mut GovernanceRecords,
+    delegator: address
+): address {
+    let vote_delegation = update_and_borrow_mut_delegator_vote_delegation(pool, governance_records, delegator);
+    vote_delegation.voter
+}
+
+ + + +
+ + + +## Function `calculate_and_update_delegated_votes` + +Update DelegatedVotes of a voter to up-to-date then return the total voting power of this voter. + + +
fun calculate_and_update_delegated_votes(pool: &delegation_pool::DelegationPool, governance_records: &mut delegation_pool::GovernanceRecords, voter: address): u64
+
+ + + +
+Implementation + + +
fun calculate_and_update_delegated_votes(
+    pool: &DelegationPool,
+    governance_records: &mut GovernanceRecords,
+    voter: address
+): u64 {
+    let delegated_votes = update_and_borrow_mut_delegated_votes(pool, governance_records, voter);
+    calculate_total_voting_power(pool, delegated_votes)
+}
+
+ + + +
+ + + +## Function `borrow_mut_delegators_allowlist` + + + +
fun borrow_mut_delegators_allowlist(pool_address: address): &mut smart_table::SmartTable<address, bool>
+
+ + + +
+Implementation + + +
inline fun borrow_mut_delegators_allowlist(
+    pool_address: address
+): &mut SmartTable<address, bool> acquires DelegationPoolAllowlisting {
+    &mut borrow_global_mut<DelegationPoolAllowlisting>(pool_address).allowlist
+}
+
+ + + +
+ + + +## Function `set_operator` + +Allows an owner to change the operator of the underlying stake pool. + + +
public entry fun set_operator(owner: &signer, new_operator: address)
+
+ + + +
+Implementation + + +
public entry fun set_operator(
+    owner: &signer,
+    new_operator: address
+) acquires DelegationPoolOwnership, DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage {
+    let pool_address = get_owned_pool_address(signer::address_of(owner));
+    // synchronize delegation and stake pools before any user operation
+    // ensure the old operator is paid its uncommitted commission rewards
+    synchronize_delegation_pool(pool_address);
+    stake::set_operator(&retrieve_stake_pool_owner(borrow_global<DelegationPool>(pool_address)), new_operator);
+}
+
+ + + +
+ + + +## Function `set_beneficiary_for_operator` + +Allows an operator to change its beneficiary. Any existing unpaid commission rewards will be paid to the new +beneficiary. To ensure payment to the current beneficiary, one should first call synchronize_delegation_pool +before switching the beneficiary. An operator can set one beneficiary for delegation pools, not a separate +one for each pool. + + +
public entry fun set_beneficiary_for_operator(operator: &signer, new_beneficiary: address)
+
+ + + +
+Implementation + + +
public entry fun set_beneficiary_for_operator(
+    operator: &signer,
+    new_beneficiary: address
+) acquires BeneficiaryForOperator {
+    assert!(features::operator_beneficiary_change_enabled(), std::error::invalid_state(
+        EOPERATOR_BENEFICIARY_CHANGE_NOT_SUPPORTED
+    ));
+    // The beneficiary address of an operator is stored under the operator's address.
+    // So, the operator does not need to be validated with respect to a staking pool.
+    let operator_addr = signer::address_of(operator);
+    let old_beneficiary = beneficiary_for_operator(operator_addr);
+    if (exists<BeneficiaryForOperator>(operator_addr)) {
+        borrow_global_mut<BeneficiaryForOperator>(operator_addr).beneficiary_for_operator = new_beneficiary;
+    } else {
+        move_to(operator, BeneficiaryForOperator { beneficiary_for_operator: new_beneficiary });
+    };
+
+    emit(SetBeneficiaryForOperator {
+        operator: operator_addr,
+        old_beneficiary,
+        new_beneficiary,
+    });
+}
+
+ + + +
+ + + +## Function `update_commission_percentage` + +Allows an owner to update the commission percentage for the operator of the underlying stake pool. + + +
public entry fun update_commission_percentage(owner: &signer, new_commission_percentage: u64)
+
+ + + +
+Implementation + + +
public entry fun update_commission_percentage(
+    owner: &signer,
+    new_commission_percentage: u64
+) acquires DelegationPoolOwnership, DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage {
+    assert!(features::commission_change_delegation_pool_enabled(), error::invalid_state(
+        ECOMMISSION_RATE_CHANGE_NOT_SUPPORTED
+    ));
+    assert!(new_commission_percentage <= MAX_FEE, error::invalid_argument(EINVALID_COMMISSION_PERCENTAGE));
+    let owner_address = signer::address_of(owner);
+    let pool_address = get_owned_pool_address(owner_address);
+    assert!(
+        operator_commission_percentage(pool_address) + MAX_COMMISSION_INCREASE >= new_commission_percentage,
+        error::invalid_argument(ETOO_LARGE_COMMISSION_INCREASE)
+    );
+    assert!(
+        stake::get_remaining_lockup_secs(pool_address) >= min_remaining_secs_for_commission_change(),
+        error::invalid_state(ETOO_LATE_COMMISSION_CHANGE)
+    );
+
+    // synchronize delegation and stake pools before any user operation. this ensures:
+    // (1) the operator is paid its uncommitted commission rewards with the old commission percentage, and
+    // (2) any pending commission percentage change is applied before the new commission percentage is set.
+    synchronize_delegation_pool(pool_address);
+
+    if (exists<NextCommissionPercentage>(pool_address)) {
+        let commission_percentage = borrow_global_mut<NextCommissionPercentage>(pool_address);
+        commission_percentage.commission_percentage_next_lockup_cycle = new_commission_percentage;
+        commission_percentage.effective_after_secs = stake::get_lockup_secs(pool_address);
+    } else {
+        let delegation_pool = borrow_global<DelegationPool>(pool_address);
+        let pool_signer = account::create_signer_with_capability(&delegation_pool.stake_pool_signer_cap);
+        move_to(&pool_signer, NextCommissionPercentage {
+            commission_percentage_next_lockup_cycle: new_commission_percentage,
+            effective_after_secs: stake::get_lockup_secs(pool_address),
+        });
+    };
+
+    event::emit(CommissionPercentageChange {
+        pool_address,
+        owner: owner_address,
+        commission_percentage_next_lockup_cycle: new_commission_percentage,
+    });
+}
+
+ + + +
+ + + +## Function `set_delegated_voter` + +Allows an owner to change the delegated voter of the underlying stake pool. + + +
public entry fun set_delegated_voter(owner: &signer, new_voter: address)
+
+ + + +
+Implementation + + +
public entry fun set_delegated_voter(
+    owner: &signer,
+    new_voter: address
+) acquires DelegationPoolOwnership, DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage {
+    // Deprecated once partial governance voting is live: voter changes are then
+    // managed per-delegator via `delegate_voting_power` instead of pool-wide.
+    assert!(
+        !features::delegation_pool_partial_governance_voting_enabled(),
+        error::invalid_state(EDEPRECATED_FUNCTION)
+    );
+    let owner_address = signer::address_of(owner);
+    let pool_address = get_owned_pool_address(owner_address);
+    // Bring the delegation pool up to date before touching the stake pool.
+    synchronize_delegation_pool(pool_address);
+    let pool = borrow_global<DelegationPool>(pool_address);
+    let stake_pool_signer = retrieve_stake_pool_owner(pool);
+    stake::set_delegated_voter(&stake_pool_signer, new_voter);
+}
+
+ + + +
+ + + +## Function `delegate_voting_power` + +Allows a delegator to delegate its voting power to a voter. If this delegator already has a delegated voter, +this change won't take effect until the next lockup period. + + +
public entry fun delegate_voting_power(delegator: &signer, pool_address: address, new_voter: address)
+
+ + + +
+Implementation + + +
public entry fun delegate_voting_power(
+    delegator: &signer,
+    pool_address: address,
+    new_voter: address
+) acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage {
+    // Aborts unless partial governance voting has been enabled for this pool.
+    assert_partial_governance_voting_enabled(pool_address);
+
+    // synchronize delegation and stake pools before any user operation
+    synchronize_delegation_pool(pool_address);
+
+    let delegator_address = signer::address_of(delegator);
+    let delegation_pool = borrow_global<DelegationPool>(pool_address);
+    let governance_records = borrow_global_mut<GovernanceRecords>(pool_address);
+    // Look up (and, per the helper's naming, bring up to date) this delegator's
+    // vote-delegation record before reading its pending voter.
+    let delegator_vote_delegation = update_and_borrow_mut_delegator_vote_delegation(
+        delegation_pool,
+        governance_records,
+        delegator_address
+    );
+    let pending_voter: address = delegator_vote_delegation.pending_voter;
+
+    // No need to update if the voter doesn't really change.
+    if (pending_voter != new_voter) {
+        delegator_vote_delegation.pending_voter = new_voter;
+        let active_shares = get_delegator_active_shares(delegation_pool, delegator_address);
+        // Move the delegator's next-lockup voting power from the previously
+        // pending voter to the new one; the change only affects the next lockup.
+        // <active shares> of <pending voter of shareholder> -= <active_shares>
+        // <active shares> of <new voter of shareholder> += <active_shares>
+        let pending_delegated_votes = update_and_borrow_mut_delegated_votes(
+            delegation_pool,
+            governance_records,
+            pending_voter
+        );
+        pending_delegated_votes.active_shares_next_lockup =
+            pending_delegated_votes.active_shares_next_lockup - active_shares;
+
+        let new_delegated_votes = update_and_borrow_mut_delegated_votes(
+            delegation_pool,
+            governance_records,
+            new_voter
+        );
+        new_delegated_votes.active_shares_next_lockup =
+            new_delegated_votes.active_shares_next_lockup + active_shares;
+    };
+
+    // New-style module event, emitted only once event migration is enabled;
+    // the handle-based event below is always emitted.
+    if (features::module_event_migration_enabled()) {
+        event::emit(DelegateVotingPower {
+            pool_address,
+            delegator: delegator_address,
+            voter: new_voter,
+        })
+    };
+
+    event::emit_event(&mut governance_records.delegate_voting_power_events, DelegateVotingPowerEvent {
+        pool_address,
+        delegator: delegator_address,
+        voter: new_voter,
+    });
+}
+
+ + + +
+ + + +## Function `enable_delegators_allowlisting` + +Enable delegators allowlisting as the pool owner. + + +
public entry fun enable_delegators_allowlisting(owner: &signer)
+
+ + + +
+Implementation + + +
public entry fun enable_delegators_allowlisting(
+    owner: &signer,
+) acquires DelegationPoolOwnership, DelegationPool {
+    // Feature-gated: abort if allowlisting is not supported on chain yet.
+    assert!(
+        features::delegation_pool_allowlisting_enabled(),
+        error::invalid_state(EDELEGATORS_ALLOWLISTING_NOT_SUPPORTED)
+    );
+
+    let pool_address = get_owned_pool_address(signer::address_of(owner));
+    // Enabling twice is a silent no-op: only act when no allowlist exists yet.
+    if (!allowlisting_enabled(pool_address)) {
+        let stake_pool_signer = retrieve_stake_pool_owner(borrow_global<DelegationPool>(pool_address));
+        let allowlist = smart_table::new<address, bool>();
+        move_to(&stake_pool_signer, DelegationPoolAllowlisting { allowlist });
+        event::emit(EnableDelegatorsAllowlisting { pool_address });
+    };
+}
+
+ + + +
+ + + +## Function `disable_delegators_allowlisting` + +Disable delegators allowlisting as the pool owner. The existing allowlist will be emptied. + + +
public entry fun disable_delegators_allowlisting(owner: &signer)
+
+ + + +
+Implementation + + +
public entry fun disable_delegators_allowlisting(
+    owner: &signer,
+) acquires DelegationPoolOwnership, DelegationPoolAllowlisting {
+    let pool_address = get_owned_pool_address(signer::address_of(owner));
+    assert_allowlisting_enabled(pool_address);
+
+    // Tear down the allowlist resource entirely; destroying iterates all entries,
+    // so an owner with an oversized allowlist can shrink it first via
+    // `remove_delegator_from_allowlist`.
+    let DelegationPoolAllowlisting { allowlist: grants } = move_from<DelegationPoolAllowlisting>(pool_address);
+    smart_table::destroy(grants);
+
+    event::emit(DisableDelegatorsAllowlisting { pool_address });
+}
+
+ + + +
+ + + +## Function `allowlist_delegator` + +Allowlist a delegator as the pool owner. + + +
public entry fun allowlist_delegator(owner: &signer, delegator_address: address)
+
+ + + +
+Implementation + + +
public entry fun allowlist_delegator(
+    owner: &signer,
+    delegator_address: address,
+) acquires DelegationPoolOwnership, DelegationPoolAllowlisting {
+    // Only the pool owner may extend the allowlist, and only when allowlisting is on.
+    let pool_address = get_owned_pool_address(signer::address_of(owner));
+    assert_allowlisting_enabled(pool_address);
+
+    // Re-adding an already-allowlisted delegator is a silent no-op (no event either).
+    if (!delegator_allowlisted(pool_address, delegator_address)) {
+        smart_table::add(borrow_mut_delegators_allowlist(pool_address), delegator_address, true);
+        event::emit(AllowlistDelegator { pool_address, delegator_address });
+    };
+}
+
+ + + +
+ + + +## Function `remove_delegator_from_allowlist` + +Remove a delegator from the allowlist as the pool owner, but do not unlock their stake. + + +
public entry fun remove_delegator_from_allowlist(owner: &signer, delegator_address: address)
+
+ + + +
+Implementation + + +
public entry fun remove_delegator_from_allowlist(
+    owner: &signer,
+    delegator_address: address,
+) acquires DelegationPoolOwnership, DelegationPoolAllowlisting {
+    // Only the pool owner may shrink the allowlist, and only when allowlisting is on.
+    let pool_address = get_owned_pool_address(signer::address_of(owner));
+    assert_allowlisting_enabled(pool_address);
+
+    // Removing an address that is not allowlisted is a silent no-op (no event either).
+    // Note: this does NOT unlock the delegator's stake; use `evict_delegator` for that.
+    if (delegator_allowlisted(pool_address, delegator_address)) {
+        smart_table::remove(borrow_mut_delegators_allowlist(pool_address), delegator_address);
+        event::emit(RemoveDelegatorFromAllowlist { pool_address, delegator_address });
+    };
+}
+
+ + + +
+ + + +## Function `evict_delegator` + +Evict a delegator that is not allowlisted by unlocking their entire stake. + + +
public entry fun evict_delegator(owner: &signer, delegator_address: address)
+
+ + + +
+Implementation + + +
public entry fun evict_delegator(
+    owner: &signer,
+    delegator_address: address,
+) acquires DelegationPoolOwnership, DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage, DelegationPoolAllowlisting {
+    // Only the pool owner can evict, and only from a pool with allowlisting enabled.
+    let pool_address = get_owned_pool_address(signer::address_of(owner));
+    assert_allowlisting_enabled(pool_address);
+    // A delegator still on the allowlist cannot be evicted.
+    assert!(
+        !delegator_allowlisted(pool_address, delegator_address),
+        error::invalid_state(ECANNOT_EVICT_ALLOWLISTED_DELEGATOR)
+    );
+
+    // synchronize pool in order to query latest balance of delegator
+    synchronize_delegation_pool(pool_address);
+
+    let pool = borrow_global<DelegationPool>(pool_address);
+    // Nothing to evict if the delegator holds no active shares; exit without an event.
+    if (get_delegator_active_shares(pool, delegator_address) == 0) { return };
+
+    // Schedule the delegator's entire active balance for unlocking.
+    unlock_internal(delegator_address, pool_address, pool_u64::balance(&pool.active_shares, delegator_address));
+
+    event::emit(EvictDelegator { pool_address, delegator_address });
+}
+
+ + + +
+ + + +## Function `add_stake` + +Add amount of coins to the delegation pool pool_address. + + +
public entry fun add_stake(delegator: &signer, pool_address: address, amount: u64)
+
+ + + +
+Implementation + + +
public entry fun add_stake(
+    delegator: &signer,
+    pool_address: address,
+    amount: u64
+) acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage, DelegationPoolAllowlisting {
+    // short-circuit if amount to add is 0 so no event is emitted
+    if (amount == 0) { return };
+
+    let delegator_address = signer::address_of(delegator);
+    // abort if the delegator is not allowed to stake on this pool (see helper)
+    assert_delegator_allowlisted(pool_address, delegator_address);
+
+    // synchronize delegation and stake pools before any user operation
+    synchronize_delegation_pool(pool_address);
+
+    // fee to be charged for adding `amount` stake on this delegation pool at this epoch
+    let add_stake_fee = get_add_stake_fee(pool_address, amount);
+
+    let pool = borrow_global_mut<DelegationPool>(pool_address);
+
+    // stake the entire amount to the stake pool
+    aptos_account::transfer(delegator, pool_address, amount);
+    stake::add_stake(&retrieve_stake_pool_owner(pool), amount);
+
+    // but buy shares for delegator just for the remaining amount after fee
+    buy_in_active_shares(pool, delegator_address, amount - add_stake_fee);
+    assert_min_active_balance(pool, delegator_address);
+
+    // grant temporary ownership over `add_stake` fees to a separate shareholder in order to:
+    // - not mistake them for rewards to pay the operator from
+    // - distribute them together with the `active` rewards when this epoch ends
+    // in order to appreciate all shares on the active pool atomically
+    buy_in_active_shares(pool, NULL_SHAREHOLDER, add_stake_fee);
+
+    // New-style module event, emitted only once event migration is enabled;
+    // the handle-based event below is always emitted.
+    if (features::module_event_migration_enabled()) {
+        event::emit(
+            AddStake {
+                pool_address,
+                delegator_address,
+                amount_added: amount,
+                add_stake_fee,
+            },
+        );
+    };
+
+    event::emit_event(
+        &mut pool.add_stake_events,
+        AddStakeEvent {
+            pool_address,
+            delegator_address,
+            amount_added: amount,
+            add_stake_fee,
+        },
+    );
+}
+
+ + + +
+ + + +## Function `unlock` + +Unlock amount from the active + pending_active stake of delegator or +at most how much active stake there is on the stake pool. + + +
public entry fun unlock(delegator: &signer, pool_address: address, amount: u64)
+
+ + + +
+Implementation + + +
public entry fun unlock(
+    delegator: &signer,
+    pool_address: address,
+    amount: u64
+) acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage {
+    // A zero unlock is a no-op: skip synchronization and emit no event.
+    if (amount > 0) {
+        // Bring delegation and stake pools in sync before the user operation.
+        synchronize_delegation_pool(pool_address);
+        unlock_internal(signer::address_of(delegator), pool_address, amount);
+    };
+}
+
+ + + +
+ + + +## Function `unlock_internal` + + + +
fun unlock_internal(delegator_address: address, pool_address: address, amount: u64)
+
+ + + +
+Implementation + + +
fun unlock_internal(
+    delegator_address: address,
+    pool_address: address,
+    amount: u64
+) acquires DelegationPool, GovernanceRecords {
+    // Move up to `amount` of the delegator's active stake to pending_inactive.
+    // Callers (`unlock`, `evict_delegator`) synchronize the pool beforehand.
+    // NULL_SHAREHOLDER only ever holds `add_stake` fee shares; it cannot unlock.
+    assert!(delegator_address != NULL_SHAREHOLDER, error::invalid_argument(ECANNOT_UNLOCK_NULL_SHAREHOLDER));
+
+    // fail unlock of more stake than `active` on the stake pool
+    let (active, _, _, _) = stake::get_stake(pool_address);
+    assert!(amount <= active, error::invalid_argument(ENOT_ENOUGH_ACTIVE_STAKE_TO_UNLOCK));
+
+    let pool = borrow_global_mut<DelegationPool>(pool_address);
+    // `amount` may be adjusted by the min-stake helper so that both the remaining
+    // active balance and the resulting pending_inactive balance stay valid.
+    amount = coins_to_transfer_to_ensure_min_stake(
+        &pool.active_shares,
+        pending_inactive_shares_pool(pool),
+        delegator_address,
+        amount,
+    );
+    // redeem returns the amount actually extracted (capped at the delegator's balance)
+    amount = redeem_active_shares(pool, delegator_address, amount);
+
+    stake::unlock(&retrieve_stake_pool_owner(pool), amount);
+
+    buy_in_pending_inactive_shares(pool, delegator_address, amount);
+    assert_min_pending_inactive_balance(pool, delegator_address);
+
+    // New-style module event, emitted only once event migration is enabled;
+    // the handle-based event below is always emitted.
+    if (features::module_event_migration_enabled()) {
+        event::emit(
+            UnlockStake {
+                pool_address,
+                delegator_address,
+                amount_unlocked: amount,
+            },
+        );
+    };
+
+    event::emit_event(
+        &mut pool.unlock_stake_events,
+        UnlockStakeEvent {
+            pool_address,
+            delegator_address,
+            amount_unlocked: amount,
+        },
+    );
+}
+
+ + + +
+ + + +## Function `reactivate_stake` + +Move amount of coins from pending_inactive to active. + + +
public entry fun reactivate_stake(delegator: &signer, pool_address: address, amount: u64)
+
+ + + +
+Implementation + + +
public entry fun reactivate_stake(
+    delegator: &signer,
+    pool_address: address,
+    amount: u64
+) acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage, DelegationPoolAllowlisting {
+    // short-circuit if amount to reactivate is 0 so no event is emitted
+    if (amount == 0) { return };
+
+    let delegator_address = signer::address_of(delegator);
+    // abort if the delegator is not allowed to stake on this pool (see helper)
+    assert_delegator_allowlisted(pool_address, delegator_address);
+
+    // synchronize delegation and stake pools before any user operation
+    synchronize_delegation_pool(pool_address);
+
+    let pool = borrow_global_mut<DelegationPool>(pool_address);
+    // `amount` may be adjusted by the min-stake helper so that both the remaining
+    // pending_inactive balance and the resulting active balance stay valid.
+    amount = coins_to_transfer_to_ensure_min_stake(
+        pending_inactive_shares_pool(pool),
+        &pool.active_shares,
+        delegator_address,
+        amount,
+    );
+    // redeem from the pending_inactive pool at the CURRENT observed lockup cycle:
+    // only not-yet-inactivated stake can be reactivated
+    let observed_lockup_cycle = pool.observed_lockup_cycle;
+    amount = redeem_inactive_shares(pool, delegator_address, amount, observed_lockup_cycle);
+
+    stake::reactivate_stake(&retrieve_stake_pool_owner(pool), amount);
+
+    buy_in_active_shares(pool, delegator_address, amount);
+    assert_min_active_balance(pool, delegator_address);
+
+    // New-style module event, emitted only once event migration is enabled;
+    // the handle-based event below is always emitted.
+    if (features::module_event_migration_enabled()) {
+        event::emit(
+            ReactivateStake {
+                pool_address,
+                delegator_address,
+                amount_reactivated: amount,
+            },
+        );
+    };
+
+    event::emit_event(
+        &mut pool.reactivate_stake_events,
+        ReactivateStakeEvent {
+            pool_address,
+            delegator_address,
+            amount_reactivated: amount,
+        },
+    );
+}
+
+ + + +
+ + + +## Function `withdraw` + +Withdraw amount of owned inactive stake from the delegation pool at pool_address. + + +
public entry fun withdraw(delegator: &signer, pool_address: address, amount: u64)
+
+ + + +
+Implementation + + +
public entry fun withdraw(
+    delegator: &signer,
+    pool_address: address,
+    amount: u64
+) acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage {
+    // Reject zero-amount withdrawals outright.
+    assert!(amount > 0, error::invalid_argument(EWITHDRAW_ZERO_STAKE));
+    // Bring delegation and stake pools in sync before the user operation.
+    synchronize_delegation_pool(pool_address);
+    let delegator_address = signer::address_of(delegator);
+    let pool = borrow_global_mut<DelegationPool>(pool_address);
+    withdraw_internal(pool, delegator_address, amount);
+}
+
+ + + +
+ + + +## Function `withdraw_internal` + + + +
fun withdraw_internal(pool: &mut delegation_pool::DelegationPool, delegator_address: address, amount: u64)
+
+ + + +
+Implementation + + +
fun withdraw_internal(
+    pool: &mut DelegationPool,
+    delegator_address: address,
+    amount: u64
+) acquires GovernanceRecords {
+    // Withdraw at most `amount` of the delegator's inactive (or currently
+    // withdrawable pending_inactive) stake and transfer it to the delegator.
+    // Callers (e.g. `withdraw`) synchronize the delegation pool beforehand.
+    // TODO: recycle storage when a delegator fully exits the delegation pool.
+    // short-circuit if amount to withdraw is 0 so no event is emitted
+    if (amount == 0) { return };
+
+    let pool_address = get_pool_address(pool);
+    let (withdrawal_exists, withdrawal_olc) = pending_withdrawal_exists(pool, delegator_address);
+    // exit if no withdrawal or (it is pending and cannot withdraw pending_inactive stake from stake pool)
+    if (!(
+        withdrawal_exists &&
+            (withdrawal_olc.index < pool.observed_lockup_cycle.index || can_withdraw_pending_inactive(pool_address))
+    )) { return };
+
+    // withdrawing pending_inactive stake of the current OLC: cap `amount` so the
+    // delegator's remaining pending_inactive balance stays valid
+    if (withdrawal_olc.index == pool.observed_lockup_cycle.index) {
+        amount = coins_to_redeem_to_ensure_min_stake(
+            pending_inactive_shares_pool(pool),
+            delegator_address,
+            amount,
+        )
+    };
+    amount = redeem_inactive_shares(pool, delegator_address, amount, withdrawal_olc);
+
+    let stake_pool_owner = &retrieve_stake_pool_owner(pool);
+    // stake pool will inactivate entire pending_inactive stake at `stake::withdraw` to make it withdrawable
+    // however, bypassing the inactivation of excess stake (inactivated but not withdrawn) ensures
+    // the OLC is not advanced indefinitely on `unlock`-`withdraw` paired calls
+    if (can_withdraw_pending_inactive(pool_address)) {
+        // get excess stake before being entirely inactivated
+        let (_, _, _, pending_inactive) = stake::get_stake(pool_address);
+        if (withdrawal_olc.index == pool.observed_lockup_cycle.index) {
+            // `amount` less excess if withdrawing pending_inactive stake
+            pending_inactive = pending_inactive - amount
+        };
+        // escape excess stake from inactivation
+        stake::reactivate_stake(stake_pool_owner, pending_inactive);
+        stake::withdraw(stake_pool_owner, amount);
+        // restore excess stake to the pending_inactive state
+        stake::unlock(stake_pool_owner, pending_inactive);
+    } else {
+        // no excess stake if `stake::withdraw` does not inactivate at all
+        stake::withdraw(stake_pool_owner, amount);
+    };
+    aptos_account::transfer(stake_pool_owner, delegator_address, amount);
+
+    // commit withdrawal of possibly inactive stake to the `total_coins_inactive`
+    // known by the delegation pool in order to not mistake it for slashing at next synchronization
+    let (_, inactive, _, _) = stake::get_stake(pool_address);
+    pool.total_coins_inactive = inactive;
+
+    // New-style module event, emitted only once event migration is enabled;
+    // the handle-based event below is always emitted.
+    if (features::module_event_migration_enabled()) {
+        event::emit(
+            WithdrawStake {
+                pool_address,
+                delegator_address,
+                amount_withdrawn: amount,
+            },
+        );
+    };
+
+    event::emit_event(
+        &mut pool.withdraw_stake_events,
+        WithdrawStakeEvent {
+            pool_address,
+            delegator_address,
+            amount_withdrawn: amount,
+        },
+    );
+}
+
+ + + +
+ + + +## Function `pending_withdrawal_exists` + +Return the unique observed lockup cycle where delegator delegator_address may have +unlocking (or already unlocked) stake to be withdrawn from delegation pool pool. +A bool is returned to signal if a pending withdrawal exists at all. + + +
fun pending_withdrawal_exists(pool: &delegation_pool::DelegationPool, delegator_address: address): (bool, delegation_pool::ObservedLockupCycle)
+
+ + + +
+Implementation + + +
fun pending_withdrawal_exists(pool: &DelegationPool, delegator_address: address): (bool, ObservedLockupCycle) {
+    // A delegator has at most one pending withdrawal, keyed by its address.
+    if (!table::contains(&pool.pending_withdrawals, delegator_address)) {
+        // No pending withdrawal: report OLC 0 purely as a placeholder value.
+        (false, olc_with_index(0))
+    } else {
+        (true, *table::borrow(&pool.pending_withdrawals, delegator_address))
+    }
+}
+
+ + + +
+ + + +## Function `pending_inactive_shares_pool_mut` + +Return a mutable reference to the shares pool of pending_inactive stake on the +delegation pool, always the last item in inactive_shares. + + +
fun pending_inactive_shares_pool_mut(pool: &mut delegation_pool::DelegationPool): &mut pool_u64_unbound::Pool
+
+ + + +
+Implementation + + +
fun pending_inactive_shares_pool_mut(pool: &mut DelegationPool): &mut pool_u64::Pool {
+    // Copy the OLC out first so the read does not overlap the mutable borrow below.
+    let olc = pool.observed_lockup_cycle;
+    table::borrow_mut(&mut pool.inactive_shares, olc)
+}
+
+ + + +
+ + + +## Function `pending_inactive_shares_pool` + + + +
fun pending_inactive_shares_pool(pool: &delegation_pool::DelegationPool): &pool_u64_unbound::Pool
+
+ + + +
+Implementation + + +
fun pending_inactive_shares_pool(pool: &DelegationPool): &pool_u64::Pool {
+    // The pending_inactive shares always live at the current observed lockup cycle.
+    let olc = pool.observed_lockup_cycle;
+    table::borrow(&pool.inactive_shares, olc)
+}
+
+ + + +
+ + + +## Function `execute_pending_withdrawal` + +Execute the pending withdrawal of delegator_address on delegation pool pool +if existing and already inactive to allow the creation of a new one. +pending_inactive stake would be left untouched even if withdrawable and should +be explicitly withdrawn by delegator + + +
fun execute_pending_withdrawal(pool: &mut delegation_pool::DelegationPool, delegator_address: address)
+
+ + + +
+Implementation + + +
fun execute_pending_withdrawal(pool: &mut DelegationPool, delegator_address: address) acquires GovernanceRecords {
+    let (found, olc) = pending_withdrawal_exists(pool, delegator_address);
+    // Only a withdrawal from a past OLC is already inactive; pending_inactive
+    // stake of the current OLC is left untouched even if withdrawable.
+    if (found && olc.index < pool.observed_lockup_cycle.index) {
+        // MAX_U64 withdraws the full inactive balance of that past OLC.
+        withdraw_internal(pool, delegator_address, MAX_U64);
+    }
+}
+
+ + + +
+ + + +## Function `buy_in_active_shares` + +Buy shares into the active pool on behalf of delegator shareholder who +deposited coins_amount. This function doesn't make any coin transfer. + + +
fun buy_in_active_shares(pool: &mut delegation_pool::DelegationPool, shareholder: address, coins_amount: u64): u128
+
+ + + +
+Implementation + + +
fun buy_in_active_shares(
+    pool: &mut DelegationPool,
+    shareholder: address,
+    coins_amount: u64,
+): u128 acquires GovernanceRecords {
+    // Returns the number of active-pool shares bought (0 if `coins_amount`
+    // converts to no shares at the current share price).
+    let new_shares = pool_u64::amount_to_shares(&pool.active_shares, coins_amount);
+    // No need to buy 0 shares.
+    if (new_shares == 0) { return 0 };
+
+    // Always update governance records before any change to the shares pool.
+    let pool_address = get_pool_address(pool);
+    if (partial_governance_voting_enabled(pool_address)) {
+        update_governance_records_for_buy_in_active_shares(pool, pool_address, new_shares, shareholder);
+    };
+
+    pool_u64::buy_in(&mut pool.active_shares, shareholder, coins_amount);
+    new_shares
+}
+
+ + + +
+ + + +## Function `buy_in_pending_inactive_shares` + +Buy shares into the pending_inactive pool on behalf of delegator shareholder who +redeemed coins_amount from the active pool to schedule it for unlocking. +If delegator's pending withdrawal exists and has been inactivated, execute it firstly +to ensure there is always only one withdrawal request. + + +
fun buy_in_pending_inactive_shares(pool: &mut delegation_pool::DelegationPool, shareholder: address, coins_amount: u64): u128
+
+ + + +
+Implementation + + +
fun buy_in_pending_inactive_shares(
+    pool: &mut DelegationPool,
+    shareholder: address,
+    coins_amount: u64,
+): u128 acquires GovernanceRecords {
+    // Returns the number of pending_inactive shares bought (0 if `coins_amount`
+    // converts to no shares at the current share price).
+    let new_shares = pool_u64::amount_to_shares(pending_inactive_shares_pool(pool), coins_amount);
+    // never create a new pending withdrawal unless delegator owns some pending_inactive shares
+    if (new_shares == 0) { return 0 };
+
+    // Always update governance records before any change to the shares pool.
+    let pool_address = get_pool_address(pool);
+    if (partial_governance_voting_enabled(pool_address)) {
+        update_governance_records_for_buy_in_pending_inactive_shares(pool, pool_address, new_shares, shareholder);
+    };
+
+    // cannot buy inactive shares, only pending_inactive at current lockup cycle
+    pool_u64::buy_in(pending_inactive_shares_pool_mut(pool), shareholder, coins_amount);
+
+    // execute the pending withdrawal if exists and is inactive before creating a new one
+    execute_pending_withdrawal(pool, shareholder);
+
+    // save observed lockup cycle for the new pending withdrawal
+    let observed_lockup_cycle = pool.observed_lockup_cycle;
+    // `borrow_mut_with_default` either records a new pending withdrawal at the
+    // current OLC or reads an existing one; if the existing one is at a different
+    // OLC there would be two concurrent withdrawal requests, which is forbidden.
+    assert!(*table::borrow_mut_with_default(
+        &mut pool.pending_withdrawals,
+        shareholder,
+        observed_lockup_cycle
+    ) == observed_lockup_cycle,
+        error::invalid_state(EPENDING_WITHDRAWAL_EXISTS)
+    );
+
+    new_shares
+}
+
+ + + +
+ + + +## Function `amount_to_shares_to_redeem` + +Convert coins_amount of coins to be redeemed from shares pool shares_pool +to the exact number of shares to redeem in order to achieve this. + + +
fun amount_to_shares_to_redeem(shares_pool: &pool_u64_unbound::Pool, shareholder: address, coins_amount: u64): u128
+
+ + + +
+Implementation + + +
fun amount_to_shares_to_redeem(
+    shares_pool: &pool_u64::Pool,
+    shareholder: address,
+    coins_amount: u64,
+): u128 {
+    // Translate a coin amount into the exact share count to redeem for it.
+    let owned_coins = pool_u64::balance(shares_pool, shareholder);
+    if (coins_amount < owned_coins) {
+        pool_u64::amount_to_shares(shares_pool, coins_amount)
+    } else {
+        // Requested the full balance (or more): redeem every share the
+        // shareholder owns so the subsequent redeem cannot abort with
+        // `EINSUFFICIENT_SHARES` due to rounding.
+        pool_u64::shares(shares_pool, shareholder)
+    }
+}
+
+ + + +
+ + + +## Function `redeem_active_shares` + +Redeem shares from the active pool on behalf of delegator shareholder who +wants to unlock coins_amount of its active stake. +Extracted coins will be used to buy shares into the pending_inactive pool and +be available for withdrawal when current OLC ends. + + +
fun redeem_active_shares(pool: &mut delegation_pool::DelegationPool, shareholder: address, coins_amount: u64): u64
+
+ + + +
+Implementation + + +
fun redeem_active_shares(
+    pool: &mut DelegationPool,
+    shareholder: address,
+    coins_amount: u64,
+): u64 acquires GovernanceRecords {
+    // Returns the amount of coins actually redeemed from the active pool,
+    // capped at the shareholder's balance (0 if they own no shares).
+    let shares_to_redeem = amount_to_shares_to_redeem(&pool.active_shares, shareholder, coins_amount);
+    // silently exit if not a shareholder otherwise redeem would fail with `ESHAREHOLDER_NOT_FOUND`
+    if (shares_to_redeem == 0) return 0;
+
+    // Always update governance records before any change to the shares pool.
+    let pool_address = get_pool_address(pool);
+    // (note: "governanace" is a pre-existing typo in the helper's name)
+    if (partial_governance_voting_enabled(pool_address)) {
+        update_governanace_records_for_redeem_active_shares(pool, pool_address, shares_to_redeem, shareholder);
+    };
+
+    pool_u64::redeem_shares(&mut pool.active_shares, shareholder, shares_to_redeem)
+}
+
+ + + +
+ + + +## Function `redeem_inactive_shares` + +Redeem shares from the inactive pool at lockup_cycle < current OLC on behalf of +delegator shareholder who wants to withdraw coins_amount of its unlocked stake. +Redeem shares from the pending_inactive pool at lockup_cycle == current OLC on behalf of +delegator shareholder who wants to reactivate coins_amount of its unlocking stake. +For the latter case, extracted coins will be used to buy shares into the active pool and +escape inactivation when current lockup ends. + + +
fun redeem_inactive_shares(pool: &mut delegation_pool::DelegationPool, shareholder: address, coins_amount: u64, lockup_cycle: delegation_pool::ObservedLockupCycle): u64
+
+ + + +
+Implementation + + +
fun redeem_inactive_shares(
+    pool: &mut DelegationPool,
+    shareholder: address,
+    coins_amount: u64,
+    lockup_cycle: ObservedLockupCycle,
+): u64 acquires GovernanceRecords {
+    // Returns the amount of coins actually redeemed from the inactive pool at
+    // `lockup_cycle` (past OLC: withdrawal) or the pending_inactive pool
+    // (current OLC: reactivation), capped at the shareholder's balance.
+    let shares_to_redeem = amount_to_shares_to_redeem(
+        table::borrow(&pool.inactive_shares, lockup_cycle),
+        shareholder,
+        coins_amount);
+    // silently exit if not a shareholder otherwise redeem would fail with `ESHAREHOLDER_NOT_FOUND`
+    if (shares_to_redeem == 0) return 0;
+
+    // Always update governance records before any change to the shares pool.
+    let pool_address = get_pool_address(pool);
+    // Only redeem shares from the pending_inactive pool at `lockup_cycle` == current OLC.
+    if (partial_governance_voting_enabled(pool_address) && lockup_cycle.index == pool.observed_lockup_cycle.index) {
+        update_governanace_records_for_redeem_pending_inactive_shares(
+            pool,
+            pool_address,
+            shares_to_redeem,
+            shareholder
+        );
+    };
+
+    let inactive_shares = table::borrow_mut(&mut pool.inactive_shares, lockup_cycle);
+    // 1. reaching here means delegator owns inactive/pending_inactive shares at OLC `lockup_cycle`
+    let redeemed_coins = pool_u64::redeem_shares(inactive_shares, shareholder, shares_to_redeem);
+
+    // if entirely reactivated pending_inactive stake or withdrawn inactive one,
+    // re-enable unlocking for delegator by deleting this pending withdrawal
+    if (pool_u64::shares(inactive_shares, shareholder) == 0) {
+        // 2. a delegator owns inactive/pending_inactive shares only at the OLC of its pending withdrawal
+        // 1 & 2: the pending withdrawal itself has been emptied of shares and can be safely deleted
+        table::remove(&mut pool.pending_withdrawals, shareholder);
+    };
+    // destroy inactive shares pool of past OLC if all its stake has been withdrawn
+    if (lockup_cycle.index < pool.observed_lockup_cycle.index && total_coins(inactive_shares) == 0) {
+        pool_u64::destroy_empty(table::remove(&mut pool.inactive_shares, lockup_cycle));
+    };
+
+    redeemed_coins
+}
+
+ + + +
+ + + +## Function `calculate_stake_pool_drift` + +Calculate stake deviations between the delegation and stake pools in order to +capture the rewards earned in the meantime, resulted operator commission and +whether the lockup expired on the stake pool. + + +
fun calculate_stake_pool_drift(pool: &delegation_pool::DelegationPool): (bool, u64, u64, u64, u64)
+
+ + + +
+Implementation + + +
fun calculate_stake_pool_drift(pool: &DelegationPool): (bool, u64, u64, u64, u64) {
+    // Returns (lockup_cycle_ended, active, pending_inactive, commission_active,
+    // commission_pending_inactive); `active` already includes `pending_active`.
+    let (active, inactive, pending_active, pending_inactive) = stake::get_stake(get_pool_address(pool));
+    // inactive stake can only grow (by inactivation); shrinking means it was slashed
+    assert!(
+        inactive >= pool.total_coins_inactive,
+        error::invalid_state(ESLASHED_INACTIVE_STAKE_ON_PAST_OLC)
+    );
+    // determine whether a new lockup cycle has been ended on the stake pool and
+    // inactivated SOME `pending_inactive` stake which should stop earning rewards now,
+    // thus requiring separation of the `pending_inactive` stake on current observed lockup
+    // and the future one on the newly started lockup
+    let lockup_cycle_ended = inactive > pool.total_coins_inactive;
+
+    // actual coins on stake pool belonging to the active shares pool
+    active = active + pending_active;
+    // actual coins on stake pool belonging to the shares pool hosting `pending_inactive` stake
+    // at current observed lockup cycle, either pending: `pending_inactive` or already inactivated:
+    if (lockup_cycle_ended) {
+        // `inactive` on stake pool = any previous `inactive` stake +
+        // any previous `pending_inactive` stake and its rewards (both inactivated)
+        pending_inactive = inactive - pool.total_coins_inactive
+    };
+
+    // on stake-management operations, total coins on the internal shares pools and individual
+    // stakes on the stake pool are updated simultaneously, thus the only stakes becoming
+    // unsynced are rewards and slashes routed exclusively to/out the stake pool
+
+    // operator `active` rewards not persisted yet to the active shares pool
+    let pool_active = total_coins(&pool.active_shares);
+    // commission = reward delta * percentage / MAX_FEE (basis of the percentage scale)
+    let commission_active = if (active > pool_active) {
+        math64::mul_div(active - pool_active, pool.operator_commission_percentage, MAX_FEE)
+    } else {
+        // handle any slashing applied to `active` stake
+        0
+    };
+    // operator `pending_inactive` rewards not persisted yet to the pending_inactive shares pool
+    let pool_pending_inactive = total_coins(pending_inactive_shares_pool(pool));
+    let commission_pending_inactive = if (pending_inactive > pool_pending_inactive) {
+        math64::mul_div(
+            pending_inactive - pool_pending_inactive,
+            pool.operator_commission_percentage,
+            MAX_FEE
+        )
+    } else {
+        // handle any slashing applied to `pending_inactive` stake
+        0
+    };
+
+    (lockup_cycle_ended, active, pending_inactive, commission_active, commission_pending_inactive)
+}
+
+ + + +
+ + + +## Function `synchronize_delegation_pool` + +Synchronize delegation and stake pools: distribute yet-undetected rewards to the corresponding internal +shares pools, assign commission to operator and eventually prepare delegation pool for a new lockup cycle. + + +
public entry fun synchronize_delegation_pool(pool_address: address)
+
+ + + +
+Implementation + + +
public entry fun synchronize_delegation_pool(
+    pool_address: address
+) acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage {
+    assert_delegation_pool_exists(pool_address);
+    let pool = borrow_global_mut<DelegationPool>(pool_address);
+    let (
+        lockup_cycle_ended,
+        active,
+        pending_inactive,
+        commission_active,
+        commission_pending_inactive
+    ) = calculate_stake_pool_drift(pool);
+
+    // zero `pending_active` stake indicates that either there are no `add_stake` fees or
+    // previous epoch has ended and should release the shares owning the existing fees
+    let (_, _, pending_active, _) = stake::get_stake(pool_address);
+    if (pending_active == 0) {
+        // renounce ownership over the `add_stake` fees by redeeming all shares of
+        // the special shareholder, implicitly their equivalent coins, out of the active shares pool
+        redeem_active_shares(pool, NULL_SHAREHOLDER, MAX_U64);
+    };
+
+    // distribute rewards remaining after commission, to delegators (to already existing shares)
+    // before buying shares for the operator for its entire commission fee
+    // otherwise, operator's new shares would additionally appreciate from rewards it does not own
+
+    // update total coins accumulated by `active` + `pending_active` shares
+    // redeemed `add_stake` fees are restored and distributed to the rest of the pool as rewards
+    pool_u64::update_total_coins(&mut pool.active_shares, active - commission_active);
+    // update total coins accumulated by `pending_inactive` shares at current observed lockup cycle
+    pool_u64::update_total_coins(
+        pending_inactive_shares_pool_mut(pool),
+        pending_inactive - commission_pending_inactive
+    );
+
+    // reward operator its commission out of uncommitted active rewards (`add_stake` fees already excluded)
+    buy_in_active_shares(pool, beneficiary_for_operator(stake::get_operator(pool_address)), commission_active);
+    // reward operator its commission out of uncommitted pending_inactive rewards
+    buy_in_pending_inactive_shares(
+        pool,
+        beneficiary_for_operator(stake::get_operator(pool_address)),
+        commission_pending_inactive
+    );
+
+    event::emit_event(
+        &mut pool.distribute_commission_events,
+        DistributeCommissionEvent {
+            pool_address,
+            operator: stake::get_operator(pool_address),
+            commission_active,
+            commission_pending_inactive,
+        },
+    );
+
+    if (features::operator_beneficiary_change_enabled()) {
+        emit(DistributeCommission {
+            pool_address,
+            operator: stake::get_operator(pool_address),
+            beneficiary: beneficiary_for_operator(stake::get_operator(pool_address)),
+            commission_active,
+            commission_pending_inactive,
+        })
+    };
+
+    // advance lockup cycle on delegation pool if already ended on stake pool (AND stake explicitly inactivated)
+    if (lockup_cycle_ended) {
+        // capture inactive coins over all ended lockup cycles (including this ending one)
+        let (_, inactive, _, _) = stake::get_stake(pool_address);
+        pool.total_coins_inactive = inactive;
+
+        // advance lockup cycle on the delegation pool
+        pool.observed_lockup_cycle.index = pool.observed_lockup_cycle.index + 1;
+        // start new lockup cycle with a fresh shares pool for `pending_inactive` stake
+        table::add(
+            &mut pool.inactive_shares,
+            pool.observed_lockup_cycle,
+            pool_u64::create_with_scaling_factor(SHARES_SCALING_FACTOR)
+        );
+    };
+
+    if (is_next_commission_percentage_effective(pool_address)) {
+        pool.operator_commission_percentage = borrow_global<NextCommissionPercentage>(
+            pool_address
+        ).commission_percentage_next_lockup_cycle;
+    }
+}
+
+ + + +
+ + + +## Function `assert_and_update_proposal_used_voting_power` + + + +
fun assert_and_update_proposal_used_voting_power(governance_records: &mut delegation_pool::GovernanceRecords, pool_address: address, proposal_id: u64, voting_power: u64)
+
+ + + +
+Implementation + + +
inline fun assert_and_update_proposal_used_voting_power(
+    governance_records: &mut GovernanceRecords, pool_address: address, proposal_id: u64, voting_power: u64
+) {
+    let stake_pool_remaining_voting_power = aptos_governance::get_remaining_voting_power(pool_address, proposal_id);
+    let stake_pool_used_voting_power = aptos_governance::get_voting_power(
+        pool_address
+    ) - stake_pool_remaining_voting_power;
+    let proposal_used_voting_power = smart_table::borrow_mut_with_default(
+        &mut governance_records.votes_per_proposal,
+        proposal_id,
+        0
+    );
+    // An edge case: Before enabling partial governance voting on a delegation pool, the delegation pool has
+    // a voter which can vote with all voting power of this delegation pool. If the voter votes on a proposal after
+    // partial governance voting flag is enabled, the delegation pool doesn't have enough voting power on this
+    // proposal for all the delegators. To be fair, no one can vote on this proposal through this delegation pool.
+    // To detect this case, check if the stake pool had used voting power not through delegation_pool module.
+    assert!(
+        stake_pool_used_voting_power == *proposal_used_voting_power,
+        error::invalid_argument(EALREADY_VOTED_BEFORE_ENABLE_PARTIAL_VOTING)
+    );
+    *proposal_used_voting_power = *proposal_used_voting_power + voting_power;
+}
+
+ + + +
+ + + +## Function `update_governance_records_for_buy_in_active_shares` + + + +
fun update_governance_records_for_buy_in_active_shares(pool: &delegation_pool::DelegationPool, pool_address: address, new_shares: u128, shareholder: address)
+
+ + + +
+Implementation + + +
fun update_governance_records_for_buy_in_active_shares(
+    pool: &DelegationPool, pool_address: address, new_shares: u128, shareholder: address
+) acquires GovernanceRecords {
+    // <active shares> of <shareholder> += <new_shares> ---->
+    // <active shares> of <current voter of shareholder> += <new_shares>
+    // <active shares> of <next voter of shareholder> += <new_shares>
+    let governance_records = borrow_global_mut<GovernanceRecords>(pool_address);
+    let vote_delegation = update_and_borrow_mut_delegator_vote_delegation(pool, governance_records, shareholder);
+    let current_voter = vote_delegation.voter;
+    let pending_voter = vote_delegation.pending_voter;
+    let current_delegated_votes =
+        update_and_borrow_mut_delegated_votes(pool, governance_records, current_voter);
+    current_delegated_votes.active_shares = current_delegated_votes.active_shares + new_shares;
+    if (pending_voter == current_voter) {
+        current_delegated_votes.active_shares_next_lockup =
+            current_delegated_votes.active_shares_next_lockup + new_shares;
+    } else {
+        let pending_delegated_votes =
+            update_and_borrow_mut_delegated_votes(pool, governance_records, pending_voter);
+        pending_delegated_votes.active_shares_next_lockup =
+            pending_delegated_votes.active_shares_next_lockup + new_shares;
+    };
+}
+
+ + + +
+ + + +## Function `update_governance_records_for_buy_in_pending_inactive_shares` + + + +
fun update_governance_records_for_buy_in_pending_inactive_shares(pool: &delegation_pool::DelegationPool, pool_address: address, new_shares: u128, shareholder: address)
+
+ + + +
+Implementation + + +
fun update_governance_records_for_buy_in_pending_inactive_shares(
+    pool: &DelegationPool, pool_address: address, new_shares: u128, shareholder: address
+) acquires GovernanceRecords {
+    // <pending inactive shares> of <shareholder> += <new_shares>   ---->
+    // <pending inactive shares> of <current voter of shareholder> += <new_shares>
+    // no impact on <pending inactive shares> of <next voter of shareholder>
+    let governance_records = borrow_global_mut<GovernanceRecords>(pool_address);
+    let current_voter = calculate_and_update_delegator_voter_internal(pool, governance_records, shareholder);
+    let current_delegated_votes = update_and_borrow_mut_delegated_votes(pool, governance_records, current_voter);
+    current_delegated_votes.pending_inactive_shares = current_delegated_votes.pending_inactive_shares + new_shares;
+}
+
+ + + +
+ + + +## Function `update_governanace_records_for_redeem_active_shares` + + + +
fun update_governanace_records_for_redeem_active_shares(pool: &delegation_pool::DelegationPool, pool_address: address, shares_to_redeem: u128, shareholder: address)
+
+ + + +
+Implementation + + +
fun update_governanace_records_for_redeem_active_shares(
+    pool: &DelegationPool, pool_address: address, shares_to_redeem: u128, shareholder: address
+) acquires GovernanceRecords {
+    // <active shares> of <shareholder> -= <shares_to_redeem> ---->
+    // <active shares> of <current voter of shareholder> -= <shares_to_redeem>
+    // <active shares> of <next voter of shareholder> -= <shares_to_redeem>
+    let governance_records = borrow_global_mut<GovernanceRecords>(pool_address);
+    let vote_delegation = update_and_borrow_mut_delegator_vote_delegation(
+        pool,
+        governance_records,
+        shareholder
+    );
+    let current_voter = vote_delegation.voter;
+    let pending_voter = vote_delegation.pending_voter;
+    let current_delegated_votes = update_and_borrow_mut_delegated_votes(pool, governance_records, current_voter);
+    current_delegated_votes.active_shares = current_delegated_votes.active_shares - shares_to_redeem;
+    if (current_voter == pending_voter) {
+        current_delegated_votes.active_shares_next_lockup =
+            current_delegated_votes.active_shares_next_lockup - shares_to_redeem;
+    } else {
+        let pending_delegated_votes =
+            update_and_borrow_mut_delegated_votes(pool, governance_records, pending_voter);
+        pending_delegated_votes.active_shares_next_lockup =
+            pending_delegated_votes.active_shares_next_lockup - shares_to_redeem;
+    };
+}
+
+ + + +
+ + + +## Function `update_governanace_records_for_redeem_pending_inactive_shares` + + + +
fun update_governanace_records_for_redeem_pending_inactive_shares(pool: &delegation_pool::DelegationPool, pool_address: address, shares_to_redeem: u128, shareholder: address)
+
+ + + +
+Implementation + + +
fun update_governanace_records_for_redeem_pending_inactive_shares(
+    pool: &DelegationPool, pool_address: address, shares_to_redeem: u128, shareholder: address
+) acquires GovernanceRecords {
+    // <pending inactive shares> of <shareholder> -= <shares_to_redeem>  ---->
+    // <pending inactive shares> of <current voter of shareholder> -= <shares_to_redeem>
+    // no impact on <pending inactive shares> of <next voter of shareholder>
+    let governance_records = borrow_global_mut<GovernanceRecords>(pool_address);
+    let current_voter = calculate_and_update_delegator_voter_internal(pool, governance_records, shareholder);
+    let current_delegated_votes = update_and_borrow_mut_delegated_votes(pool, governance_records, current_voter);
+    current_delegated_votes.pending_inactive_shares = current_delegated_votes.pending_inactive_shares - shares_to_redeem;
+}
+
+ + + +
+ + + +## Function `multiply_then_divide` + +Deprecated, prefer math64::mul_div + + +
#[deprecated]
+public fun multiply_then_divide(x: u64, y: u64, z: u64): u64
+
+ + + +
+Implementation + + +
public fun multiply_then_divide(x: u64, y: u64, z: u64): u64 {
+    math64::mul_div(x, y, z)
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1Every DelegationPool has only one corresponding StakePool stored at the same address.CriticalUpon calling the initialize_delegation_pool function, a resource account is created from the "owner" signer to host the delegation pool resource and own the underlying stake pool.Audited that the address of StakePool equals address of DelegationPool and the data invariant on the DelegationPool.
2The signer capability within the delegation pool has an address equal to the address of the delegation pool.CriticalThe initialize_delegation_pool function moves the DelegationPool resource to the address associated with stake_pool_signer, which also possesses the signer capability.Audited that the address of signer cap equals address of DelegationPool.
3A delegator holds shares exclusively in one inactive shares pool, which could either be an already inactive pool or the pending_inactive pool.HighThe get_stake function returns the inactive stake owned by a delegator and checks which state the shares are in via the get_pending_withdrawal function.Audited that either inactive or pending_inactive stake after invoking the get_stake function is zero and both are never non-zero.
4The specific pool in which the delegator possesses inactive shares becomes designated as the pending withdrawal pool for that delegator.MediumThe get_pending_withdrawal function checks if any pending withdrawal exists for a delegate address and if there is neither inactive nor pending_inactive stake, the pending_withdrawal_exists returns false.This has been audited.
5The existence of a pending withdrawal implies that it is associated with a pool where the delegator possesses inactive shares.MediumIn the get_pending_withdrawal function, if withdrawal_exists is true, the function returns true and a non-zero amountget_pending_withdrawal has been audited.
6An inactive shares pool should have coins allocated to it; otherwise, it should become deleted.MediumThe redeem_inactive_shares function has a check that destroys the inactive shares pool, given that it is empty.shares pools have been audited.
7The index of the pending withdrawal will not exceed the current OLC on DelegationPool.HighThe get_pending_withdrawal function has a check which ensures that withdrawal_olc.index < pool.observed_lockup_cycle.index.This has been audited.
8Slashing is not possible for inactive stakes.CriticalThe number of inactive staked coins must be greater than or equal to the total_coins_inactive of the pool.This has been audited.
9The delegator's active or pending inactive stake will always meet or exceed the minimum allowed value.MediumThe add_stake, unlock and reactivate_stake functions ensure the active_shares or pending_inactive_shares balance for the delegator is greater than or equal to the MIN_COINS_ON_SHARES_POOL value.Audited the comparison of active_shares or inactive_shares balance for the delegator with the MIN_COINS_ON_SHARES_POOL value.
10The delegation pool exists at a given address.LowFunctions that operate on the DelegationPool abort if there is no DelegationPool struct under the given pool_address.Audited that there is no DelegationPool structure assigned to the pool_address given as a parameter.
11The initialization of the delegation pool is contingent upon enabling the delegation pools feature.CriticalThe initialize_delegation_pool function should proceed if the DELEGATION_POOLS feature is enabled.This has been audited.
+ + + + + + +### Module-level Specification + + +
pragma verify=false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/dispatchable_fungible_asset.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/dispatchable_fungible_asset.md new file mode 100644 index 0000000000000..dca680da9e6bc --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/dispatchable_fungible_asset.md @@ -0,0 +1,651 @@ + + + +# Module `0x1::dispatchable_fungible_asset` + +This defines the fungible asset module that can issue fungible asset of any Metadata object. The +metadata object can be any object that is equipped with the Metadata resource. + +The dispatchable_fungible_asset wraps the existing fungible_asset module and adds the ability for token issuer +to customize the logic for withdraw and deposit operations. For example: + +- Deflation token: a fixed percentage of token will be destructed upon transfer. +- Transfer allowlist: token can only be transferred to addresses in the allow list. +- Predicated transfer: transfer can only happen when a certain predicate has been met. +- Loyalty token: a fixed loyalty will be paid to a designated address when a fungible asset transfer happens + +The api listed here is intended to be an in-place replacement for defi applications that use the fungible_asset api directly +and is safe for non-dispatchable (aka vanilla) fungible assets as well.
+ +See AIP-73 for further discussion + + +- [Resource `TransferRefStore`](#0x1_dispatchable_fungible_asset_TransferRefStore) +- [Constants](#@Constants_0) +- [Function `register_dispatch_functions`](#0x1_dispatchable_fungible_asset_register_dispatch_functions) +- [Function `register_derive_supply_dispatch_function`](#0x1_dispatchable_fungible_asset_register_derive_supply_dispatch_function) +- [Function `withdraw`](#0x1_dispatchable_fungible_asset_withdraw) +- [Function `deposit`](#0x1_dispatchable_fungible_asset_deposit) +- [Function `transfer`](#0x1_dispatchable_fungible_asset_transfer) +- [Function `transfer_assert_minimum_deposit`](#0x1_dispatchable_fungible_asset_transfer_assert_minimum_deposit) +- [Function `derived_balance`](#0x1_dispatchable_fungible_asset_derived_balance) +- [Function `derived_supply`](#0x1_dispatchable_fungible_asset_derived_supply) +- [Function `borrow_transfer_ref`](#0x1_dispatchable_fungible_asset_borrow_transfer_ref) +- [Function `dispatchable_withdraw`](#0x1_dispatchable_fungible_asset_dispatchable_withdraw) +- [Function `dispatchable_deposit`](#0x1_dispatchable_fungible_asset_dispatchable_deposit) +- [Function `dispatchable_derived_balance`](#0x1_dispatchable_fungible_asset_dispatchable_derived_balance) +- [Function `dispatchable_derived_supply`](#0x1_dispatchable_fungible_asset_dispatchable_derived_supply) +- [Specification](#@Specification_1) + - [Function `dispatchable_withdraw`](#@Specification_1_dispatchable_withdraw) + - [Function `dispatchable_deposit`](#@Specification_1_dispatchable_deposit) + - [Function `dispatchable_derived_balance`](#@Specification_1_dispatchable_derived_balance) + - [Function `dispatchable_derived_supply`](#@Specification_1_dispatchable_derived_supply) + + +
use 0x1::error;
+use 0x1::features;
+use 0x1::function_info;
+use 0x1::fungible_asset;
+use 0x1::object;
+use 0x1::option;
+
+ + + + + +## Resource `TransferRefStore` + + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct TransferRefStore has key
+
+ + + +
+Fields + + +
+
+transfer_ref: fungible_asset::TransferRef +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Feature is not activated yet on the network. + + +
const ENOT_ACTIVATED: u64 = 3;
+
+ + + + + +Recipient is not getting the guaranteed value; + + +
const EAMOUNT_MISMATCH: u64 = 2;
+
+ + + + + +Dispatch target is not loaded. + + +
const ENOT_LOADED: u64 = 4;
+
+ + + + + +TransferRefStore doesn't exist on the fungible asset type. + + +
const ESTORE_NOT_FOUND: u64 = 1;
+
+ + + + + +## Function `register_dispatch_functions` + + + +
public fun register_dispatch_functions(constructor_ref: &object::ConstructorRef, withdraw_function: option::Option<function_info::FunctionInfo>, deposit_function: option::Option<function_info::FunctionInfo>, derived_balance_function: option::Option<function_info::FunctionInfo>)
+
+ + + +
+Implementation + + +
public fun register_dispatch_functions(
+    constructor_ref: &ConstructorRef,
+    withdraw_function: Option<FunctionInfo>,
+    deposit_function: Option<FunctionInfo>,
+    derived_balance_function: Option<FunctionInfo>,
+) {
+    fungible_asset::register_dispatch_functions(
+        constructor_ref,
+        withdraw_function,
+        deposit_function,
+        derived_balance_function,
+    );
+    let store_obj = &object::generate_signer(constructor_ref);
+    move_to<TransferRefStore>(
+        store_obj,
+        TransferRefStore {
+            transfer_ref: fungible_asset::generate_transfer_ref(constructor_ref),
+        }
+    );
+}
+
+ + + +
+ + + +## Function `register_derive_supply_dispatch_function` + + + +
public fun register_derive_supply_dispatch_function(constructor_ref: &object::ConstructorRef, dispatch_function: option::Option<function_info::FunctionInfo>)
+
+ + + +
+Implementation + + +
public fun register_derive_supply_dispatch_function(
+    constructor_ref: &ConstructorRef,
+    dispatch_function: Option<FunctionInfo>
+) {
+    fungible_asset::register_derive_supply_dispatch_function(
+        constructor_ref,
+        dispatch_function
+    );
+}
+
+ + + +
+ + + +## Function `withdraw` + +Withdraw amount of the fungible asset from store by the owner. + +The semantics of withdraw will be governed by the function specified in DispatchFunctionStore. + + +
public fun withdraw<T: key>(owner: &signer, store: object::Object<T>, amount: u64): fungible_asset::FungibleAsset
+
+ + + +
+Implementation + + +
public fun withdraw<T: key>(
+    owner: &signer,
+    store: Object<T>,
+    amount: u64,
+): FungibleAsset acquires TransferRefStore {
+    fungible_asset::withdraw_sanity_check(owner, store, false);
+    let func_opt = fungible_asset::withdraw_dispatch_function(store);
+    if (option::is_some(&func_opt)) {
+        assert!(
+            features::dispatchable_fungible_asset_enabled(),
+            error::aborted(ENOT_ACTIVATED)
+        );
+        let start_balance = fungible_asset::balance(store);
+        let func = option::borrow(&func_opt);
+        function_info::load_module_from_function(func);
+        let fa = dispatchable_withdraw(
+            store,
+            amount,
+            borrow_transfer_ref(store),
+            func,
+        );
+        let end_balance = fungible_asset::balance(store);
+        assert!(amount <= start_balance - end_balance, error::aborted(EAMOUNT_MISMATCH));
+        fa
+    } else {
+        fungible_asset::withdraw_internal(object::object_address(&store), amount)
+    }
+}
+
+ + + +
+ + + +## Function `deposit` + +Deposit amount of the fungible asset to store. + +The semantics of deposit will be governed by the function specified in DispatchFunctionStore. + + +
public fun deposit<T: key>(store: object::Object<T>, fa: fungible_asset::FungibleAsset)
+
+ + + +
+Implementation + + +
public fun deposit<T: key>(store: Object<T>, fa: FungibleAsset) acquires TransferRefStore {
+    fungible_asset::deposit_sanity_check(store, false);
+    let func_opt = fungible_asset::deposit_dispatch_function(store);
+    if (option::is_some(&func_opt)) {
+        assert!(
+            features::dispatchable_fungible_asset_enabled(),
+            error::aborted(ENOT_ACTIVATED)
+        );
+        let func = option::borrow(&func_opt);
+        function_info::load_module_from_function(func);
+        dispatchable_deposit(
+            store,
+            fa,
+            borrow_transfer_ref(store),
+            func
+        )
+    } else {
+        fungible_asset::deposit_internal(object::object_address(&store), fa)
+    }
+}
+
+ + + +
+ + + +## Function `transfer` + +Transfer an amount of fungible asset from from_store, which should be owned by sender, to receiver. +Note: it does not move the underlying object. + + +
public entry fun transfer<T: key>(sender: &signer, from: object::Object<T>, to: object::Object<T>, amount: u64)
+
+ + + +
+Implementation + + +
public entry fun transfer<T: key>(
+    sender: &signer,
+    from: Object<T>,
+    to: Object<T>,
+    amount: u64,
+) acquires TransferRefStore {
+    let fa = withdraw(sender, from, amount);
+    deposit(to, fa);
+}
+
+ + + +
+ + + +## Function `transfer_assert_minimum_deposit` + +Transfer an amount of fungible asset from from_store, which should be owned by sender, to receiver. +The recipient is guaranteed to receive at least the expected amount. +Note: it does not move the underlying object. + + +
public entry fun transfer_assert_minimum_deposit<T: key>(sender: &signer, from: object::Object<T>, to: object::Object<T>, amount: u64, expected: u64)
+
+ + + +
+Implementation + + +
public entry fun transfer_assert_minimum_deposit<T: key>(
+    sender: &signer,
+    from: Object<T>,
+    to: Object<T>,
+    amount: u64,
+    expected: u64
+) acquires TransferRefStore {
+    let start = fungible_asset::balance(to);
+    let fa = withdraw(sender, from, amount);
+    deposit(to, fa);
+    let end = fungible_asset::balance(to);
+    assert!(end - start >= expected, error::aborted(EAMOUNT_MISMATCH));
+}
+
+ + + +
+ + + +## Function `derived_balance` + +Get the derived value of store using the overloaded hook. + +The semantics of value will be governed by the function specified in DispatchFunctionStore. + + +
#[view]
+public fun derived_balance<T: key>(store: object::Object<T>): u64
+
+ + + +
+Implementation + + +
public fun derived_balance<T: key>(store: Object<T>): u64 {
+    let func_opt = fungible_asset::derived_balance_dispatch_function(store);
+    if (option::is_some(&func_opt)) {
+        assert!(
+            features::dispatchable_fungible_asset_enabled(),
+            error::aborted(ENOT_ACTIVATED)
+        );
+        let func = option::borrow(&func_opt);
+        function_info::load_module_from_function(func);
+        dispatchable_derived_balance(store, func)
+    } else {
+        fungible_asset::balance(store)
+    }
+}
+
+ + + +
+ + + +## Function `derived_supply` + +Get the derived supply of the fungible asset using the overloaded hook. + +The semantics of supply will be governed by the function specified in DeriveSupplyDispatch. + + +
#[view]
+public fun derived_supply<T: key>(metadata: object::Object<T>): option::Option<u128>
+
+ + + +
+Implementation + + +
public fun derived_supply<T: key>(metadata: Object<T>): Option<u128> {
+    let func_opt = fungible_asset::derived_supply_dispatch_function(metadata);
+    if (option::is_some(&func_opt)) {
+        assert!(
+            features::dispatchable_fungible_asset_enabled(),
+            error::aborted(ENOT_ACTIVATED)
+        );
+        let func = option::borrow(&func_opt);
+        function_info::load_module_from_function(func);
+        dispatchable_derived_supply(metadata, func)
+    } else {
+        fungible_asset::supply(metadata)
+    }
+}
+
+ + + +
+ + + +## Function `borrow_transfer_ref` + + + +
fun borrow_transfer_ref<T: key>(metadata: object::Object<T>): &fungible_asset::TransferRef
+
+ + + +
+Implementation + + +
inline fun borrow_transfer_ref<T: key>(metadata: Object<T>): &TransferRef acquires TransferRefStore {
+    let metadata_addr = object::object_address(
+        &fungible_asset::store_metadata(metadata)
+    );
+    assert!(
+        exists<TransferRefStore>(metadata_addr),
+        error::not_found(ESTORE_NOT_FOUND)
+    );
+    &borrow_global<TransferRefStore>(metadata_addr).transfer_ref
+}
+
+ + + +
+ + + +## Function `dispatchable_withdraw` + + + +
fun dispatchable_withdraw<T: key>(store: object::Object<T>, amount: u64, transfer_ref: &fungible_asset::TransferRef, function: &function_info::FunctionInfo): fungible_asset::FungibleAsset
+
+ + + +
+Implementation + + +
native fun dispatchable_withdraw<T: key>(
+    store: Object<T>,
+    amount: u64,
+    transfer_ref: &TransferRef,
+    function: &FunctionInfo,
+): FungibleAsset;
+
+ + + +
+ + + +## Function `dispatchable_deposit` + + + +
fun dispatchable_deposit<T: key>(store: object::Object<T>, fa: fungible_asset::FungibleAsset, transfer_ref: &fungible_asset::TransferRef, function: &function_info::FunctionInfo)
+
+ + + +
+Implementation + + +
native fun dispatchable_deposit<T: key>(
+    store: Object<T>,
+    fa: FungibleAsset,
+    transfer_ref: &TransferRef,
+    function: &FunctionInfo,
+);
+
+ + + +
+ + + +## Function `dispatchable_derived_balance` + + + +
fun dispatchable_derived_balance<T: key>(store: object::Object<T>, function: &function_info::FunctionInfo): u64
+
+ + + +
+Implementation + + +
native fun dispatchable_derived_balance<T: key>(
+    store: Object<T>,
+    function: &FunctionInfo,
+): u64;
+
+ + + +
+ + + +## Function `dispatchable_derived_supply` + + + +
fun dispatchable_derived_supply<T: key>(store: object::Object<T>, function: &function_info::FunctionInfo): option::Option<u128>
+
+ + + +
+Implementation + + +
native fun dispatchable_derived_supply<T: key>(
+    store: Object<T>,
+    function: &FunctionInfo,
+): Option<u128>;
+
+ + + +
+ + + +## Specification + + + +
pragma verify = false;
+
+ + + + + +### Function `dispatchable_withdraw` + + +
fun dispatchable_withdraw<T: key>(store: object::Object<T>, amount: u64, transfer_ref: &fungible_asset::TransferRef, function: &function_info::FunctionInfo): fungible_asset::FungibleAsset
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `dispatchable_deposit` + + +
fun dispatchable_deposit<T: key>(store: object::Object<T>, fa: fungible_asset::FungibleAsset, transfer_ref: &fungible_asset::TransferRef, function: &function_info::FunctionInfo)
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `dispatchable_derived_balance` + + +
fun dispatchable_derived_balance<T: key>(store: object::Object<T>, function: &function_info::FunctionInfo): u64
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `dispatchable_derived_supply` + + +
fun dispatchable_derived_supply<T: key>(store: object::Object<T>, function: &function_info::FunctionInfo): option::Option<u128>
+
+ + + + +
pragma opaque;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/dkg.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/dkg.md new file mode 100644 index 0000000000000..fde1df92a990e --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/dkg.md @@ -0,0 +1,524 @@ + + + +# Module `0x1::dkg` + +DKG on-chain states and helper functions. + + +- [Struct `DKGSessionMetadata`](#0x1_dkg_DKGSessionMetadata) +- [Struct `DKGStartEvent`](#0x1_dkg_DKGStartEvent) +- [Struct `DKGSessionState`](#0x1_dkg_DKGSessionState) +- [Resource `DKGState`](#0x1_dkg_DKGState) +- [Constants](#@Constants_0) +- [Function `initialize`](#0x1_dkg_initialize) +- [Function `start`](#0x1_dkg_start) +- [Function `finish`](#0x1_dkg_finish) +- [Function `try_clear_incomplete_session`](#0x1_dkg_try_clear_incomplete_session) +- [Function `incomplete_session`](#0x1_dkg_incomplete_session) +- [Function `session_dealer_epoch`](#0x1_dkg_session_dealer_epoch) +- [Specification](#@Specification_1) + - [Function `initialize`](#@Specification_1_initialize) + - [Function `start`](#@Specification_1_start) + - [Function `finish`](#@Specification_1_finish) + - [Function `try_clear_incomplete_session`](#@Specification_1_try_clear_incomplete_session) + - [Function `incomplete_session`](#@Specification_1_incomplete_session) + + +
use 0x1::error;
+use 0x1::event;
+use 0x1::option;
+use 0x1::randomness_config;
+use 0x1::system_addresses;
+use 0x1::timestamp;
+use 0x1::validator_consensus_info;
+
+ + + + + +## Struct `DKGSessionMetadata` + +This can be considered as the public input of DKG. + + +
struct DKGSessionMetadata has copy, drop, store
+
+ + + +
+Fields + + +
+
+dealer_epoch: u64 +
+
+ +
+
+randomness_config: randomness_config::RandomnessConfig +
+
+ +
+
+dealer_validator_set: vector<validator_consensus_info::ValidatorConsensusInfo> +
+
+ +
+
+target_validator_set: vector<validator_consensus_info::ValidatorConsensusInfo> +
+
+ +
+
+ + +
+ + + +## Struct `DKGStartEvent` + + + +
#[event]
+struct DKGStartEvent has drop, store
+
+ + + +
+Fields + + +
+
+session_metadata: dkg::DKGSessionMetadata +
+
+ +
+
+start_time_us: u64 +
+
+ +
+
+ + +
+ + + +## Struct `DKGSessionState` + +The input and output of a DKG session. +The validator set of epoch x works together for a DKG output for the target validator set of epoch x+1. + + +
struct DKGSessionState has copy, drop, store
+
+ + + +
+Fields + + +
+
+metadata: dkg::DKGSessionMetadata +
+
+ +
+
+start_time_us: u64 +
+
+ +
+
+transcript: vector<u8> +
+
+ +
+
+ + +
+ + + +## Resource `DKGState` + +The completed and in-progress DKG sessions. + + +
struct DKGState has key
+
+ + + +
+Fields + + +
+
+last_completed: option::Option<dkg::DKGSessionState> +
+
+ +
+
+in_progress: option::Option<dkg::DKGSessionState> +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const EDKG_IN_PROGRESS: u64 = 1;
+
+ + + + + + + +
const EDKG_NOT_IN_PROGRESS: u64 = 2;
+
+ + + + + +## Function `initialize` + +Called in genesis to initialize on-chain states. + + +
public fun initialize(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
public fun initialize(aptos_framework: &signer) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    if (!exists<DKGState>(@aptos_framework)) {
+        move_to<DKGState>(
+            aptos_framework,
+            DKGState {
+                last_completed: std::option::none(),
+                in_progress: std::option::none(),
+            }
+        );
+    }
+}
+
+ + + +
+ + + +## Function `start` + +Mark on-chain DKG state as in-progress. Notify validators to start DKG. +Abort if a DKG is already in progress. + + +
public(friend) fun start(dealer_epoch: u64, randomness_config: randomness_config::RandomnessConfig, dealer_validator_set: vector<validator_consensus_info::ValidatorConsensusInfo>, target_validator_set: vector<validator_consensus_info::ValidatorConsensusInfo>)
+
+ + + +
+Implementation + + +
public(friend) fun start(
+    dealer_epoch: u64,
+    randomness_config: RandomnessConfig,
+    dealer_validator_set: vector<ValidatorConsensusInfo>,
+    target_validator_set: vector<ValidatorConsensusInfo>,
+) acquires DKGState {
+    let dkg_state = borrow_global_mut<DKGState>(@aptos_framework);
+    let new_session_metadata = DKGSessionMetadata {
+        dealer_epoch,
+        randomness_config,
+        dealer_validator_set,
+        target_validator_set,
+    };
+    let start_time_us = timestamp::now_microseconds();
+    dkg_state.in_progress = std::option::some(DKGSessionState {
+        metadata: new_session_metadata,
+        start_time_us,
+        transcript: vector[],
+    });
+
+    emit(DKGStartEvent {
+        start_time_us,
+        session_metadata: new_session_metadata,
+    });
+}
+
+ + + +
+ + + +## Function `finish` + +Put a transcript into the currently incomplete DKG session, then mark it completed. + +Abort if DKG is not in progress. + + +
public(friend) fun finish(transcript: vector<u8>)
+
+ + + +
+Implementation + + +
public(friend) fun finish(transcript: vector<u8>) acquires DKGState {
+    let dkg_state = borrow_global_mut<DKGState>(@aptos_framework);
+    assert!(option::is_some(&dkg_state.in_progress), error::invalid_state(EDKG_NOT_IN_PROGRESS));
+    let session = option::extract(&mut dkg_state.in_progress);
+    session.transcript = transcript;
+    dkg_state.last_completed = option::some(session);
+    dkg_state.in_progress = option::none();
+}
+
+ + + +
+ + + +## Function `try_clear_incomplete_session` + +Delete the currently incomplete session, if it exists. + + +
public fun try_clear_incomplete_session(fx: &signer)
+
+ + + +
+Implementation + + +
public fun try_clear_incomplete_session(fx: &signer) acquires DKGState {
+    system_addresses::assert_aptos_framework(fx);
+    if (exists<DKGState>(@aptos_framework)) {
+        let dkg_state = borrow_global_mut<DKGState>(@aptos_framework);
+        dkg_state.in_progress = option::none();
+    }
+}
+
+ + + +
+ + + +## Function `incomplete_session` + +Return the incomplete DKG session state, if it exists. + + +
public fun incomplete_session(): option::Option<dkg::DKGSessionState>
+
+ + + +
+Implementation + + +
public fun incomplete_session(): Option<DKGSessionState> acquires DKGState {
+    if (exists<DKGState>(@aptos_framework)) {
+        borrow_global<DKGState>(@aptos_framework).in_progress
+    } else {
+        option::none()
+    }
+}
+
+ + + +
+ + + +## Function `session_dealer_epoch` + +Return the dealer epoch of a DKGSessionState. + + +
public fun session_dealer_epoch(session: &dkg::DKGSessionState): u64
+
+ + + +
+Implementation + + +
public fun session_dealer_epoch(session: &DKGSessionState): u64 {
+    session.metadata.dealer_epoch
+}
+
+ + + +
+ + + +## Specification + + + +
invariant [suspendable] chain_status::is_operating() ==> exists<DKGState>(@aptos_framework);
+
+ + + + + +### Function `initialize` + + +
public fun initialize(aptos_framework: &signer)
+
+ + + + +
let aptos_framework_addr = signer::address_of(aptos_framework);
+aborts_if aptos_framework_addr != @aptos_framework;
+
+ + + + + +### Function `start` + + +
public(friend) fun start(dealer_epoch: u64, randomness_config: randomness_config::RandomnessConfig, dealer_validator_set: vector<validator_consensus_info::ValidatorConsensusInfo>, target_validator_set: vector<validator_consensus_info::ValidatorConsensusInfo>)
+
+ + + + +
aborts_if !exists<DKGState>(@aptos_framework);
+aborts_if !exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+
+ + + + + +### Function `finish` + + +
public(friend) fun finish(transcript: vector<u8>)
+
+ + + + +
requires exists<DKGState>(@aptos_framework);
+requires option::is_some(global<DKGState>(@aptos_framework).in_progress);
+aborts_if false;
+
+ + + + + + + +
fun has_incomplete_session(): bool {
+   if (exists<DKGState>(@aptos_framework)) {
+       option::spec_is_some(global<DKGState>(@aptos_framework).in_progress)
+   } else {
+       false
+   }
+}
+
+ + + + + +### Function `try_clear_incomplete_session` + + +
public fun try_clear_incomplete_session(fx: &signer)
+
+ + + + +
let addr = signer::address_of(fx);
+aborts_if addr != @aptos_framework;
+
+ + + + + +### Function `incomplete_session` + + +
public fun incomplete_session(): option::Option<dkg::DKGSessionState>
+
+ + + + +
aborts_if false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/event.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/event.md new file mode 100644 index 0000000000000..89fcbb69e6279 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/event.md @@ -0,0 +1,480 @@ + + + +# Module `0x1::event` + +The Event module defines an EventHandleGenerator that is used to create +EventHandles with unique GUIDs. It contains a counter for the number +of EventHandles it generates. An EventHandle is used to count the number of +events emitted to a handle and emit events to the event store. + + +- [Struct `EventHandle`](#0x1_event_EventHandle) +- [Function `emit`](#0x1_event_emit) +- [Function `write_module_event_to_store`](#0x1_event_write_module_event_to_store) +- [Function `new_event_handle`](#0x1_event_new_event_handle) +- [Function `emit_event`](#0x1_event_emit_event) +- [Function `guid`](#0x1_event_guid) +- [Function `counter`](#0x1_event_counter) +- [Function `write_to_event_store`](#0x1_event_write_to_event_store) +- [Function `destroy_handle`](#0x1_event_destroy_handle) +- [Specification](#@Specification_0) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `emit`](#@Specification_0_emit) + - [Function `write_module_event_to_store`](#@Specification_0_write_module_event_to_store) + - [Function `emit_event`](#@Specification_0_emit_event) + - [Function `guid`](#@Specification_0_guid) + - [Function `counter`](#@Specification_0_counter) + - [Function `write_to_event_store`](#@Specification_0_write_to_event_store) + - [Function `destroy_handle`](#@Specification_0_destroy_handle) + + +
use 0x1::bcs;
+use 0x1::guid;
+
+ + + + + +## Struct `EventHandle` + +A handle for an event such that: +1. Other modules can emit events to this handle. +2. Storage can use this handle to prove the total number of events that happened in the past. + + +
#[deprecated]
+struct EventHandle<T: drop, store> has store
+
+ + + +
+Fields + + +
+
+counter: u64 +
+
+ Total number of events emitted to this event stream. +
+
+guid: guid::GUID +
+
+ A globally unique ID for this event stream. +
+
+ + +
+ + + +## Function `emit` + +Emit a module event with payload msg. + + +
public fun emit<T: drop, store>(msg: T)
+
+ + + +
+Implementation + + +
public fun emit<T: store + drop>(msg: T) {
+    write_module_event_to_store<T>(msg);
+}
+
+ + + +
+ + + +## Function `write_module_event_to_store` + +Log msg with the event stream identified by T + + +
fun write_module_event_to_store<T: drop, store>(msg: T)
+
+ + + +
+Implementation + + +
native fun write_module_event_to_store<T: drop + store>(msg: T);
+
+ + + +
+ + + +## Function `new_event_handle` + +Use EventHandleGenerator to generate a unique event handle for sig + + +
#[deprecated]
+public(friend) fun new_event_handle<T: drop, store>(guid: guid::GUID): event::EventHandle<T>
+
+ + + +
+Implementation + + +
public(friend) fun new_event_handle<T: drop + store>(guid: GUID): EventHandle<T> {
+    EventHandle<T> {
+        counter: 0,
+        guid,
+    }
+}
+
+ + + +
+ + + +## Function `emit_event` + +Emit an event with payload msg by using handle_ref's key and counter. + + +
#[deprecated]
+public fun emit_event<T: drop, store>(handle_ref: &mut event::EventHandle<T>, msg: T)
+
+ + + +
+Implementation + + +
public fun emit_event<T: drop + store>(handle_ref: &mut EventHandle<T>, msg: T) {
+    write_to_event_store<T>(bcs::to_bytes(&handle_ref.guid), handle_ref.counter, msg);
+    spec {
+        assume handle_ref.counter + 1 <= MAX_U64;
+    };
+    handle_ref.counter = handle_ref.counter + 1;
+}
+
+ + + +
+ + + +## Function `guid` + +Return the GUID associated with this EventHandle + + +
#[deprecated]
+public fun guid<T: drop, store>(handle_ref: &event::EventHandle<T>): &guid::GUID
+
+ + + +
+Implementation + + +
public fun guid<T: drop + store>(handle_ref: &EventHandle<T>): &GUID {
+    &handle_ref.guid
+}
+
+ + + +
+ + + +## Function `counter` + +Return the current counter associated with this EventHandle + + +
#[deprecated]
+public fun counter<T: drop, store>(handle_ref: &event::EventHandle<T>): u64
+
+ + + +
+Implementation + + +
public fun counter<T: drop + store>(handle_ref: &EventHandle<T>): u64 {
+    handle_ref.counter
+}
+
+ + + +
+ + + +## Function `write_to_event_store` + +Log msg as the countth event associated with the event stream identified by guid + + +
#[deprecated]
+fun write_to_event_store<T: drop, store>(guid: vector<u8>, count: u64, msg: T)
+
+ + + +
+Implementation + + +
native fun write_to_event_store<T: drop + store>(guid: vector<u8>, count: u64, msg: T);
+
+ + + +
+ + + +## Function `destroy_handle` + +Destroy a unique handle. + + +
#[deprecated]
+public fun destroy_handle<T: drop, store>(handle: event::EventHandle<T>)
+
+ + + +
+Implementation + + +
public fun destroy_handle<T: drop + store>(handle: EventHandle<T>) {
+    EventHandle<T> { counter: _, guid: _ } = handle;
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1Each event handle possesses a distinct and unique GUID.CriticalThe new_event_handle function creates an EventHandle object with a unique GUID, ensuring distinct identification.Audited: GUIDs are created in guid::create. Each time the function is called, it increments creation_num_ref. Multiple calls to the function will result in distinct GUID values.
2Unable to publish two events with the same GUID & sequence number.CriticalTwo events may either have the same GUID with a different counter or the same counter with a different GUID.This is implied by high-level requirement 1.
3Event native functions respect normal Move rules around object creation and destruction.CriticalMust follow the same rules and principles that apply to object creation and destruction in Move when using event native functions.The native functions of this module have been manually audited.
4Counter increases monotonically between event emissionsMediumWith each event emission, the emit_event function increments the counter of the EventHandle by one.Formally verified in the post condition of emit_event.
5For a given EventHandle, it should always be possible to: (1) return the GUID associated with this EventHandle, (2) return the current counter associated with this EventHandle, and (3) destroy the handle.LowThe following functions should not abort if EventHandle exists: guid(), counter(), destroy_handle().Formally verified via guid, counter and destroy_handle.
+ + + + + +### Module-level Specification + + +
pragma verify = true;
+pragma aborts_if_is_strict;
+
+ + + + + +### Function `emit` + + +
public fun emit<T: drop, store>(msg: T)
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `write_module_event_to_store` + + +
fun write_module_event_to_store<T: drop, store>(msg: T)
+
+ + +Native function use opaque. + + +
pragma opaque;
+
+ + + + + +### Function `emit_event` + + +
#[deprecated]
+public fun emit_event<T: drop, store>(handle_ref: &mut event::EventHandle<T>, msg: T)
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+// This enforces high-level requirement 4:
+ensures [concrete] handle_ref.counter == old(handle_ref.counter) + 1;
+
+ + + + + +### Function `guid` + + +
#[deprecated]
+public fun guid<T: drop, store>(handle_ref: &event::EventHandle<T>): &guid::GUID
+
+ + + + +
// This enforces high-level requirement 5:
+aborts_if false;
+
+ + + + + +### Function `counter` + + +
#[deprecated]
+public fun counter<T: drop, store>(handle_ref: &event::EventHandle<T>): u64
+
+ + + + +
// This enforces high-level requirement 5:
+aborts_if false;
+
+ + + + + +### Function `write_to_event_store` + + +
#[deprecated]
+fun write_to_event_store<T: drop, store>(guid: vector<u8>, count: u64, msg: T)
+
+ + +Native function use opaque. + + +
pragma opaque;
+
+ + + + + +### Function `destroy_handle` + + +
#[deprecated]
+public fun destroy_handle<T: drop, store>(handle: event::EventHandle<T>)
+
+ + + + +
// This enforces high-level requirement 5:
+aborts_if false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/execution_config.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/execution_config.md new file mode 100644 index 0000000000000..27d894d51a795 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/execution_config.md @@ -0,0 +1,252 @@ + + + +# Module `0x1::execution_config` + +Maintains the execution config for the blockchain. The config is stored in a +Reconfiguration, and may be updated by root. + + +- [Resource `ExecutionConfig`](#0x1_execution_config_ExecutionConfig) +- [Constants](#@Constants_0) +- [Function `set`](#0x1_execution_config_set) +- [Function `set_for_next_epoch`](#0x1_execution_config_set_for_next_epoch) +- [Function `on_new_epoch`](#0x1_execution_config_on_new_epoch) +- [Specification](#@Specification_1) + - [Function `set`](#@Specification_1_set) + - [Function `set_for_next_epoch`](#@Specification_1_set_for_next_epoch) + - [Function `on_new_epoch`](#@Specification_1_on_new_epoch) + + +
use 0x1::chain_status;
+use 0x1::config_buffer;
+use 0x1::error;
+use 0x1::reconfiguration;
+use 0x1::system_addresses;
+
+ + + + + +## Resource `ExecutionConfig` + + + +
struct ExecutionConfig has drop, store, key
+
+ + + +
+Fields + + +
+
+config: vector<u8> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +The provided on chain config bytes are empty or invalid + + +
const EINVALID_CONFIG: u64 = 1;
+
+ + + + + +## Function `set` + +Deprecated by set_for_next_epoch(). + +WARNING: calling this while randomness is enabled will trigger a new epoch without randomness! + +TODO: update all the tests that reference this function, then disable this function. + + +
public fun set(account: &signer, config: vector<u8>)
+
+ + + +
+Implementation + + +
public fun set(account: &signer, config: vector<u8>) acquires ExecutionConfig {
+    system_addresses::assert_aptos_framework(account);
+    chain_status::assert_genesis();
+
+    assert!(vector::length(&config) > 0, error::invalid_argument(EINVALID_CONFIG));
+
+    if (exists<ExecutionConfig>(@aptos_framework)) {
+        let config_ref = &mut borrow_global_mut<ExecutionConfig>(@aptos_framework).config;
+        *config_ref = config;
+    } else {
+        move_to(account, ExecutionConfig { config });
+    };
+    // Need to trigger reconfiguration so validator nodes can sync on the updated configs.
+    reconfiguration::reconfigure();
+}
+
+ + + +
+ + + +## Function `set_for_next_epoch` + +This can be called by on-chain governance to update on-chain execution configs for the next epoch. +Example usage: +``` +aptos_framework::execution_config::set_for_next_epoch(&framework_signer, some_config_bytes); +aptos_framework::aptos_governance::reconfigure(&framework_signer); +``` + + +
public fun set_for_next_epoch(account: &signer, config: vector<u8>)
+
+ + + +
+Implementation + + +
public fun set_for_next_epoch(account: &signer, config: vector<u8>) {
+    system_addresses::assert_aptos_framework(account);
+    assert!(vector::length(&config) > 0, error::invalid_argument(EINVALID_CONFIG));
+    config_buffer::upsert(ExecutionConfig { config });
+}
+
+ + + +
+ + + +## Function `on_new_epoch` + +Only used in reconfigurations to apply the pending ExecutionConfig, if there is any. + + +
public(friend) fun on_new_epoch(framework: &signer)
+
+ + + +
+Implementation + + +
public(friend) fun on_new_epoch(framework: &signer) acquires ExecutionConfig {
+    system_addresses::assert_aptos_framework(framework);
+    if (config_buffer::does_exist<ExecutionConfig>()) {
+        let config = config_buffer::extract<ExecutionConfig>();
+        if (exists<ExecutionConfig>(@aptos_framework)) {
+            *borrow_global_mut<ExecutionConfig>(@aptos_framework) = config;
+        } else {
+            move_to(framework, config);
+        };
+    }
+}
+
+ + + +
+ + + +## Specification + + + +
pragma verify = true;
+pragma aborts_if_is_strict;
+
+ + + + + +### Function `set` + + +
public fun set(account: &signer, config: vector<u8>)
+
+ + +Ensure the caller is admin +When setting now time must be later than last_reconfiguration_time. + + +
pragma verify_duration_estimate = 600;
+let addr = signer::address_of(account);
+include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply;
+requires chain_status::is_genesis();
+requires exists<stake::ValidatorFees>(@aptos_framework);
+requires exists<staking_config::StakingRewardsConfig>(@aptos_framework);
+requires len(config) > 0;
+include features::spec_periodical_reward_rate_decrease_enabled() ==> staking_config::StakingRewardsConfigEnabledRequirement;
+include aptos_coin::ExistsAptosCoin;
+requires system_addresses::is_aptos_framework_address(addr);
+requires timestamp::spec_now_microseconds() >= reconfiguration::last_reconfiguration_time();
+ensures exists<ExecutionConfig>(@aptos_framework);
+
+ + + + + +### Function `set_for_next_epoch` + + +
public fun set_for_next_epoch(account: &signer, config: vector<u8>)
+
+ + + + +
include config_buffer::SetForNextEpochAbortsIf;
+
+ + + + + +### Function `on_new_epoch` + + +
public(friend) fun on_new_epoch(framework: &signer)
+
+ + + + +
requires @aptos_framework == std::signer::address_of(framework);
+include config_buffer::OnNewEpochRequirement<ExecutionConfig>;
+aborts_if false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/function_info.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/function_info.md new file mode 100644 index 0000000000000..95e8a6710a7d6 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/function_info.md @@ -0,0 +1,362 @@ + + + +# Module `0x1::function_info` + +The function_info module defines the FunctionInfo type which simulates a function pointer. + + +- [Struct `FunctionInfo`](#0x1_function_info_FunctionInfo) +- [Constants](#@Constants_0) +- [Function `new_function_info`](#0x1_function_info_new_function_info) +- [Function `new_function_info_from_address`](#0x1_function_info_new_function_info_from_address) +- [Function `check_dispatch_type_compatibility`](#0x1_function_info_check_dispatch_type_compatibility) +- [Function `load_module_from_function`](#0x1_function_info_load_module_from_function) +- [Function `check_dispatch_type_compatibility_impl`](#0x1_function_info_check_dispatch_type_compatibility_impl) +- [Function `is_identifier`](#0x1_function_info_is_identifier) +- [Function `load_function_impl`](#0x1_function_info_load_function_impl) +- [Specification](#@Specification_1) + - [Function `check_dispatch_type_compatibility_impl`](#@Specification_1_check_dispatch_type_compatibility_impl) + - [Function `load_function_impl`](#@Specification_1_load_function_impl) + + +
use 0x1::error;
+use 0x1::features;
+use 0x1::signer;
+use 0x1::string;
+
+ + + + + +## Struct `FunctionInfo` + +A String holds a sequence of bytes which is guaranteed to be in utf8 format. + + +
struct FunctionInfo has copy, drop, store
+
+ + + +
+Fields + + +
+
+module_address: address +
+
+ +
+
+module_name: string::String +
+
+ +
+
+function_name: string::String +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Function specified in the FunctionInfo doesn't exist on chain. + + +
const EINVALID_FUNCTION: u64 = 2;
+
+ + + + + +String is not a valid Move identifier + + +
const EINVALID_IDENTIFIER: u64 = 1;
+
+ + + + + +Feature hasn't been activated yet. + + +
const ENOT_ACTIVATED: u64 = 3;
+
+ + + + + +## Function `new_function_info` + +Creates a new function info from names. + + +
public fun new_function_info(module_signer: &signer, module_name: string::String, function_name: string::String): function_info::FunctionInfo
+
+ + + +
+Implementation + + +
public fun new_function_info(
+    module_signer: &signer,
+    module_name: String,
+    function_name: String,
+): FunctionInfo {
+    new_function_info_from_address(
+        signer::address_of(module_signer),
+        module_name,
+        function_name,
+    )
+}
+
+ + + +
+ + + +## Function `new_function_info_from_address` + + + +
public(friend) fun new_function_info_from_address(module_address: address, module_name: string::String, function_name: string::String): function_info::FunctionInfo
+
+ + + +
+Implementation + + +
public(friend) fun new_function_info_from_address(
+    module_address: address,
+    module_name: String,
+    function_name: String,
+): FunctionInfo {
+    assert!(
+        is_identifier(string::bytes(&module_name)),
+        EINVALID_IDENTIFIER
+    );
+    assert!(
+        is_identifier(string::bytes(&function_name)),
+        EINVALID_IDENTIFIER
+    );
+    FunctionInfo {
+        module_address,
+        module_name,
+        function_name,
+    }
+}
+
+ + + +
+ + + +## Function `check_dispatch_type_compatibility` + +Check if the dispatch target function meets the type requirements of the disptach entry point. + +framework_function is the dispatch native function defined in the aptos_framework. +dispatch_target is the function passed in by the user. + +dispatch_target should have the same signature (same argument type, same generics constraint) except +that the framework_function will have a &FunctionInfo in the last argument that will instruct the VM which +function to jump to. + +dispatch_target also needs to be public so the type signature will remain unchanged. + + +
public(friend) fun check_dispatch_type_compatibility(framework_function: &function_info::FunctionInfo, dispatch_target: &function_info::FunctionInfo): bool
+
+ + + +
+Implementation + + +
public(friend) fun check_dispatch_type_compatibility(
+    framework_function: &FunctionInfo,
+    dispatch_target: &FunctionInfo,
+): bool {
+    assert!(
+        features::dispatchable_fungible_asset_enabled(),
+        error::aborted(ENOT_ACTIVATED)
+    );
+    load_function_impl(dispatch_target);
+    check_dispatch_type_compatibility_impl(framework_function, dispatch_target)
+}
+
+ + + +
+ + + +## Function `load_module_from_function` + +Load up a function into VM's loader and charge for its dependencies + +It is **critical** to make sure that this function is invoked before check_dispatch_type_compatibility +or performing any other dispatching logic to ensure: +1. We properly charge gas for the function to dispatch. +2. The function is loaded in the cache so that we can perform further type checking/dispatching logic. + +Calling check_dispatch_type_compatibility_impl or dispatch without loading up the module would yield an error +if such module isn't accessed previously in the transaction. + + +
public(friend) fun load_module_from_function(f: &function_info::FunctionInfo)
+
+ + + +
+Implementation + + +
public(friend) fun load_module_from_function(f: &FunctionInfo) {
+    load_function_impl(f)
+}
+
+ + + +
+ + + +## Function `check_dispatch_type_compatibility_impl` + + + +
fun check_dispatch_type_compatibility_impl(lhs: &function_info::FunctionInfo, r: &function_info::FunctionInfo): bool
+
+ + + +
+Implementation + + +
native fun check_dispatch_type_compatibility_impl(lhs: &FunctionInfo, r: &FunctionInfo): bool;
+
+ + + +
+ + + +## Function `is_identifier` + + + +
fun is_identifier(s: &vector<u8>): bool
+
+ + + +
+Implementation + + +
native fun is_identifier(s: &vector<u8>): bool;
+
+ + + +
+ + + +## Function `load_function_impl` + + + +
fun load_function_impl(f: &function_info::FunctionInfo)
+
+ + + +
+Implementation + + +
native fun load_function_impl(f: &FunctionInfo);
+
+ + + +
+ + + +## Specification + + + +
pragma verify = false;
+
+ + + + + +### Function `check_dispatch_type_compatibility_impl` + + +
fun check_dispatch_type_compatibility_impl(lhs: &function_info::FunctionInfo, r: &function_info::FunctionInfo): bool
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `load_function_impl` + + +
fun load_function_impl(f: &function_info::FunctionInfo)
+
+ + + + +
pragma opaque;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/fungible_asset.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/fungible_asset.md new file mode 100644 index 0000000000000..eb352c025f1ce --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/fungible_asset.md @@ -0,0 +1,3853 @@ + + + +# Module `0x1::fungible_asset` + +This defines the fungible asset module that can issue fungible asset of any Metadata object. The +metadata object can be any object that equipped with Metadata resource. + + +- [Resource `Supply`](#0x1_fungible_asset_Supply) +- [Resource `ConcurrentSupply`](#0x1_fungible_asset_ConcurrentSupply) +- [Resource `Metadata`](#0x1_fungible_asset_Metadata) +- [Resource `Untransferable`](#0x1_fungible_asset_Untransferable) +- [Resource `FungibleStore`](#0x1_fungible_asset_FungibleStore) +- [Resource `DispatchFunctionStore`](#0x1_fungible_asset_DispatchFunctionStore) +- [Resource `DeriveSupply`](#0x1_fungible_asset_DeriveSupply) +- [Resource `ConcurrentFungibleBalance`](#0x1_fungible_asset_ConcurrentFungibleBalance) +- [Struct `FungibleAsset`](#0x1_fungible_asset_FungibleAsset) +- [Struct `MintRef`](#0x1_fungible_asset_MintRef) +- [Struct `TransferRef`](#0x1_fungible_asset_TransferRef) +- [Struct `BurnRef`](#0x1_fungible_asset_BurnRef) +- [Struct `MutateMetadataRef`](#0x1_fungible_asset_MutateMetadataRef) +- [Struct `Deposit`](#0x1_fungible_asset_Deposit) +- [Struct `Withdraw`](#0x1_fungible_asset_Withdraw) +- [Struct `Frozen`](#0x1_fungible_asset_Frozen) +- [Resource `FungibleAssetEvents`](#0x1_fungible_asset_FungibleAssetEvents) +- [Struct `DepositEvent`](#0x1_fungible_asset_DepositEvent) +- [Struct `WithdrawEvent`](#0x1_fungible_asset_WithdrawEvent) +- [Struct `FrozenEvent`](#0x1_fungible_asset_FrozenEvent) +- [Constants](#@Constants_0) +- [Function 
`default_to_concurrent_fungible_supply`](#0x1_fungible_asset_default_to_concurrent_fungible_supply) +- [Function `allow_upgrade_to_concurrent_fungible_balance`](#0x1_fungible_asset_allow_upgrade_to_concurrent_fungible_balance) +- [Function `default_to_concurrent_fungible_balance`](#0x1_fungible_asset_default_to_concurrent_fungible_balance) +- [Function `add_fungibility`](#0x1_fungible_asset_add_fungibility) +- [Function `set_untransferable`](#0x1_fungible_asset_set_untransferable) +- [Function `is_untransferable`](#0x1_fungible_asset_is_untransferable) +- [Function `register_dispatch_functions`](#0x1_fungible_asset_register_dispatch_functions) +- [Function `register_derive_supply_dispatch_function`](#0x1_fungible_asset_register_derive_supply_dispatch_function) +- [Function `register_dispatch_function_sanity_check`](#0x1_fungible_asset_register_dispatch_function_sanity_check) +- [Function `generate_mint_ref`](#0x1_fungible_asset_generate_mint_ref) +- [Function `generate_burn_ref`](#0x1_fungible_asset_generate_burn_ref) +- [Function `generate_transfer_ref`](#0x1_fungible_asset_generate_transfer_ref) +- [Function `generate_mutate_metadata_ref`](#0x1_fungible_asset_generate_mutate_metadata_ref) +- [Function `supply`](#0x1_fungible_asset_supply) +- [Function `maximum`](#0x1_fungible_asset_maximum) +- [Function `name`](#0x1_fungible_asset_name) +- [Function `symbol`](#0x1_fungible_asset_symbol) +- [Function `decimals`](#0x1_fungible_asset_decimals) +- [Function `icon_uri`](#0x1_fungible_asset_icon_uri) +- [Function `project_uri`](#0x1_fungible_asset_project_uri) +- [Function `metadata`](#0x1_fungible_asset_metadata) +- [Function `store_exists`](#0x1_fungible_asset_store_exists) +- [Function `store_exists_inline`](#0x1_fungible_asset_store_exists_inline) +- [Function `concurrent_fungible_balance_exists_inline`](#0x1_fungible_asset_concurrent_fungible_balance_exists_inline) +- [Function `metadata_from_asset`](#0x1_fungible_asset_metadata_from_asset) +- [Function 
`store_metadata`](#0x1_fungible_asset_store_metadata) +- [Function `amount`](#0x1_fungible_asset_amount) +- [Function `balance`](#0x1_fungible_asset_balance) +- [Function `is_balance_at_least`](#0x1_fungible_asset_is_balance_at_least) +- [Function `is_address_balance_at_least`](#0x1_fungible_asset_is_address_balance_at_least) +- [Function `is_frozen`](#0x1_fungible_asset_is_frozen) +- [Function `is_store_dispatchable`](#0x1_fungible_asset_is_store_dispatchable) +- [Function `deposit_dispatch_function`](#0x1_fungible_asset_deposit_dispatch_function) +- [Function `has_deposit_dispatch_function`](#0x1_fungible_asset_has_deposit_dispatch_function) +- [Function `withdraw_dispatch_function`](#0x1_fungible_asset_withdraw_dispatch_function) +- [Function `has_withdraw_dispatch_function`](#0x1_fungible_asset_has_withdraw_dispatch_function) +- [Function `derived_balance_dispatch_function`](#0x1_fungible_asset_derived_balance_dispatch_function) +- [Function `derived_supply_dispatch_function`](#0x1_fungible_asset_derived_supply_dispatch_function) +- [Function `asset_metadata`](#0x1_fungible_asset_asset_metadata) +- [Function `mint_ref_metadata`](#0x1_fungible_asset_mint_ref_metadata) +- [Function `transfer_ref_metadata`](#0x1_fungible_asset_transfer_ref_metadata) +- [Function `burn_ref_metadata`](#0x1_fungible_asset_burn_ref_metadata) +- [Function `object_from_metadata_ref`](#0x1_fungible_asset_object_from_metadata_ref) +- [Function `transfer`](#0x1_fungible_asset_transfer) +- [Function `create_store`](#0x1_fungible_asset_create_store) +- [Function `remove_store`](#0x1_fungible_asset_remove_store) +- [Function `withdraw`](#0x1_fungible_asset_withdraw) +- [Function `withdraw_sanity_check`](#0x1_fungible_asset_withdraw_sanity_check) +- [Function `deposit_sanity_check`](#0x1_fungible_asset_deposit_sanity_check) +- [Function `deposit`](#0x1_fungible_asset_deposit) +- [Function `mint`](#0x1_fungible_asset_mint) +- [Function `mint_internal`](#0x1_fungible_asset_mint_internal) +- 
[Function `mint_to`](#0x1_fungible_asset_mint_to) +- [Function `set_frozen_flag`](#0x1_fungible_asset_set_frozen_flag) +- [Function `set_frozen_flag_internal`](#0x1_fungible_asset_set_frozen_flag_internal) +- [Function `burn`](#0x1_fungible_asset_burn) +- [Function `burn_internal`](#0x1_fungible_asset_burn_internal) +- [Function `burn_from`](#0x1_fungible_asset_burn_from) +- [Function `address_burn_from`](#0x1_fungible_asset_address_burn_from) +- [Function `withdraw_with_ref`](#0x1_fungible_asset_withdraw_with_ref) +- [Function `deposit_with_ref`](#0x1_fungible_asset_deposit_with_ref) +- [Function `transfer_with_ref`](#0x1_fungible_asset_transfer_with_ref) +- [Function `mutate_metadata`](#0x1_fungible_asset_mutate_metadata) +- [Function `zero`](#0x1_fungible_asset_zero) +- [Function `extract`](#0x1_fungible_asset_extract) +- [Function `merge`](#0x1_fungible_asset_merge) +- [Function `destroy_zero`](#0x1_fungible_asset_destroy_zero) +- [Function `deposit_internal`](#0x1_fungible_asset_deposit_internal) +- [Function `withdraw_internal`](#0x1_fungible_asset_withdraw_internal) +- [Function `increase_supply`](#0x1_fungible_asset_increase_supply) +- [Function `decrease_supply`](#0x1_fungible_asset_decrease_supply) +- [Function `borrow_fungible_metadata`](#0x1_fungible_asset_borrow_fungible_metadata) +- [Function `borrow_fungible_metadata_mut`](#0x1_fungible_asset_borrow_fungible_metadata_mut) +- [Function `borrow_store_resource`](#0x1_fungible_asset_borrow_store_resource) +- [Function `upgrade_to_concurrent`](#0x1_fungible_asset_upgrade_to_concurrent) +- [Function `upgrade_store_to_concurrent`](#0x1_fungible_asset_upgrade_store_to_concurrent) +- [Function `ensure_store_upgraded_to_concurrent_internal`](#0x1_fungible_asset_ensure_store_upgraded_to_concurrent_internal) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + + +
use 0x1::aggregator_v2;
+use 0x1::create_signer;
+use 0x1::error;
+use 0x1::event;
+use 0x1::features;
+use 0x1::function_info;
+use 0x1::object;
+use 0x1::option;
+use 0x1::signer;
+use 0x1::string;
+
+ + + + + +## Resource `Supply` + + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct Supply has key
+
+ + + +
+Fields + + +
+
+current: u128 +
+
+ +
+
+maximum: option::Option<u128> +
+
+ +
+
+ + +
+ + + +## Resource `ConcurrentSupply` + + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct ConcurrentSupply has key
+
+ + + +
+Fields + + +
+
+current: aggregator_v2::Aggregator<u128> +
+
+ +
+
+ + +
+ + + +## Resource `Metadata` + +Metadata of a Fungible asset + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct Metadata has copy, drop, key
+
+ + + +
+Fields + + +
+
+name: string::String +
+
+ Name of the fungible metadata, e.g., "USDT". +
+
+symbol: string::String +
+
+ Symbol of the fungible metadata, usually a shorter version of the name. + For example, Singapore Dollar is SGD. +
+
+decimals: u8 +
+
+ Number of decimals used for display purposes. + For example, if decimals equals 2, a balance of 505 coins should + be displayed to a user as 5.05 (505 / 10 ** 2). +
+
+icon_uri: string::String +
+
+ The Uniform Resource Identifier (uri) pointing to an image that can be used as the icon for this fungible + asset. +
+
+project_uri: string::String +
+
+ The Uniform Resource Identifier (uri) pointing to the website for the fungible asset. +
+
+ + +
+ + + +## Resource `Untransferable` + +Defines a FungibleAsset, such that all FungibleStores are untransferable at +the object layer. + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct Untransferable has key
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Resource `FungibleStore` + +The store object that holds fungible assets of a specific type associated with an account. + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct FungibleStore has key
+
+ + + +
+Fields + + +
+
+metadata: object::Object<fungible_asset::Metadata> +
+
+ The address of the base metadata object. +
+
+balance: u64 +
+
+ The balance of the fungible metadata. +
+
+frozen: bool +
+
+ If true, owner transfer is disabled so that only TransferRef can move assets in/out of this store. +
+
+ + +
+ + + +## Resource `DispatchFunctionStore` + + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct DispatchFunctionStore has key
+
+ + + +
+Fields + + +
+
+withdraw_function: option::Option<function_info::FunctionInfo> +
+
+ +
+
+deposit_function: option::Option<function_info::FunctionInfo> +
+
+ +
+
+derived_balance_function: option::Option<function_info::FunctionInfo> +
+
+ +
+
+ + +
+ + + +## Resource `DeriveSupply` + + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct DeriveSupply has key
+
+ + + +
+Fields + + +
+
+dispatch_function: option::Option<function_info::FunctionInfo> +
+
+ +
+
+ + +
+ + + +## Resource `ConcurrentFungibleBalance` + +The store object that holds concurrent fungible asset balance. + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct ConcurrentFungibleBalance has key
+
+ + + +
+Fields + + +
+
+balance: aggregator_v2::Aggregator<u64> +
+
+ The balance of the fungible metadata. +
+
+ + +
+ + + +## Struct `FungibleAsset` + +FungibleAsset can be passed into function for type safety and to guarantee a specific amount. +FungibleAsset is ephemeral and cannot be stored directly. It must be deposited back into a store. + + +
struct FungibleAsset
+
+ + + +
+Fields + + +
+
+metadata: object::Object<fungible_asset::Metadata> +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `MintRef` + +MintRef can be used to mint the fungible asset into an account's store. + + +
struct MintRef has drop, store
+
+ + + +
+Fields + + +
+
+metadata: object::Object<fungible_asset::Metadata> +
+
+ +
+
+ + +
+ + + +## Struct `TransferRef` + +TransferRef can be used to allow or disallow the owner of fungible assets from transferring the asset +and allow the holder of TransferRef to transfer fungible assets from any account. + + +
struct TransferRef has drop, store
+
+ + + +
+Fields + + +
+
+metadata: object::Object<fungible_asset::Metadata> +
+
+ +
+
+ + +
+ + + +## Struct `BurnRef` + +BurnRef can be used to burn fungible assets from a given holder account. + + +
struct BurnRef has drop, store
+
+ + + +
+Fields + + +
+
+metadata: object::Object<fungible_asset::Metadata> +
+
+ +
+
+ + +
+ + + +## Struct `MutateMetadataRef` + +MutateMetadataRef can be used to directly modify the fungible asset's Metadata. + + +
struct MutateMetadataRef has drop, store
+
+ + + +
+Fields + + +
+
+metadata: object::Object<fungible_asset::Metadata> +
+
+ +
+
+ + +
+ + + +## Struct `Deposit` + +Emitted when fungible assets are deposited into a store. + + +
#[event]
+struct Deposit has drop, store
+
+ + + +
+Fields + + +
+
+store: address +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `Withdraw` + +Emitted when fungible assets are withdrawn from a store. + + +
#[event]
+struct Withdraw has drop, store
+
+ + + +
+Fields + + +
+
+store: address +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `Frozen` + +Emitted when a store's frozen status is updated. + + +
#[event]
+struct Frozen has drop, store
+
+ + + +
+Fields + + +
+
+store: address +
+
+ +
+
+frozen: bool +
+
+ +
+
+ + +
+ + + +## Resource `FungibleAssetEvents` + + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+#[deprecated]
+struct FungibleAssetEvents has key
+
+ + + +
+Fields + + +
+
+deposit_events: event::EventHandle<fungible_asset::DepositEvent> +
+
+ +
+
+withdraw_events: event::EventHandle<fungible_asset::WithdrawEvent> +
+
+ +
+
+frozen_events: event::EventHandle<fungible_asset::FrozenEvent> +
+
+ +
+
+ + +
+ + + +## Struct `DepositEvent` + + + +
#[deprecated]
+struct DepositEvent has drop, store
+
+ + + +
+Fields + + +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `WithdrawEvent` + + + +
#[deprecated]
+struct WithdrawEvent has drop, store
+
+ + + +
+Fields + + +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `FrozenEvent` + + + +
#[deprecated]
+struct FrozenEvent has drop, store
+
+ + + +
+Fields + + +
+
+frozen: bool +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Maximum possible coin supply. + + +
const MAX_U128: u128 = 340282366920938463463374607431768211455;
+
+ + + + + +Trying to re-register dispatch hook on a fungible asset. + + +
const EALREADY_REGISTERED: u64 = 29;
+
+ + + + + +Amount cannot be zero. + + +
const EAMOUNT_CANNOT_BE_ZERO: u64 = 1;
+
+ + + + + +Cannot destroy non-empty fungible assets. + + +
const EAMOUNT_IS_NOT_ZERO: u64 = 12;
+
+ + + + + +Cannot register dispatch hook for APT. + + +
const EAPT_NOT_DISPATCHABLE: u64 = 31;
+
+ + + + + +Cannot destroy fungible stores with a non-zero balance. + + +
const EBALANCE_IS_NOT_ZERO: u64 = 14;
+
+ + + + + +Burn ref and fungible asset do not match. + + +
const EBURN_REF_AND_FUNGIBLE_ASSET_MISMATCH: u64 = 13;
+
+ + + + + +Burn ref and store do not match. + + +
const EBURN_REF_AND_STORE_MISMATCH: u64 = 10;
+
+ + + + + +Flag for Concurrent Balance not enabled + + +
const ECONCURRENT_BALANCE_NOT_ENABLED: u64 = 32;
+
+ + + + + +Flag for Concurrent Supply not enabled + + +
const ECONCURRENT_SUPPLY_NOT_ENABLED: u64 = 22;
+
+ + + + + +Decimals is over the maximum of 32 + + +
const EDECIMALS_TOO_LARGE: u64 = 17;
+
+ + + + + +Provided deposit function type doesn't meet the signature requirement. + + +
const EDEPOSIT_FUNCTION_SIGNATURE_MISMATCH: u64 = 26;
+
+ + + + + +Provided derived_balance function type doesn't meet the signature requirement. + + +
const EDERIVED_BALANCE_FUNCTION_SIGNATURE_MISMATCH: u64 = 27;
+
+ + + + + +Provided derived_supply function type doesn't meet the signature requirement. + + +
const EDERIVED_SUPPLY_FUNCTION_SIGNATURE_MISMATCH: u64 = 33;
+
+ + + + + +Fungible asset and store do not match. + + +
const EFUNGIBLE_ASSET_AND_STORE_MISMATCH: u64 = 11;
+
+ + + + + +Fungible assets do not match when merging. + + +
const EFUNGIBLE_ASSET_MISMATCH: u64 = 6;
+
+ + + + + +Fungible metadata does not exist on this account. + + +
const EFUNGIBLE_METADATA_EXISTENCE: u64 = 30;
+
+ + + + + +Flag for the existence of fungible store. + + +
const EFUNGIBLE_STORE_EXISTENCE: u64 = 23;
+
+ + + + + +Insufficient balance to withdraw or transfer. + + +
const EINSUFFICIENT_BALANCE: u64 = 4;
+
+ + + + + +Invalid withdraw/deposit on dispatchable token. The specified token has a dispatchable function hook. +Need to invoke dispatchable_fungible_asset::withdraw/deposit to perform transfer. + + +
const EINVALID_DISPATCHABLE_OPERATIONS: u64 = 28;
+
+ + + + + +The fungible asset's supply has exceeded maximum. + + +
const EMAX_SUPPLY_EXCEEDED: u64 = 5;
+
+ + + + + +The mint ref and the store do not match. + + +
const EMINT_REF_AND_STORE_MISMATCH: u64 = 7;
+
+ + + + + +Name of the fungible asset metadata is too long + + +
const ENAME_TOO_LONG: u64 = 15;
+
+ + + + + +Account is not the owner of metadata object. + + +
const ENOT_METADATA_OWNER: u64 = 24;
+
+ + + + + +Account is not the store's owner. + + +
const ENOT_STORE_OWNER: u64 = 8;
+
+ + + + + +Fungibility is only available for non-deletable objects. + + +
const EOBJECT_IS_DELETABLE: u64 = 18;
+
+ + + + + +Store is disabled from sending and receiving this fungible asset. + + +
const ESTORE_IS_FROZEN: u64 = 3;
+
+ + + + + +Supply resource is not found for a metadata object. + + +
const ESUPPLY_NOT_FOUND: u64 = 21;
+
+ + + + + +The fungible asset's supply will be negative which should be impossible. + + +
const ESUPPLY_UNDERFLOW: u64 = 20;
+
+ + + + + +Symbol of the fungible asset metadata is too long + + +
const ESYMBOL_TOO_LONG: u64 = 16;
+
+ + + + + +The transfer ref and the fungible asset do not match. + + +
const ETRANSFER_REF_AND_FUNGIBLE_ASSET_MISMATCH: u64 = 2;
+
+ + + + + +Transfer ref and store do not match. + + +
const ETRANSFER_REF_AND_STORE_MISMATCH: u64 = 9;
+
+ + + + + +URI for the icon of the fungible asset metadata is too long + + +
const EURI_TOO_LONG: u64 = 19;
+
+ + + + + +Provided withdraw function type doesn't meet the signature requirement. + + +
const EWITHDRAW_FUNCTION_SIGNATURE_MISMATCH: u64 = 25;
+
+ + + + + + + +
const MAX_DECIMALS: u8 = 32;
+
+ + + + + + + +
const MAX_NAME_LENGTH: u64 = 32;
+
+ + + + + + + +
const MAX_SYMBOL_LENGTH: u64 = 10;
+
+ + + + + + + +
const MAX_URI_LENGTH: u64 = 512;
+
+ + + + + +## Function `default_to_concurrent_fungible_supply` + + + +
fun default_to_concurrent_fungible_supply(): bool
+
+ + + +
+Implementation + + +
inline fun default_to_concurrent_fungible_supply(): bool {
+    features::concurrent_fungible_assets_enabled()
+}
+
+ + + +
+ + + +## Function `allow_upgrade_to_concurrent_fungible_balance` + + + +
fun allow_upgrade_to_concurrent_fungible_balance(): bool
+
+ + + +
+Implementation + + +
inline fun allow_upgrade_to_concurrent_fungible_balance(): bool {
+    features::concurrent_fungible_balance_enabled()
+}
+
+ + + +
+ + + +## Function `default_to_concurrent_fungible_balance` + + + +
fun default_to_concurrent_fungible_balance(): bool
+
+ + + +
+Implementation + + +
inline fun default_to_concurrent_fungible_balance(): bool {
+    features::default_to_concurrent_fungible_balance_enabled()
+}
+
+ + + +
+ + + +## Function `add_fungibility` + +Make an existing object fungible by adding the Metadata resource. +This returns the capabilities to mint, burn, and transfer. +maximum_supply defines the behavior of maximum supply when monitoring: +- option::none(): Monitoring unlimited supply +(width of the field - MAX_U128 is the implicit maximum supply) +if option::some(MAX_U128) is used, it is treated as unlimited supply. +- option::some(max): Monitoring fixed supply with max as the maximum supply. + + +
public fun add_fungibility(constructor_ref: &object::ConstructorRef, maximum_supply: option::Option<u128>, name: string::String, symbol: string::String, decimals: u8, icon_uri: string::String, project_uri: string::String): object::Object<fungible_asset::Metadata>
+
+ + + +
+Implementation + + +
public fun add_fungibility(
+    constructor_ref: &ConstructorRef,
+    maximum_supply: Option<u128>,
+    name: String,
+    symbol: String,
+    decimals: u8,
+    icon_uri: String,
+    project_uri: String,
+): Object<Metadata> {
+    assert!(!object::can_generate_delete_ref(constructor_ref), error::invalid_argument(EOBJECT_IS_DELETABLE));
+    let metadata_object_signer = &object::generate_signer(constructor_ref);
+    assert!(string::length(&name) <= MAX_NAME_LENGTH, error::out_of_range(ENAME_TOO_LONG));
+    assert!(string::length(&symbol) <= MAX_SYMBOL_LENGTH, error::out_of_range(ESYMBOL_TOO_LONG));
+    assert!(decimals <= MAX_DECIMALS, error::out_of_range(EDECIMALS_TOO_LARGE));
+    assert!(string::length(&icon_uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG));
+    assert!(string::length(&project_uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG));
+    move_to(metadata_object_signer,
+        Metadata {
+            name,
+            symbol,
+            decimals,
+            icon_uri,
+            project_uri,
+        }
+    );
+
+    if (default_to_concurrent_fungible_supply()) {
+        let unlimited = option::is_none(&maximum_supply);
+        move_to(metadata_object_signer, ConcurrentSupply {
+            current: if (unlimited) {
+                aggregator_v2::create_unbounded_aggregator()
+            } else {
+                aggregator_v2::create_aggregator(option::extract(&mut maximum_supply))
+            },
+        });
+    } else {
+        move_to(metadata_object_signer, Supply {
+            current: 0,
+            maximum: maximum_supply
+        });
+    };
+
+    object::object_from_constructor_ref<Metadata>(constructor_ref)
+}
+
+ + + +
+ + + +## Function `set_untransferable` + +Set that only untransferable stores can be created for this fungible asset. + + +
public fun set_untransferable(constructor_ref: &object::ConstructorRef)
+
+ + + +
+Implementation + + +
public fun set_untransferable(constructor_ref: &ConstructorRef) {
+    let metadata_addr = object::address_from_constructor_ref(constructor_ref);
+    assert!(exists<Metadata>(metadata_addr), error::not_found(EFUNGIBLE_METADATA_EXISTENCE));
+    let metadata_signer = &object::generate_signer(constructor_ref);
+    move_to(metadata_signer, Untransferable {});
+}
+
+ + + +
+ + + +## Function `is_untransferable` + +Returns true if the FA is untransferable. + + +
#[view]
+public fun is_untransferable<T: key>(metadata: object::Object<T>): bool
+
+ + + +
+Implementation + + +
public fun is_untransferable<T: key>(metadata: Object<T>): bool {
+    exists<Untransferable>(object::object_address(&metadata))
+}
+
+ + + +
+ + + +## Function `register_dispatch_functions` + +Create a fungible asset store whose transfer rule would be overloaded by the provided function. + + +
public(friend) fun register_dispatch_functions(constructor_ref: &object::ConstructorRef, withdraw_function: option::Option<function_info::FunctionInfo>, deposit_function: option::Option<function_info::FunctionInfo>, derived_balance_function: option::Option<function_info::FunctionInfo>)
+
+ + + +
+Implementation + + +
public(friend) fun register_dispatch_functions(
+    constructor_ref: &ConstructorRef,
+    withdraw_function: Option<FunctionInfo>,
+    deposit_function: Option<FunctionInfo>,
+    derived_balance_function: Option<FunctionInfo>,
+) {
+    // Verify that caller type matches callee type so wrongly typed function cannot be registered.
+    option::for_each_ref(&withdraw_function, |withdraw_function| {
+        let dispatcher_withdraw_function_info = function_info::new_function_info_from_address(
+            @aptos_framework,
+            string::utf8(b"dispatchable_fungible_asset"),
+            string::utf8(b"dispatchable_withdraw"),
+        );
+
+        assert!(
+            function_info::check_dispatch_type_compatibility(
+                &dispatcher_withdraw_function_info,
+                withdraw_function
+            ),
+            error::invalid_argument(
+                EWITHDRAW_FUNCTION_SIGNATURE_MISMATCH
+            )
+        );
+    });
+
+    option::for_each_ref(&deposit_function, |deposit_function| {
+        let dispatcher_deposit_function_info = function_info::new_function_info_from_address(
+            @aptos_framework,
+            string::utf8(b"dispatchable_fungible_asset"),
+            string::utf8(b"dispatchable_deposit"),
+        );
+        // Verify that caller type matches callee type so wrongly typed function cannot be registered.
+        assert!(
+            function_info::check_dispatch_type_compatibility(
+                &dispatcher_deposit_function_info,
+                deposit_function
+            ),
+            error::invalid_argument(
+                EDEPOSIT_FUNCTION_SIGNATURE_MISMATCH
+            )
+        );
+    });
+
+    option::for_each_ref(&derived_balance_function, |balance_function| {
+        let dispatcher_derived_balance_function_info = function_info::new_function_info_from_address(
+            @aptos_framework,
+            string::utf8(b"dispatchable_fungible_asset"),
+            string::utf8(b"dispatchable_derived_balance"),
+        );
+        // Verify that caller type matches callee type so wrongly typed function cannot be registered.
+        assert!(
+            function_info::check_dispatch_type_compatibility(
+                &dispatcher_derived_balance_function_info,
+                balance_function
+            ),
+            error::invalid_argument(
+                EDERIVED_BALANCE_FUNCTION_SIGNATURE_MISMATCH
+            )
+        );
+    });
+    register_dispatch_function_sanity_check(constructor_ref);
+    assert!(
+        !exists<DispatchFunctionStore>(
+            object::address_from_constructor_ref(constructor_ref)
+        ),
+        error::already_exists(EALREADY_REGISTERED)
+    );
+
+    let store_obj = &object::generate_signer(constructor_ref);
+
+    // Store the overload function hook.
+    move_to<DispatchFunctionStore>(
+        store_obj,
+        DispatchFunctionStore {
+            withdraw_function,
+            deposit_function,
+            derived_balance_function,
+        }
+    );
+}
+
+ + + +
+ + + +## Function `register_derive_supply_dispatch_function` + +Define the derived supply dispatch with the provided function. + + +
public(friend) fun register_derive_supply_dispatch_function(constructor_ref: &object::ConstructorRef, dispatch_function: option::Option<function_info::FunctionInfo>)
+
+ + + +
+Implementation + + +
public(friend) fun register_derive_supply_dispatch_function(
+    constructor_ref: &ConstructorRef,
+    dispatch_function: Option<FunctionInfo>
+) {
+    // Verify that caller type matches callee type so wrongly typed function cannot be registered.
+    option::for_each_ref(&dispatch_function, |supply_function| {
+        let function_info = function_info::new_function_info_from_address(
+            @aptos_framework,
+            string::utf8(b"dispatchable_fungible_asset"),
+            string::utf8(b"dispatchable_derived_supply"),
+        );
+        // Verify that caller type matches callee type so wrongly typed function cannot be registered.
+        assert!(
+            function_info::check_dispatch_type_compatibility(
+                &function_info,
+                supply_function
+            ),
+            error::invalid_argument(
+                EDERIVED_SUPPLY_FUNCTION_SIGNATURE_MISMATCH
+            )
+        );
+    });
+    register_dispatch_function_sanity_check(constructor_ref);
+    assert!(
+        !exists<DeriveSupply>(
+            object::address_from_constructor_ref(constructor_ref)
+        ),
+        error::already_exists(EALREADY_REGISTERED)
+    );
+
+
+    let store_obj = &object::generate_signer(constructor_ref);
+
+    // Store the overload function hook.
+    move_to<DeriveSupply>(
+        store_obj,
+        DeriveSupply {
+            dispatch_function
+        }
+    );
+}
+
+ + + +
+ + + +## Function `register_dispatch_function_sanity_check` + +Check the requirements for registering a dispatchable function. + + +
fun register_dispatch_function_sanity_check(constructor_ref: &object::ConstructorRef)
+
+ + + +
+Implementation + + +
inline fun register_dispatch_function_sanity_check(
+    constructor_ref: &ConstructorRef,
+)  {
+    // Cannot register hook for APT.
+    assert!(
+        object::address_from_constructor_ref(constructor_ref) != @aptos_fungible_asset,
+        error::permission_denied(EAPT_NOT_DISPATCHABLE)
+    );
+    assert!(
+        !object::can_generate_delete_ref(constructor_ref),
+        error::invalid_argument(EOBJECT_IS_DELETABLE)
+    );
+    assert!(
+        exists<Metadata>(
+            object::address_from_constructor_ref(constructor_ref)
+        ),
+        error::not_found(EFUNGIBLE_METADATA_EXISTENCE),
+    );
+}
+
+ + + +
+ + + +## Function `generate_mint_ref` + +Creates a mint ref that can be used to mint fungible assets from the given fungible object's constructor ref. +This can only be called at object creation time as constructor_ref is only available then. + + +
public fun generate_mint_ref(constructor_ref: &object::ConstructorRef): fungible_asset::MintRef
+
+ + + +
+Implementation + + +
public fun generate_mint_ref(constructor_ref: &ConstructorRef): MintRef {
+    let metadata = object::object_from_constructor_ref<Metadata>(constructor_ref);
+    MintRef { metadata }
+}
+
+ + + +
+ + + +## Function `generate_burn_ref` + +Creates a burn ref that can be used to burn fungible assets from the given fungible object's constructor ref. +This can only be called at object creation time as constructor_ref is only available then. + + +
public fun generate_burn_ref(constructor_ref: &object::ConstructorRef): fungible_asset::BurnRef
+
+ + + +
+Implementation + + +
public fun generate_burn_ref(constructor_ref: &ConstructorRef): BurnRef {
+    let metadata = object::object_from_constructor_ref<Metadata>(constructor_ref);
+    BurnRef { metadata }
+}
+
+ + + +
+ + + +## Function `generate_transfer_ref` + +Creates a transfer ref that can be used to freeze/unfreeze/transfer fungible assets from the given fungible +object's constructor ref. +This can only be called at object creation time as constructor_ref is only available then. + + +
public fun generate_transfer_ref(constructor_ref: &object::ConstructorRef): fungible_asset::TransferRef
+
+ + + +
+Implementation + + +
public fun generate_transfer_ref(constructor_ref: &ConstructorRef): TransferRef {
+    let metadata = object::object_from_constructor_ref<Metadata>(constructor_ref);
+    TransferRef { metadata }
+}
+
+ + + +
+ + + +## Function `generate_mutate_metadata_ref` + +Creates a mutate metadata ref that can be used to change the metadata information of fungible assets from the +given fungible object's constructor ref. +This can only be called at object creation time as constructor_ref is only available then. + + +
public fun generate_mutate_metadata_ref(constructor_ref: &object::ConstructorRef): fungible_asset::MutateMetadataRef
+
+ + + +
+Implementation + + +
public fun generate_mutate_metadata_ref(constructor_ref: &ConstructorRef): MutateMetadataRef {
+    let metadata = object::object_from_constructor_ref<Metadata>(constructor_ref);
+    MutateMetadataRef { metadata }
+}
+
+ + + +
+ + + +## Function `supply` + +Get the current supply from the metadata object. + + +
#[view]
+public fun supply<T: key>(metadata: object::Object<T>): option::Option<u128>
+
+ + + +
+Implementation + + +
public fun supply<T: key>(metadata: Object<T>): Option<u128> acquires Supply, ConcurrentSupply {
+    let metadata_address = object::object_address(&metadata);
+    if (exists<ConcurrentSupply>(metadata_address)) {
+        let supply = borrow_global<ConcurrentSupply>(metadata_address);
+        option::some(aggregator_v2::read(&supply.current))
+    } else if (exists<Supply>(metadata_address)) {
+        let supply = borrow_global<Supply>(metadata_address);
+        option::some(supply.current)
+    } else {
+        option::none()
+    }
+}
+
+ + + +
+ + + +## Function `maximum` + +Get the maximum supply from the metadata object. +If supply is unlimited (or set explicitly to MAX_U128), none is returned + + +
#[view]
+public fun maximum<T: key>(metadata: object::Object<T>): option::Option<u128>
+
+ + + +
+Implementation + + +
public fun maximum<T: key>(metadata: Object<T>): Option<u128> acquires Supply, ConcurrentSupply {
+    let metadata_address = object::object_address(&metadata);
+    if (exists<ConcurrentSupply>(metadata_address)) {
+        let supply = borrow_global<ConcurrentSupply>(metadata_address);
+        let max_value = aggregator_v2::max_value(&supply.current);
+        if (max_value == MAX_U128) {
+            option::none()
+        } else {
+            option::some(max_value)
+        }
+    } else if (exists<Supply>(metadata_address)) {
+        let supply = borrow_global<Supply>(metadata_address);
+        supply.maximum
+    } else {
+        option::none()
+    }
+}
+
+ + + +
+ + + +## Function `name` + +Get the name of the fungible asset from the metadata object. + + +
#[view]
+public fun name<T: key>(metadata: object::Object<T>): string::String
+
+ + + +
+Implementation + + +
public fun name<T: key>(metadata: Object<T>): String acquires Metadata {
+    borrow_fungible_metadata(&metadata).name
+}
+
+ + + +
+ + + +## Function `symbol` + +Get the symbol of the fungible asset from the metadata object. + + +
#[view]
+public fun symbol<T: key>(metadata: object::Object<T>): string::String
+
+ + + +
+Implementation + + +
public fun symbol<T: key>(metadata: Object<T>): String acquires Metadata {
+    borrow_fungible_metadata(&metadata).symbol
+}
+
+ + + +
+ + + +## Function `decimals` + +Get the decimals from the metadata object. + + +
#[view]
+public fun decimals<T: key>(metadata: object::Object<T>): u8
+
+ + + +
+Implementation + + +
public fun decimals<T: key>(metadata: Object<T>): u8 acquires Metadata {
+    borrow_fungible_metadata(&metadata).decimals
+}
+
+ + + +
+ + + +## Function `icon_uri` + +Get the icon uri from the metadata object. + + +
#[view]
+public fun icon_uri<T: key>(metadata: object::Object<T>): string::String
+
+ + + +
+Implementation + + +
public fun icon_uri<T: key>(metadata: Object<T>): String acquires Metadata {
+    borrow_fungible_metadata(&metadata).icon_uri
+}
+
+ + + +
+ + + +## Function `project_uri` + +Get the project uri from the metadata object. + + +
#[view]
+public fun project_uri<T: key>(metadata: object::Object<T>): string::String
+
+ + + +
+Implementation + + +
public fun project_uri<T: key>(metadata: Object<T>): String acquires Metadata {
+    borrow_fungible_metadata(&metadata).project_uri
+}
+
+ + + +
+ + + +## Function `metadata` + +Get the metadata struct from the metadata object. + + +
#[view]
+public fun metadata<T: key>(metadata: object::Object<T>): fungible_asset::Metadata
+
+ + + +
+Implementation + + +
public fun metadata<T: key>(metadata: Object<T>): Metadata acquires Metadata {
+    *borrow_fungible_metadata(&metadata)
+}
+
+ + + +
+ + + +## Function `store_exists` + +Return whether the provided address has a store initialized. + + +
#[view]
+public fun store_exists(store: address): bool
+
+ + + +
+Implementation + + +
public fun store_exists(store: address): bool {
+    store_exists_inline(store)
+}
+
+ + + +
+ + + +## Function `store_exists_inline` + +Return whether the provided address has a store initialized. + + +
fun store_exists_inline(store: address): bool
+
+ + + +
+Implementation + + +
inline fun store_exists_inline(store: address): bool {
+    exists<FungibleStore>(store)
+}
+
+ + + +
+ + + +## Function `concurrent_fungible_balance_exists_inline` + +Return whether the provided address has a concurrent fungible balance initialized, +at the fungible store address. + + +
fun concurrent_fungible_balance_exists_inline(store: address): bool
+
+ + + +
+Implementation + + +
inline fun concurrent_fungible_balance_exists_inline(store: address): bool {
+    exists<ConcurrentFungibleBalance>(store)
+}
+
+ + + +
+ + + +## Function `metadata_from_asset` + +Return the underlying metadata object + + +
public fun metadata_from_asset(fa: &fungible_asset::FungibleAsset): object::Object<fungible_asset::Metadata>
+
+ + + +
+Implementation + + +
public fun metadata_from_asset(fa: &FungibleAsset): Object<Metadata> {
+    fa.metadata
+}
+
+ + + +
+ + + +## Function `store_metadata` + +Return the underlying metadata object. + + +
#[view]
+public fun store_metadata<T: key>(store: object::Object<T>): object::Object<fungible_asset::Metadata>
+
+ + + +
+Implementation + + +
public fun store_metadata<T: key>(store: Object<T>): Object<Metadata> acquires FungibleStore {
+    borrow_store_resource(&store).metadata
+}
+
+ + + +
+ + + +## Function `amount` + +Return the amount of a given fungible asset. + + +
public fun amount(fa: &fungible_asset::FungibleAsset): u64
+
+ + + +
+Implementation + + +
public fun amount(fa: &FungibleAsset): u64 {
+    fa.amount
+}
+
+ + + +
+ + + +## Function `balance` + +Get the balance of a given store. + + +
#[view]
+public fun balance<T: key>(store: object::Object<T>): u64
+
+ + + +
+Implementation + + +
public fun balance<T: key>(store: Object<T>): u64 acquires FungibleStore, ConcurrentFungibleBalance {
+    let store_addr = object::object_address(&store);
+    if (store_exists_inline(store_addr)) {
+        let store_balance = borrow_store_resource(&store).balance;
+        if (store_balance == 0 && concurrent_fungible_balance_exists_inline(store_addr)) {
+            let balance_resource = borrow_global<ConcurrentFungibleBalance>(store_addr);
+            aggregator_v2::read(&balance_resource.balance)
+        } else {
+            store_balance
+        }
+    } else {
+        0
+    }
+}
+
+ + + +
+ + + +## Function `is_balance_at_least` + +Check whether the balance of a given store is >= amount. + + +
#[view]
+public fun is_balance_at_least<T: key>(store: object::Object<T>, amount: u64): bool
+
+ + + +
+Implementation + + +
public fun is_balance_at_least<T: key>(store: Object<T>, amount: u64): bool acquires FungibleStore, ConcurrentFungibleBalance {
+    let store_addr = object::object_address(&store);
+    is_address_balance_at_least(store_addr, amount)
+}
+
+ + + +
+ + + +## Function `is_address_balance_at_least` + +Check whether the balance of a given store is >= amount. + + +
public(friend) fun is_address_balance_at_least(store_addr: address, amount: u64): bool
+
+ + + +
+Implementation + + +
public(friend) fun is_address_balance_at_least(store_addr: address, amount: u64): bool acquires FungibleStore, ConcurrentFungibleBalance {
+    if (store_exists_inline(store_addr)) {
+        let store_balance = borrow_global<FungibleStore>(store_addr).balance;
+        if (store_balance == 0 && concurrent_fungible_balance_exists_inline(store_addr)) {
+            let balance_resource = borrow_global<ConcurrentFungibleBalance>(store_addr);
+            aggregator_v2::is_at_least(&balance_resource.balance, amount)
+        } else {
+            store_balance >= amount
+        }
+    } else {
+        amount == 0
+    }
+}
+
+ + + +
+ + + +## Function `is_frozen` + +Return whether a store is frozen. + +If the store has not been created, we default to returning false so deposits can be sent to it. + + +
#[view]
+public fun is_frozen<T: key>(store: object::Object<T>): bool
+
+ + + +
+Implementation + + +
public fun is_frozen<T: key>(store: Object<T>): bool acquires FungibleStore {
+    let store_addr = object::object_address(&store);
+    store_exists_inline(store_addr) && borrow_global<FungibleStore>(store_addr).frozen
+}
+
+ + + +
+ + + +## Function `is_store_dispatchable` + +Return whether a fungible asset type is dispatchable. + + +
#[view]
+public fun is_store_dispatchable<T: key>(store: object::Object<T>): bool
+
+ + + +
+Implementation + + +
public fun is_store_dispatchable<T: key>(store: Object<T>): bool acquires FungibleStore {
+    let fa_store = borrow_store_resource(&store);
+    let metadata_addr = object::object_address(&fa_store.metadata);
+    exists<DispatchFunctionStore>(metadata_addr)
+}
+
+ + + +
+ + + +## Function `deposit_dispatch_function` + + + +
public fun deposit_dispatch_function<T: key>(store: object::Object<T>): option::Option<function_info::FunctionInfo>
+
+ + + +
+Implementation + + +
public fun deposit_dispatch_function<T: key>(store: Object<T>): Option<FunctionInfo> acquires FungibleStore, DispatchFunctionStore {
+    let fa_store = borrow_store_resource(&store);
+    let metadata_addr = object::object_address(&fa_store.metadata);
+    if(exists<DispatchFunctionStore>(metadata_addr)) {
+        borrow_global<DispatchFunctionStore>(metadata_addr).deposit_function
+    } else {
+        option::none()
+    }
+}
+
+ + + +
+ + + +## Function `has_deposit_dispatch_function` + + + +
fun has_deposit_dispatch_function(metadata: object::Object<fungible_asset::Metadata>): bool
+
+ + + +
+Implementation + + +
fun has_deposit_dispatch_function(metadata: Object<Metadata>): bool acquires DispatchFunctionStore {
+    let metadata_addr = object::object_address(&metadata);
+    // Short circuit on APT for better perf
+    if(metadata_addr != @aptos_fungible_asset && exists<DispatchFunctionStore>(metadata_addr)) {
+        option::is_some(&borrow_global<DispatchFunctionStore>(metadata_addr).deposit_function)
+    } else {
+        false
+    }
+}
+
+ + + +
+ + + +## Function `withdraw_dispatch_function` + + + +
public fun withdraw_dispatch_function<T: key>(store: object::Object<T>): option::Option<function_info::FunctionInfo>
+
+ + + +
+Implementation + + +
public fun withdraw_dispatch_function<T: key>(store: Object<T>): Option<FunctionInfo> acquires FungibleStore, DispatchFunctionStore {
+    let fa_store = borrow_store_resource(&store);
+    let metadata_addr = object::object_address(&fa_store.metadata);
+    if(exists<DispatchFunctionStore>(metadata_addr)) {
+        borrow_global<DispatchFunctionStore>(metadata_addr).withdraw_function
+    } else {
+        option::none()
+    }
+}
+
+ + + +
+ + + +## Function `has_withdraw_dispatch_function` + + + +
fun has_withdraw_dispatch_function(metadata: object::Object<fungible_asset::Metadata>): bool
+
+ + + +
+Implementation + + +
fun has_withdraw_dispatch_function(metadata: Object<Metadata>): bool acquires DispatchFunctionStore {
+    let metadata_addr = object::object_address(&metadata);
+    // Short circuit on APT for better perf
+    if (metadata_addr != @aptos_fungible_asset && exists<DispatchFunctionStore>(metadata_addr)) {
+        option::is_some(&borrow_global<DispatchFunctionStore>(metadata_addr).withdraw_function)
+    } else {
+        false
+    }
+}
+
+ + + +
+ + + +## Function `derived_balance_dispatch_function` + + + +
public(friend) fun derived_balance_dispatch_function<T: key>(store: object::Object<T>): option::Option<function_info::FunctionInfo>
+
+ + + +
+Implementation + + +
public(friend) fun derived_balance_dispatch_function<T: key>(store: Object<T>): Option<FunctionInfo> acquires FungibleStore, DispatchFunctionStore {
+    let fa_store = borrow_store_resource(&store);
+    let metadata_addr = object::object_address(&fa_store.metadata);
+    if (exists<DispatchFunctionStore>(metadata_addr)) {
+        borrow_global<DispatchFunctionStore>(metadata_addr).derived_balance_function
+    } else {
+        option::none()
+    }
+}
+
+ + + +
+ + + +## Function `derived_supply_dispatch_function` + + + +
public(friend) fun derived_supply_dispatch_function<T: key>(metadata: object::Object<T>): option::Option<function_info::FunctionInfo>
+
+ + + +
+Implementation + + +
public(friend) fun derived_supply_dispatch_function<T: key>(metadata: Object<T>): Option<FunctionInfo> acquires DeriveSupply {
+    let metadata_addr = object::object_address(&metadata);
+    if (exists<DeriveSupply>(metadata_addr)) {
+        borrow_global<DeriveSupply>(metadata_addr).dispatch_function
+    } else {
+        option::none()
+    }
+}
+
+ + + +
+ + + +## Function `asset_metadata` + + + +
public fun asset_metadata(fa: &fungible_asset::FungibleAsset): object::Object<fungible_asset::Metadata>
+
+ + + +
+Implementation + + +
public fun asset_metadata(fa: &FungibleAsset): Object<Metadata> {
+    fa.metadata
+}
+
+ + + +
+ + + +## Function `mint_ref_metadata` + +Get the underlying metadata object from the MintRef. + + +
public fun mint_ref_metadata(ref: &fungible_asset::MintRef): object::Object<fungible_asset::Metadata>
+
+ + + +
+Implementation + + +
public fun mint_ref_metadata(ref: &MintRef): Object<Metadata> {
+    ref.metadata
+}
+
+ + + +
+ + + +## Function `transfer_ref_metadata` + +Get the underlying metadata object from the TransferRef. + + +
public fun transfer_ref_metadata(ref: &fungible_asset::TransferRef): object::Object<fungible_asset::Metadata>
+
+ + + +
+Implementation + + +
public fun transfer_ref_metadata(ref: &TransferRef): Object<Metadata> {
+    ref.metadata
+}
+
+ + + +
+ + + +## Function `burn_ref_metadata` + +Get the underlying metadata object from the BurnRef. + + +
public fun burn_ref_metadata(ref: &fungible_asset::BurnRef): object::Object<fungible_asset::Metadata>
+
+ + + +
+Implementation + + +
public fun burn_ref_metadata(ref: &BurnRef): Object<Metadata> {
+    ref.metadata
+}
+
+ + + +
+ + + +## Function `object_from_metadata_ref` + +Get the underlying metadata object from the MutateMetadataRef. + + +
public fun object_from_metadata_ref(ref: &fungible_asset::MutateMetadataRef): object::Object<fungible_asset::Metadata>
+
+ + + +
+Implementation + + +
public fun object_from_metadata_ref(ref: &MutateMetadataRef): Object<Metadata> {
+    ref.metadata
+}
+
+ + + +
+ + + +## Function `transfer` + +Transfer an amount of fungible asset from from_store, which should be owned by sender, to receiver. +Note: it does not move the underlying object. + + +
public entry fun transfer<T: key>(sender: &signer, from: object::Object<T>, to: object::Object<T>, amount: u64)
+
+ + + +
+Implementation + + +
public entry fun transfer<T: key>(
+    sender: &signer,
+    from: Object<T>,
+    to: Object<T>,
+    amount: u64,
+) acquires FungibleStore, DispatchFunctionStore, ConcurrentFungibleBalance {
+    let fa = withdraw(sender, from, amount);
+    deposit(to, fa);
+}
+
+ + + +
+ + + +## Function `create_store` + +Allow an object to hold a store for fungible assets. +Applications can use this to create multiple stores for isolating fungible assets for different purposes. + + +
public fun create_store<T: key>(constructor_ref: &object::ConstructorRef, metadata: object::Object<T>): object::Object<fungible_asset::FungibleStore>
+
+ + + +
+Implementation + + +
public fun create_store<T: key>(
+    constructor_ref: &ConstructorRef,
+    metadata: Object<T>,
+): Object<FungibleStore> {
+    let store_obj = &object::generate_signer(constructor_ref);
+    move_to(store_obj, FungibleStore {
+        metadata: object::convert(metadata),
+        balance: 0,
+        frozen: false,
+    });
+
+    if (is_untransferable(metadata)) {
+        object::set_untransferable(constructor_ref);
+    };
+
+    if (default_to_concurrent_fungible_balance()) {
+        move_to(store_obj, ConcurrentFungibleBalance {
+            balance: aggregator_v2::create_unbounded_aggregator(),
+        });
+    };
+
+    object::object_from_constructor_ref<FungibleStore>(constructor_ref)
+}
+
+ + + +
+ + + +## Function `remove_store` + +Used to delete a store. Requires the store to be completely empty prior to removing it + + +
public fun remove_store(delete_ref: &object::DeleteRef)
+
+ + + +
+Implementation + + +
public fun remove_store(delete_ref: &DeleteRef) acquires FungibleStore, FungibleAssetEvents, ConcurrentFungibleBalance {
+    let store = &object::object_from_delete_ref<FungibleStore>(delete_ref);
+    let addr = object::object_address(store);
+    let FungibleStore { metadata: _, balance, frozen: _ }
+        = move_from<FungibleStore>(addr);
+    assert!(balance == 0, error::permission_denied(EBALANCE_IS_NOT_ZERO));
+
+    if (concurrent_fungible_balance_exists_inline(addr)) {
+        let ConcurrentFungibleBalance { balance } = move_from<ConcurrentFungibleBalance>(addr);
+        assert!(aggregator_v2::read(&balance) == 0, error::permission_denied(EBALANCE_IS_NOT_ZERO));
+    };
+
+    // Cleanup deprecated event handles if exist.
+    if (exists<FungibleAssetEvents>(addr)) {
+        let FungibleAssetEvents {
+            deposit_events,
+            withdraw_events,
+            frozen_events,
+        } = move_from<FungibleAssetEvents>(addr);
+        event::destroy_handle(deposit_events);
+        event::destroy_handle(withdraw_events);
+        event::destroy_handle(frozen_events);
+    };
+}
+
+ + + +
+ + + +## Function `withdraw` + +Withdraw amount of the fungible asset from store by the owner. + + +
public fun withdraw<T: key>(owner: &signer, store: object::Object<T>, amount: u64): fungible_asset::FungibleAsset
+
+ + + +
+Implementation + + +
public fun withdraw<T: key>(
+    owner: &signer,
+    store: Object<T>,
+    amount: u64,
+): FungibleAsset acquires FungibleStore, DispatchFunctionStore, ConcurrentFungibleBalance {
+    withdraw_sanity_check(owner, store, true);
+    withdraw_internal(object::object_address(&store), amount)
+}
+
+ + + +
+ + + +## Function `withdraw_sanity_check` + +Check the permission for withdraw operation. + + +
public(friend) fun withdraw_sanity_check<T: key>(owner: &signer, store: object::Object<T>, abort_on_dispatch: bool)
+
+ + + +
+Implementation + + +
public(friend) fun withdraw_sanity_check<T: key>(
+    owner: &signer,
+    store: Object<T>,
+    abort_on_dispatch: bool,
+) acquires FungibleStore, DispatchFunctionStore {
+    assert!(object::owns(store, signer::address_of(owner)), error::permission_denied(ENOT_STORE_OWNER));
+    let fa_store = borrow_store_resource(&store);
+    assert!(
+        !abort_on_dispatch || !has_withdraw_dispatch_function(fa_store.metadata),
+        error::invalid_argument(EINVALID_DISPATCHABLE_OPERATIONS)
+    );
+    assert!(!fa_store.frozen, error::permission_denied(ESTORE_IS_FROZEN));
+}
+
+ + + +
+ + + +## Function `deposit_sanity_check` + +Check the permission for deposit operation. + + +
public fun deposit_sanity_check<T: key>(store: object::Object<T>, abort_on_dispatch: bool)
+
+ + + +
+Implementation + + +
public fun deposit_sanity_check<T: key>(
+    store: Object<T>,
+    abort_on_dispatch: bool
+) acquires FungibleStore, DispatchFunctionStore {
+    let fa_store = borrow_store_resource(&store);
+    assert!(
+        !abort_on_dispatch || !has_deposit_dispatch_function(fa_store.metadata),
+        error::invalid_argument(EINVALID_DISPATCHABLE_OPERATIONS)
+    );
+    assert!(!fa_store.frozen, error::permission_denied(ESTORE_IS_FROZEN));
+}
+
+ + + +
+ + + +## Function `deposit` + +Deposit amount of the fungible asset to store. + + +
public fun deposit<T: key>(store: object::Object<T>, fa: fungible_asset::FungibleAsset)
+
+ + + +
+Implementation + + +
public fun deposit<T: key>(store: Object<T>, fa: FungibleAsset) acquires FungibleStore, DispatchFunctionStore, ConcurrentFungibleBalance {
+    deposit_sanity_check(store, true);
+    deposit_internal(object::object_address(&store), fa);
+}
+
+ + + +
+ + + +## Function `mint` + +Mint the specified amount of the fungible asset. + + +
public fun mint(ref: &fungible_asset::MintRef, amount: u64): fungible_asset::FungibleAsset
+
+ + + +
+Implementation + + +
public fun mint(ref: &MintRef, amount: u64): FungibleAsset acquires Supply, ConcurrentSupply {
+    let metadata = ref.metadata;
+    mint_internal(metadata, amount)
+}
+
+ + + +
+ + + +## Function `mint_internal` + +CAN ONLY BE CALLED BY coin.move for migration. + + +
public(friend) fun mint_internal(metadata: object::Object<fungible_asset::Metadata>, amount: u64): fungible_asset::FungibleAsset
+
+ + + +
+Implementation + + +
public(friend) fun mint_internal(
+    metadata: Object<Metadata>,
+    amount: u64
+): FungibleAsset acquires Supply, ConcurrentSupply {
+    increase_supply(&metadata, amount);
+    FungibleAsset {
+        metadata,
+        amount
+    }
+}
+
+ + + +
+ + + +## Function `mint_to` + +Mint the specified amount of the fungible asset to a destination store. + + +
public fun mint_to<T: key>(ref: &fungible_asset::MintRef, store: object::Object<T>, amount: u64)
+
+ + + +
+Implementation + + +
public fun mint_to<T: key>(ref: &MintRef, store: Object<T>, amount: u64)
+acquires FungibleStore, Supply, ConcurrentSupply, DispatchFunctionStore, ConcurrentFungibleBalance {
+    deposit_sanity_check(store, false);
+    deposit_internal(object::object_address(&store), mint(ref, amount));
+}
+
+ + + +
+ + + +## Function `set_frozen_flag` + +Enable/disable a store's ability to do direct transfers of the fungible asset. + + +
public fun set_frozen_flag<T: key>(ref: &fungible_asset::TransferRef, store: object::Object<T>, frozen: bool)
+
+ + + +
+Implementation + + +
public fun set_frozen_flag<T: key>(
+    ref: &TransferRef,
+    store: Object<T>,
+    frozen: bool,
+) acquires FungibleStore {
+    assert!(
+        ref.metadata == store_metadata(store),
+        error::invalid_argument(ETRANSFER_REF_AND_STORE_MISMATCH),
+    );
+    set_frozen_flag_internal(store, frozen)
+}
+
+ + + +
+ + + +## Function `set_frozen_flag_internal` + + + +
public(friend) fun set_frozen_flag_internal<T: key>(store: object::Object<T>, frozen: bool)
+
+ + + +
+Implementation + + +
public(friend) fun set_frozen_flag_internal<T: key>(
+    store: Object<T>,
+    frozen: bool
+) acquires FungibleStore {
+    let store_addr = object::object_address(&store);
+    borrow_global_mut<FungibleStore>(store_addr).frozen = frozen;
+
+    event::emit(Frozen { store: store_addr, frozen });
+}
+
+ + + +
+ + + +## Function `burn` + +Burns a fungible asset + + +
public fun burn(ref: &fungible_asset::BurnRef, fa: fungible_asset::FungibleAsset)
+
+ + + +
+Implementation + + +
public fun burn(ref: &BurnRef, fa: FungibleAsset) acquires Supply, ConcurrentSupply {
+    assert!(
+        ref.metadata == metadata_from_asset(&fa),
+        error::invalid_argument(EBURN_REF_AND_FUNGIBLE_ASSET_MISMATCH)
+    );
+    burn_internal(fa);
+}
+
+ + + +
+ + + +## Function `burn_internal` + +CAN ONLY BE CALLED BY coin.move for migration. + + +
public(friend) fun burn_internal(fa: fungible_asset::FungibleAsset): u64
+
+ + + +
+Implementation + + +
public(friend) fun burn_internal(
+    fa: FungibleAsset
+): u64 acquires Supply, ConcurrentSupply {
+    let FungibleAsset {
+        metadata,
+        amount
+    } = fa;
+    decrease_supply(&metadata, amount);
+    amount
+}
+
+ + + +
+ + + +## Function `burn_from` + +Burn the amount of the fungible asset from the given store. + + +
public fun burn_from<T: key>(ref: &fungible_asset::BurnRef, store: object::Object<T>, amount: u64)
+
+ + + +
+Implementation + + +
public fun burn_from<T: key>(
+    ref: &BurnRef,
+    store: Object<T>,
+    amount: u64
+) acquires FungibleStore, Supply, ConcurrentSupply, ConcurrentFungibleBalance {
+    // ref metadata match is checked in burn() call
+    burn(ref, withdraw_internal(object::object_address(&store), amount));
+}
+
+ + + +
+ + + +## Function `address_burn_from` + + + +
public(friend) fun address_burn_from(ref: &fungible_asset::BurnRef, store_addr: address, amount: u64)
+
+ + + +
+Implementation + + +
public(friend) fun address_burn_from(
+    ref: &BurnRef,
+    store_addr: address,
+    amount: u64
+) acquires FungibleStore, Supply, ConcurrentSupply, ConcurrentFungibleBalance {
+    // ref metadata match is checked in burn() call
+    burn(ref, withdraw_internal(store_addr, amount));
+}
+
+ + + +
+ + + +## Function `withdraw_with_ref` + +Withdraw amount of the fungible asset from the store ignoring frozen. + + +
public fun withdraw_with_ref<T: key>(ref: &fungible_asset::TransferRef, store: object::Object<T>, amount: u64): fungible_asset::FungibleAsset
+
+ + + +
+Implementation + + +
public fun withdraw_with_ref<T: key>(
+    ref: &TransferRef,
+    store: Object<T>,
+    amount: u64
+): FungibleAsset acquires FungibleStore, ConcurrentFungibleBalance {
+    assert!(
+        ref.metadata == store_metadata(store),
+        error::invalid_argument(ETRANSFER_REF_AND_STORE_MISMATCH),
+    );
+    withdraw_internal(object::object_address(&store), amount)
+}
+
+ + + +
+ + + +## Function `deposit_with_ref` + +Deposit the fungible asset into the store ignoring frozen. + + +
public fun deposit_with_ref<T: key>(ref: &fungible_asset::TransferRef, store: object::Object<T>, fa: fungible_asset::FungibleAsset)
+
+ + + +
+Implementation + + +
public fun deposit_with_ref<T: key>(
+    ref: &TransferRef,
+    store: Object<T>,
+    fa: FungibleAsset
+) acquires FungibleStore, ConcurrentFungibleBalance {
+    assert!(
+        ref.metadata == fa.metadata,
+        error::invalid_argument(ETRANSFER_REF_AND_FUNGIBLE_ASSET_MISMATCH)
+    );
+    deposit_internal(object::object_address(&store), fa);
+}
+
+ + + +
+ + + +## Function `transfer_with_ref` + +Transfer amount of the fungible asset with TransferRef even if it is frozen. + + +
public fun transfer_with_ref<T: key>(transfer_ref: &fungible_asset::TransferRef, from: object::Object<T>, to: object::Object<T>, amount: u64)
+
+ + + +
+Implementation + + +
public fun transfer_with_ref<T: key>(
+    transfer_ref: &TransferRef,
+    from: Object<T>,
+    to: Object<T>,
+    amount: u64,
+) acquires FungibleStore, ConcurrentFungibleBalance {
+    let fa = withdraw_with_ref(transfer_ref, from, amount);
+    deposit_with_ref(transfer_ref, to, fa);
+}
+
+ + + +
+ + + +## Function `mutate_metadata` + +Mutate specified fields of the fungible asset's Metadata. + + +
public fun mutate_metadata(metadata_ref: &fungible_asset::MutateMetadataRef, name: option::Option<string::String>, symbol: option::Option<string::String>, decimals: option::Option<u8>, icon_uri: option::Option<string::String>, project_uri: option::Option<string::String>)
+
+ + + +
+Implementation + + +
public fun mutate_metadata(
+    metadata_ref: &MutateMetadataRef,
+    name: Option<String>,
+    symbol: Option<String>,
+    decimals: Option<u8>,
+    icon_uri: Option<String>,
+    project_uri: Option<String>,
+) acquires Metadata {
+    let metadata_address = object::object_address(&metadata_ref.metadata);
+    let mutable_metadata = borrow_global_mut<Metadata>(metadata_address);
+
+    if (option::is_some(&name)){
+        let name = option::extract(&mut name);
+        assert!(string::length(&name) <= MAX_NAME_LENGTH, error::out_of_range(ENAME_TOO_LONG));
+        mutable_metadata.name = name;
+    };
+    if (option::is_some(&symbol)){
+        let symbol = option::extract(&mut symbol);
+        assert!(string::length(&symbol) <= MAX_SYMBOL_LENGTH, error::out_of_range(ESYMBOL_TOO_LONG));
+        mutable_metadata.symbol = symbol;
+    };
+    if (option::is_some(&decimals)){
+        let decimals = option::extract(&mut decimals);
+        assert!(decimals <= MAX_DECIMALS, error::out_of_range(EDECIMALS_TOO_LARGE));
+        mutable_metadata.decimals = decimals;
+    };
+    if (option::is_some(&icon_uri)){
+        let icon_uri = option::extract(&mut icon_uri);
+        assert!(string::length(&icon_uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG));
+        mutable_metadata.icon_uri = icon_uri;
+    };
+    if (option::is_some(&project_uri)){
+        let project_uri = option::extract(&mut project_uri);
+        assert!(string::length(&project_uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG));
+        mutable_metadata.project_uri = project_uri;
+    };
+}
+
+ + + +
+ + + +## Function `zero` + +Create a fungible asset with zero amount. +This can be useful when starting a series of computations where the initial value is 0. + + +
public fun zero<T: key>(metadata: object::Object<T>): fungible_asset::FungibleAsset
+
+ + + +
+Implementation + + +
public fun zero<T: key>(metadata: Object<T>): FungibleAsset {
+    FungibleAsset {
+        metadata: object::convert(metadata),
+        amount: 0,
+    }
+}
+
+ + + +
+ + + +## Function `extract` + +Extract a given amount from the given fungible asset and return a new one. + + +
public fun extract(fungible_asset: &mut fungible_asset::FungibleAsset, amount: u64): fungible_asset::FungibleAsset
+
+ + + +
+Implementation + + +
public fun extract(fungible_asset: &mut FungibleAsset, amount: u64): FungibleAsset {
+    assert!(fungible_asset.amount >= amount, error::invalid_argument(EINSUFFICIENT_BALANCE));
+    fungible_asset.amount = fungible_asset.amount - amount;
+    FungibleAsset {
+        metadata: fungible_asset.metadata,
+        amount,
+    }
+}
+
+ + + +
+ + + +## Function `merge` + +"Merges" the two given fungible assets. The fungible asset passed in as dst_fungible_asset will have a value +equal to the sum of the two (dst_fungible_asset and src_fungible_asset). + + +
public fun merge(dst_fungible_asset: &mut fungible_asset::FungibleAsset, src_fungible_asset: fungible_asset::FungibleAsset)
+
+ + + +
+Implementation + + +
public fun merge(dst_fungible_asset: &mut FungibleAsset, src_fungible_asset: FungibleAsset) {
+    let FungibleAsset { metadata, amount } = src_fungible_asset;
+    assert!(metadata == dst_fungible_asset.metadata, error::invalid_argument(EFUNGIBLE_ASSET_MISMATCH));
+    dst_fungible_asset.amount = dst_fungible_asset.amount + amount;
+}
+
+ + + +
+ + + +## Function `destroy_zero` + +Destroy an empty fungible asset. + + +
public fun destroy_zero(fungible_asset: fungible_asset::FungibleAsset)
+
+ + + +
+Implementation + + +
public fun destroy_zero(fungible_asset: FungibleAsset) {
+    let FungibleAsset { amount, metadata: _ } = fungible_asset;
+    assert!(amount == 0, error::invalid_argument(EAMOUNT_IS_NOT_ZERO));
+}
+
+ + + +
+ + + +## Function `deposit_internal` + + + +
public(friend) fun deposit_internal(store_addr: address, fa: fungible_asset::FungibleAsset)
+
+ + + +
+Implementation + + +
public(friend) fun deposit_internal(store_addr: address, fa: FungibleAsset) acquires FungibleStore, ConcurrentFungibleBalance {
+    let FungibleAsset { metadata, amount } = fa;
+    assert!(exists<FungibleStore>(store_addr), error::not_found(EFUNGIBLE_STORE_EXISTENCE));
+    let store = borrow_global_mut<FungibleStore>(store_addr);
+    assert!(metadata == store.metadata, error::invalid_argument(EFUNGIBLE_ASSET_AND_STORE_MISMATCH));
+
+    if (amount == 0) return;
+
+    if (store.balance == 0 && concurrent_fungible_balance_exists_inline(store_addr)) {
+        let balance_resource = borrow_global_mut<ConcurrentFungibleBalance>(store_addr);
+        aggregator_v2::add(&mut balance_resource.balance, amount);
+    } else {
+        store.balance = store.balance + amount;
+    };
+
+    event::emit(Deposit { store: store_addr, amount });
+}
+
+ + + +
+ + + +## Function `withdraw_internal` + +Extract amount of the fungible asset from store. + + +
public(friend) fun withdraw_internal(store_addr: address, amount: u64): fungible_asset::FungibleAsset
+
+ + + +
+Implementation + + +
public(friend) fun withdraw_internal(
+    store_addr: address,
+    amount: u64,
+): FungibleAsset acquires FungibleStore, ConcurrentFungibleBalance {
+    assert!(exists<FungibleStore>(store_addr), error::not_found(EFUNGIBLE_STORE_EXISTENCE));
+
+    let store = borrow_global_mut<FungibleStore>(store_addr);
+    let metadata = store.metadata;
+    if (amount != 0) {
+        if (store.balance == 0 && concurrent_fungible_balance_exists_inline(store_addr)) {
+            let balance_resource = borrow_global_mut<ConcurrentFungibleBalance>(store_addr);
+            assert!(
+                aggregator_v2::try_sub(&mut balance_resource.balance, amount),
+                error::invalid_argument(EINSUFFICIENT_BALANCE)
+            );
+        } else {
+            assert!(store.balance >= amount, error::invalid_argument(EINSUFFICIENT_BALANCE));
+            store.balance = store.balance - amount;
+        };
+
+        event::emit<Withdraw>(Withdraw { store: store_addr, amount });
+    };
+    FungibleAsset { metadata, amount }
+}
+
+ + + +
+ + + +## Function `increase_supply` + +Increase the supply of a fungible asset by minting. + + +
fun increase_supply<T: key>(metadata: &object::Object<T>, amount: u64)
+
+ + + +
+Implementation + + +
fun increase_supply<T: key>(metadata: &Object<T>, amount: u64) acquires Supply, ConcurrentSupply {
+    if (amount == 0) {
+        return
+    };
+    let metadata_address = object::object_address(metadata);
+
+    if (exists<ConcurrentSupply>(metadata_address)) {
+        let supply = borrow_global_mut<ConcurrentSupply>(metadata_address);
+        assert!(
+            aggregator_v2::try_add(&mut supply.current, (amount as u128)),
+            error::out_of_range(EMAX_SUPPLY_EXCEEDED)
+        );
+    } else if (exists<Supply>(metadata_address)) {
+        let supply = borrow_global_mut<Supply>(metadata_address);
+        if (option::is_some(&supply.maximum)) {
+            let max = *option::borrow_mut(&mut supply.maximum);
+            assert!(
+                max - supply.current >= (amount as u128),
+                error::out_of_range(EMAX_SUPPLY_EXCEEDED)
+            )
+        };
+        supply.current = supply.current + (amount as u128);
+    } else {
+        abort error::not_found(ESUPPLY_NOT_FOUND)
+    }
+}
+
+ + + +
+ + + +## Function `decrease_supply` + +Decrease the supply of a fungible asset by burning. + + +
fun decrease_supply<T: key>(metadata: &object::Object<T>, amount: u64)
+
+ + + +
+Implementation + + +
fun decrease_supply<T: key>(metadata: &Object<T>, amount: u64) acquires Supply, ConcurrentSupply {
+    if (amount == 0) {
+        return
+    };
+    let metadata_address = object::object_address(metadata);
+
+    if (exists<ConcurrentSupply>(metadata_address)) {
+        let supply = borrow_global_mut<ConcurrentSupply>(metadata_address);
+
+        assert!(
+            aggregator_v2::try_sub(&mut supply.current, (amount as u128)),
+            error::out_of_range(ESUPPLY_UNDERFLOW)
+        );
+    } else if (exists<Supply>(metadata_address)) {
+        assert!(exists<Supply>(metadata_address), error::not_found(ESUPPLY_NOT_FOUND));
+        let supply = borrow_global_mut<Supply>(metadata_address);
+        assert!(
+            supply.current >= (amount as u128),
+            error::invalid_state(ESUPPLY_UNDERFLOW)
+        );
+        supply.current = supply.current - (amount as u128);
+    } else {
+        assert!(false, error::not_found(ESUPPLY_NOT_FOUND));
+    }
+}
+
+ + + +
+ + + +## Function `borrow_fungible_metadata` + + + +
fun borrow_fungible_metadata<T: key>(metadata: &object::Object<T>): &fungible_asset::Metadata
+
+ + + +
+Implementation + + +
inline fun borrow_fungible_metadata<T: key>(
+    metadata: &Object<T>
+): &Metadata acquires Metadata {
+    let addr = object::object_address(metadata);
+    borrow_global<Metadata>(addr)
+}
+
+ + + +
+ + + +## Function `borrow_fungible_metadata_mut` + + + +
fun borrow_fungible_metadata_mut<T: key>(metadata: &object::Object<T>): &mut fungible_asset::Metadata
+
+ + + +
+Implementation + + +
inline fun borrow_fungible_metadata_mut<T: key>(
+    metadata: &Object<T>
+): &mut Metadata acquires Metadata {
+    let addr = object::object_address(metadata);
+    borrow_global_mut<Metadata>(addr)
+}
+
+ + + +
+ + + +## Function `borrow_store_resource` + + + +
fun borrow_store_resource<T: key>(store: &object::Object<T>): &fungible_asset::FungibleStore
+
+ + + +
+Implementation + + +
inline fun borrow_store_resource<T: key>(store: &Object<T>): &FungibleStore acquires FungibleStore {
+    let store_addr = object::object_address(store);
+    assert!(exists<FungibleStore>(store_addr), error::not_found(EFUNGIBLE_STORE_EXISTENCE));
+    borrow_global<FungibleStore>(store_addr)
+}
+
+ + + +
+ + + +## Function `upgrade_to_concurrent` + + + +
public fun upgrade_to_concurrent(ref: &object::ExtendRef)
+
+ + + +
+Implementation + + +
public fun upgrade_to_concurrent(
+    ref: &ExtendRef,
+) acquires Supply {
+    let metadata_object_address = object::address_from_extend_ref(ref);
+    let metadata_object_signer = object::generate_signer_for_extending(ref);
+    assert!(
+        features::concurrent_fungible_assets_enabled(),
+        error::invalid_argument(ECONCURRENT_SUPPLY_NOT_ENABLED)
+    );
+    assert!(exists<Supply>(metadata_object_address), error::not_found(ESUPPLY_NOT_FOUND));
+    let Supply {
+        current,
+        maximum,
+    } = move_from<Supply>(metadata_object_address);
+
+    let unlimited = option::is_none(&maximum);
+    let supply = ConcurrentSupply {
+        current: if (unlimited) {
+            aggregator_v2::create_unbounded_aggregator_with_value(current)
+        }
+        else {
+            aggregator_v2::create_aggregator_with_value(current, option::extract(&mut maximum))
+        },
+    };
+    move_to(&metadata_object_signer, supply);
+}
+
+ + + +
+ + + +## Function `upgrade_store_to_concurrent` + + + +
public entry fun upgrade_store_to_concurrent<T: key>(owner: &signer, store: object::Object<T>)
+
+ + + +
+Implementation + + +
public entry fun upgrade_store_to_concurrent<T: key>(
+    owner: &signer,
+    store: Object<T>,
+) acquires FungibleStore {
+    assert!(object::owns(store, signer::address_of(owner)), error::permission_denied(ENOT_STORE_OWNER));
+    assert!(!is_frozen(store), error::invalid_argument(ESTORE_IS_FROZEN));
+    assert!(allow_upgrade_to_concurrent_fungible_balance(), error::invalid_argument(ECONCURRENT_BALANCE_NOT_ENABLED));
+    ensure_store_upgraded_to_concurrent_internal(object::object_address(&store));
+}
+
+ + + +
+ + + +## Function `ensure_store_upgraded_to_concurrent_internal` + +Ensure a known FungibleStore has ConcurrentFungibleBalance. + + +
fun ensure_store_upgraded_to_concurrent_internal(fungible_store_address: address)
+
+ + + +
+Implementation + + +
fun ensure_store_upgraded_to_concurrent_internal(
+    fungible_store_address: address,
+) acquires FungibleStore {
+    if (exists<ConcurrentFungibleBalance>(fungible_store_address)) {
+        return
+    };
+    let store = borrow_global_mut<FungibleStore>(fungible_store_address);
+    let balance = aggregator_v2::create_unbounded_aggregator_with_value(store.balance);
+    store.balance = 0;
+    let object_signer = create_signer::create_signer(fungible_store_address);
+    move_to(&object_signer, ConcurrentFungibleBalance { balance });
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1The metadata associated with the fungible asset is subject to precise size constraints.MediumThe add_fungibility function has size limitations for the name, symbol, number of decimals, icon_uri, and project_uri field of the Metadata resource.This has been audited.
2Adding fungibility to an existing object should initialize the metadata and supply resources and store them under the metadata object address.LowThe add_fungibility function initializes the Metadata and Supply resources and moves them under the metadata object.Audited that the Metadata and Supply resources are initialized properly.
3Generating mint, burn and transfer references can only be done at object creation time and if the object was added fungibility.LowThe following functions generate the related references of the Metadata object: 1. generate_mint_ref 2. generate_burn_ref 3. generate_transfer_refAudited that the Metadata object exists within the constructor ref.
4Only the owner of a store should be allowed to withdraw fungible assets from it.HighThe fungible_asset::withdraw function ensures that the signer owns the store by asserting that the object address matches the address of the signer.Audited that the address of the signer owns the object.
5The transfer, withdrawal and deposit operation should never change the current supply of the fungible asset.HighThe transfer function withdraws the fungible assets from the store and deposits them to the receiver. The withdraw function extracts the fungible asset from the fungible asset store. The deposit function adds the balance to the fungible asset store.Audited that the supply before and after the operation remains constant.
6The owner of the store should only be able to withdraw a certain amount if its store has sufficient balance and is not frozen, unless the withdrawal is performed with a reference, and afterwards the store balance should be decreased.HighThe withdraw function ensures that the store is not frozen before calling withdraw_internal which ensures that the withdrawing amount is greater than 0 and less than the total balance from the store. The withdraw_with_ref ensures that the reference's metadata matches the store metadata.Audited that it aborts if the withdrawing store is frozen. Audited that it aborts if the store doesn't have sufficient balance. Audited that the balance of the withdrawing store is reduced by amount.
7Only the same type of fungible assets should be deposited in a fungible asset store, if the store is not frozen, unless the deposit is performed with a reference, and afterwards the store balance should be increased.HighThe deposit function ensures that store is not frozen and proceeds to call the deposit_internal function which validates the store's metadata and the depositing asset's metadata followed by increasing the store balance by the given amount. The deposit_with_ref ensures that the reference's metadata matches the depositing asset's metadata.Audited that it aborts if the store is frozen. Audited that it aborts if the asset and asset store are different. Audited that the store's balance is increased by the deposited amount.
8An object should only be allowed to hold one store for fungible assets.MediumThe create_store function initializes a new FungibleStore resource and moves it under the object address.Audited that the resource was moved under the object.
9When a new store is created, the balance should be set by default to the value zero.HighThe create_store function initializes a new fungible asset store with zero balance and stores it under the given constructorRef object.Audited that the store is properly initialized with zero balance.
10A store should only be deleted if its balance is zero.MediumThe remove_store function validates the store's balance and removes the store under the object address.Audited that it aborts if the balance of the store is not zero. Audited that the store is removed from the object address.
11Minting and burning should alter the total supply value, and the store balances.HighThe mint process increases the total supply by the amount minted using the increase_supply function. The burn process withdraws the burn amount from the given store and decreases the total supply by the amount burned using the decrease_supply function.Audited the mint and burn functions that the supply was adjusted accordingly.
12It must not be possible to burn an amount of fungible assets larger than their current supply.HighThe burn process ensures that the store has enough balance to burn, by asserting that the supply.current >= amount inside the decrease_supply function.Audited that it aborts if the provided store doesn't have sufficient balance.
13Enabling or disabling store's frozen status should only be done with a valid transfer reference.HighThe set_frozen_flag function ensures that the TransferRef is provided via function argument and that the store's metadata matches the metadata from the reference. It then proceeds to update the frozen flag of the store.Audited that it aborts if the metadata doesn't match. Audited that the frozen flag is updated properly.
14Extracting a specific amount from the fungible asset should be possible only if the total amount that it holds is greater or equal to the provided amount.HighThe extract function validates that the fungible asset has enough balance to extract and then updates it by subtracting the extracted amount.Audited that it aborts if the asset didn't have sufficient balance. Audited that the balance of the asset is updated. Audited that the extract function returns the extracted asset.
15Merging two fungible assets should only be possible if both share the same metadata.MediumThe merge function validates the metadata of the src and dst asset.Audited that it aborts if the metadata of the src and dst are not the same.
16Post merging two fungible assets, the source asset should have the amount value equal to the sum of the two.HighThe merge function increases dst_fungible_asset.amount by src_fungible_asset.amount.Audited that the dst_fungible_asset balance is increased by amount.
17Fungible assets with zero balance should be destroyed when the amount reaches value 0.MediumThe destroy_zero function ensures that the balance of the asset has the value 0 and destroys the asset.Audited that it aborts if the balance of the asset is non-zero.
+ + + + + + +### Module-level Specification + + +
pragma verify=false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/gas_schedule.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/gas_schedule.md new file mode 100644 index 0000000000000..5ff6876819e9e --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/gas_schedule.md @@ -0,0 +1,658 @@ + + + +# Module `0x1::gas_schedule` + +This module defines structs and methods to initialize the gas schedule, which dictates how much +it costs to execute Move on the network. + + +- [Struct `GasEntry`](#0x1_gas_schedule_GasEntry) +- [Resource `GasSchedule`](#0x1_gas_schedule_GasSchedule) +- [Resource `GasScheduleV2`](#0x1_gas_schedule_GasScheduleV2) +- [Constants](#@Constants_0) +- [Function `initialize`](#0x1_gas_schedule_initialize) +- [Function `set_gas_schedule`](#0x1_gas_schedule_set_gas_schedule) +- [Function `set_for_next_epoch`](#0x1_gas_schedule_set_for_next_epoch) +- [Function `set_for_next_epoch_check_hash`](#0x1_gas_schedule_set_for_next_epoch_check_hash) +- [Function `on_new_epoch`](#0x1_gas_schedule_on_new_epoch) +- [Function `set_storage_gas_config`](#0x1_gas_schedule_set_storage_gas_config) +- [Function `set_storage_gas_config_for_next_epoch`](#0x1_gas_schedule_set_storage_gas_config_for_next_epoch) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `initialize`](#@Specification_1_initialize) + - [Function `set_gas_schedule`](#@Specification_1_set_gas_schedule) + - [Function `set_for_next_epoch`](#@Specification_1_set_for_next_epoch) + - [Function `set_for_next_epoch_check_hash`](#@Specification_1_set_for_next_epoch_check_hash) + - [Function `on_new_epoch`](#@Specification_1_on_new_epoch) + - [Function `set_storage_gas_config`](#@Specification_1_set_storage_gas_config) + - [Function 
`set_storage_gas_config_for_next_epoch`](#@Specification_1_set_storage_gas_config_for_next_epoch) + + +
use 0x1::aptos_hash;
+use 0x1::bcs;
+use 0x1::chain_status;
+use 0x1::config_buffer;
+use 0x1::error;
+use 0x1::reconfiguration;
+use 0x1::storage_gas;
+use 0x1::string;
+use 0x1::system_addresses;
+use 0x1::util;
+use 0x1::vector;
+
+ + + + + +## Struct `GasEntry` + + + +
struct GasEntry has copy, drop, store
+
+ + + +
+Fields + + +
+
+key: string::String +
+
+ +
+
+val: u64 +
+
+ +
+
+ + +
+ + + +## Resource `GasSchedule` + + + +
struct GasSchedule has copy, drop, key
+
+ + + +
+Fields + + +
+
+entries: vector<gas_schedule::GasEntry> +
+
+ +
+
+ + +
+ + + +## Resource `GasScheduleV2` + + + +
struct GasScheduleV2 has copy, drop, store, key
+
+ + + +
+Fields + + +
+
+feature_version: u64 +
+
+ +
+
+entries: vector<gas_schedule::GasEntry> +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const EINVALID_GAS_FEATURE_VERSION: u64 = 2;
+
+ + + + + +The provided gas schedule bytes are empty or invalid + + +
const EINVALID_GAS_SCHEDULE: u64 = 1;
+
+ + + + + + + +
const EINVALID_GAS_SCHEDULE_HASH: u64 = 3;
+
+ + + + + +## Function `initialize` + +Only called during genesis. + + +
public(friend) fun initialize(aptos_framework: &signer, gas_schedule_blob: vector<u8>)
+
+ + + +
+Implementation + + +
public(friend) fun initialize(aptos_framework: &signer, gas_schedule_blob: vector<u8>) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    assert!(!vector::is_empty(&gas_schedule_blob), error::invalid_argument(EINVALID_GAS_SCHEDULE));
+
+    // TODO(Gas): check if gas schedule is consistent
+    let gas_schedule: GasScheduleV2 = from_bytes(gas_schedule_blob);
+    move_to<GasScheduleV2>(aptos_framework, gas_schedule);
+}
+
+ + + +
+ + + +## Function `set_gas_schedule` + +Deprecated by set_for_next_epoch(). + +WARNING: calling this while randomness is enabled will trigger a new epoch without randomness! + +TODO: update all the tests that reference this function, then disable this function. + + +
public fun set_gas_schedule(aptos_framework: &signer, gas_schedule_blob: vector<u8>)
+
+ + + +
+Implementation + + +
public fun set_gas_schedule(aptos_framework: &signer, gas_schedule_blob: vector<u8>) acquires GasSchedule, GasScheduleV2 {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    assert!(!vector::is_empty(&gas_schedule_blob), error::invalid_argument(EINVALID_GAS_SCHEDULE));
+    chain_status::assert_genesis();
+
+    if (exists<GasScheduleV2>(@aptos_framework)) {
+        let gas_schedule = borrow_global_mut<GasScheduleV2>(@aptos_framework);
+        let new_gas_schedule: GasScheduleV2 = from_bytes(gas_schedule_blob);
+        assert!(new_gas_schedule.feature_version >= gas_schedule.feature_version,
+            error::invalid_argument(EINVALID_GAS_FEATURE_VERSION));
+        // TODO(Gas): check if gas schedule is consistent
+        *gas_schedule = new_gas_schedule;
+    }
+    else {
+        if (exists<GasSchedule>(@aptos_framework)) {
+            _ = move_from<GasSchedule>(@aptos_framework);
+        };
+        let new_gas_schedule: GasScheduleV2 = from_bytes(gas_schedule_blob);
+        // TODO(Gas): check if gas schedule is consistent
+        move_to<GasScheduleV2>(aptos_framework, new_gas_schedule);
+    };
+
+    // Need to trigger reconfiguration so validator nodes can sync on the updated gas schedule.
+    reconfiguration::reconfigure();
+}
+
+ + + +
+ + + +## Function `set_for_next_epoch` + +Set the gas schedule for the next epoch, typically called by on-chain governance. +Abort if the version of the given schedule is lower than the current version. + +Example usage: +``` +aptos_framework::gas_schedule::set_for_next_epoch(&framework_signer, some_gas_schedule_blob); +aptos_framework::aptos_governance::reconfigure(&framework_signer); +``` + + +
public fun set_for_next_epoch(aptos_framework: &signer, gas_schedule_blob: vector<u8>)
+
+ + + +
+Implementation + + +
public fun set_for_next_epoch(aptos_framework: &signer, gas_schedule_blob: vector<u8>) acquires GasScheduleV2 {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    assert!(!vector::is_empty(&gas_schedule_blob), error::invalid_argument(EINVALID_GAS_SCHEDULE));
+    let new_gas_schedule: GasScheduleV2 = from_bytes(gas_schedule_blob);
+    if (exists<GasScheduleV2>(@aptos_framework)) {
+        let cur_gas_schedule = borrow_global<GasScheduleV2>(@aptos_framework);
+        assert!(
+            new_gas_schedule.feature_version >= cur_gas_schedule.feature_version,
+            error::invalid_argument(EINVALID_GAS_FEATURE_VERSION)
+        );
+    };
+    config_buffer::upsert(new_gas_schedule);
+}
+
+ + + +
+ + + +## Function `set_for_next_epoch_check_hash` + +Set the gas schedule for the next epoch, typically called by on-chain governance. +Abort if the version of the given schedule is lower than the current version. +Require a hash of the old gas schedule to be provided and will abort if the hashes mismatch. + + +
public fun set_for_next_epoch_check_hash(aptos_framework: &signer, old_gas_schedule_hash: vector<u8>, new_gas_schedule_blob: vector<u8>)
+
+ + + +
+Implementation + + +
public fun set_for_next_epoch_check_hash(
+    aptos_framework: &signer,
+    old_gas_schedule_hash: vector<u8>,
+    new_gas_schedule_blob: vector<u8>
+) acquires GasScheduleV2 {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    assert!(!vector::is_empty(&new_gas_schedule_blob), error::invalid_argument(EINVALID_GAS_SCHEDULE));
+
+    let new_gas_schedule: GasScheduleV2 = from_bytes(new_gas_schedule_blob);
+    if (exists<GasScheduleV2>(@aptos_framework)) {
+        let cur_gas_schedule = borrow_global<GasScheduleV2>(@aptos_framework);
+        assert!(
+            new_gas_schedule.feature_version >= cur_gas_schedule.feature_version,
+            error::invalid_argument(EINVALID_GAS_FEATURE_VERSION)
+        );
+        let cur_gas_schedule_bytes = bcs::to_bytes(cur_gas_schedule);
+        let cur_gas_schedule_hash = aptos_hash::sha3_512(cur_gas_schedule_bytes);
+        assert!(
+            cur_gas_schedule_hash == old_gas_schedule_hash,
+            error::invalid_argument(EINVALID_GAS_SCHEDULE_HASH)
+        );
+    };
+
+    config_buffer::upsert(new_gas_schedule);
+}
+
+ + + +
+ + + +## Function `on_new_epoch` + +Only used in reconfigurations to apply the pending GasScheduleV2, if there is any. + + +
public(friend) fun on_new_epoch(framework: &signer)
+
+ + + +
+Implementation + + +
public(friend) fun on_new_epoch(framework: &signer) acquires GasScheduleV2 {
+    system_addresses::assert_aptos_framework(framework);
+    if (config_buffer::does_exist<GasScheduleV2>()) {
+        let new_gas_schedule = config_buffer::extract<GasScheduleV2>();
+        if (exists<GasScheduleV2>(@aptos_framework)) {
+            *borrow_global_mut<GasScheduleV2>(@aptos_framework) = new_gas_schedule;
+        } else {
+            move_to(framework, new_gas_schedule);
+        }
+    }
+}
+
+ + + +
+ + + +## Function `set_storage_gas_config` + + + +
public fun set_storage_gas_config(aptos_framework: &signer, config: storage_gas::StorageGasConfig)
+
+ + + +
+Implementation + + +
public fun set_storage_gas_config(aptos_framework: &signer, config: StorageGasConfig) {
+    storage_gas::set_config(aptos_framework, config);
+    // Need to trigger reconfiguration so the VM is guaranteed to load the new gas fee starting from the next
+    // transaction.
+    reconfiguration::reconfigure();
+}
+
+ + + +
+ + + +## Function `set_storage_gas_config_for_next_epoch` + + + +
public fun set_storage_gas_config_for_next_epoch(aptos_framework: &signer, config: storage_gas::StorageGasConfig)
+
+ + + +
+Implementation + + +
public fun set_storage_gas_config_for_next_epoch(aptos_framework: &signer, config: StorageGasConfig) {
+    storage_gas::set_config(aptos_framework, config);
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1During genesis, the Aptos framework account should be assigned the gas schedule resource.MediumThe gas_schedule::initialize function calls the assert_aptos_framework function to ensure that the signer is the aptos_framework and then assigns the GasScheduleV2 resource to it.Formally verified via initialize.
2Only the Aptos framework account should be allowed to update the gas schedule resource.CriticalThe gas_schedule::set_gas_schedule function calls the assert_aptos_framework function to ensure that the signer is the aptos framework account.Formally verified via set_gas_schedule.
3Only valid gas schedule should be allowed for initialization and update.MediumThe initialize and set_gas_schedule functions ensures that the gas_schedule_blob is not empty.Formally verified via initialize and set_gas_schedule.
4Only a gas schedule with the feature version greater or equal than the current feature version is allowed to be provided when performing an update operation.MediumThe set_gas_schedule function validates the feature_version of the new_gas_schedule by ensuring that it is greater or equal than the current gas_schedule.feature_version.Formally verified via set_gas_schedule.
+ + + + + + +### Module-level Specification + + +
pragma verify = true;
+pragma aborts_if_is_strict;
+
+ + + + + +### Function `initialize` + + +
public(friend) fun initialize(aptos_framework: &signer, gas_schedule_blob: vector<u8>)
+
+ + + + +
let addr = signer::address_of(aptos_framework);
+// This enforces high-level requirement 1:
+include system_addresses::AbortsIfNotAptosFramework{ account: aptos_framework };
+// This enforces high-level requirement 3:
+aborts_if len(gas_schedule_blob) == 0;
+aborts_if exists<GasScheduleV2>(addr);
+ensures exists<GasScheduleV2>(addr);
+
+ + + + + +### Function `set_gas_schedule` + + +
public fun set_gas_schedule(aptos_framework: &signer, gas_schedule_blob: vector<u8>)
+
+ + + + +
pragma verify_duration_estimate = 600;
+requires exists<stake::ValidatorFees>(@aptos_framework);
+requires exists<CoinInfo<AptosCoin>>(@aptos_framework);
+requires chain_status::is_genesis();
+include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply;
+include staking_config::StakingRewardsConfigRequirement;
+// This enforces high-level requirement 2:
+include system_addresses::AbortsIfNotAptosFramework{ account: aptos_framework };
+// This enforces high-level requirement 3:
+aborts_if len(gas_schedule_blob) == 0;
+let new_gas_schedule = util::spec_from_bytes<GasScheduleV2>(gas_schedule_blob);
+let gas_schedule = global<GasScheduleV2>(@aptos_framework);
+// This enforces high-level requirement 4:
+aborts_if exists<GasScheduleV2>(@aptos_framework) && new_gas_schedule.feature_version < gas_schedule.feature_version;
+ensures exists<GasScheduleV2>(signer::address_of(aptos_framework));
+ensures global<GasScheduleV2>(@aptos_framework) == new_gas_schedule;
+
+ + + + + +### Function `set_for_next_epoch` + + +
public fun set_for_next_epoch(aptos_framework: &signer, gas_schedule_blob: vector<u8>)
+
+ + + + +
include system_addresses::AbortsIfNotAptosFramework{ account: aptos_framework };
+include config_buffer::SetForNextEpochAbortsIf {
+    account: aptos_framework,
+    config: gas_schedule_blob
+};
+let new_gas_schedule = util::spec_from_bytes<GasScheduleV2>(gas_schedule_blob);
+let cur_gas_schedule = global<GasScheduleV2>(@aptos_framework);
+aborts_if exists<GasScheduleV2>(@aptos_framework) && new_gas_schedule.feature_version < cur_gas_schedule.feature_version;
+
+ + + + + +### Function `set_for_next_epoch_check_hash` + + +
public fun set_for_next_epoch_check_hash(aptos_framework: &signer, old_gas_schedule_hash: vector<u8>, new_gas_schedule_blob: vector<u8>)
+
+ + + + +
include system_addresses::AbortsIfNotAptosFramework{ account: aptos_framework };
+include config_buffer::SetForNextEpochAbortsIf {
+    account: aptos_framework,
+    config: new_gas_schedule_blob
+};
+let new_gas_schedule = util::spec_from_bytes<GasScheduleV2>(new_gas_schedule_blob);
+let cur_gas_schedule = global<GasScheduleV2>(@aptos_framework);
+aborts_if exists<GasScheduleV2>(@aptos_framework) && new_gas_schedule.feature_version < cur_gas_schedule.feature_version;
+aborts_if exists<GasScheduleV2>(@aptos_framework) && (!features::spec_sha_512_and_ripemd_160_enabled() || aptos_hash::spec_sha3_512_internal(bcs::serialize(cur_gas_schedule)) != old_gas_schedule_hash);
+
+ + + + + +### Function `on_new_epoch` + + +
public(friend) fun on_new_epoch(framework: &signer)
+
+ + + + +
requires @aptos_framework == std::signer::address_of(framework);
+include config_buffer::OnNewEpochRequirement<GasScheduleV2>;
+aborts_if false;
+
+ + + + + +### Function `set_storage_gas_config` + + +
public fun set_storage_gas_config(aptos_framework: &signer, config: storage_gas::StorageGasConfig)
+
+ + + + +
pragma verify_duration_estimate = 600;
+requires exists<stake::ValidatorFees>(@aptos_framework);
+requires exists<CoinInfo<AptosCoin>>(@aptos_framework);
+include system_addresses::AbortsIfNotAptosFramework{ account: aptos_framework };
+include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply;
+include staking_config::StakingRewardsConfigRequirement;
+aborts_if !exists<StorageGasConfig>(@aptos_framework);
+ensures global<StorageGasConfig>(@aptos_framework) == config;
+
+ + + + +
include system_addresses::AbortsIfNotAptosFramework{ account: aptos_framework };
+aborts_if !exists<storage_gas::StorageGasConfig>(@aptos_framework);
+
+ + + + + +### Function `set_storage_gas_config_for_next_epoch` + + +
public fun set_storage_gas_config_for_next_epoch(aptos_framework: &signer, config: storage_gas::StorageGasConfig)
+
+ + + + +
include system_addresses::AbortsIfNotAptosFramework{ account: aptos_framework };
+aborts_if !exists<storage_gas::StorageGasConfig>(@aptos_framework);
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/genesis.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/genesis.md new file mode 100644 index 0000000000000..3136d00fa3994 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/genesis.md @@ -0,0 +1,1085 @@ + + + +# Module `0x1::genesis` + + + +- [Struct `AccountMap`](#0x1_genesis_AccountMap) +- [Struct `EmployeeAccountMap`](#0x1_genesis_EmployeeAccountMap) +- [Struct `ValidatorConfiguration`](#0x1_genesis_ValidatorConfiguration) +- [Struct `ValidatorConfigurationWithCommission`](#0x1_genesis_ValidatorConfigurationWithCommission) +- [Constants](#@Constants_0) +- [Function `initialize`](#0x1_genesis_initialize) +- [Function `initialize_aptos_coin`](#0x1_genesis_initialize_aptos_coin) +- [Function `initialize_core_resources_and_aptos_coin`](#0x1_genesis_initialize_core_resources_and_aptos_coin) +- [Function `create_accounts`](#0x1_genesis_create_accounts) +- [Function `create_account`](#0x1_genesis_create_account) +- [Function `create_employee_validators`](#0x1_genesis_create_employee_validators) +- [Function `create_initialize_validators_with_commission`](#0x1_genesis_create_initialize_validators_with_commission) +- [Function `create_initialize_validators`](#0x1_genesis_create_initialize_validators) +- [Function `create_initialize_validator`](#0x1_genesis_create_initialize_validator) +- [Function `initialize_validator`](#0x1_genesis_initialize_validator) +- [Function `set_genesis_end`](#0x1_genesis_set_genesis_end) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `initialize`](#@Specification_1_initialize) + - [Function `initialize_aptos_coin`](#@Specification_1_initialize_aptos_coin) + - [Function 
`create_initialize_validators_with_commission`](#@Specification_1_create_initialize_validators_with_commission) + - [Function `create_initialize_validators`](#@Specification_1_create_initialize_validators) + - [Function `create_initialize_validator`](#@Specification_1_create_initialize_validator) + - [Function `set_genesis_end`](#@Specification_1_set_genesis_end) + + +
use 0x1::account;
+use 0x1::aggregator_factory;
+use 0x1::aptos_account;
+use 0x1::aptos_coin;
+use 0x1::aptos_governance;
+use 0x1::block;
+use 0x1::chain_id;
+use 0x1::chain_status;
+use 0x1::coin;
+use 0x1::consensus_config;
+use 0x1::create_signer;
+use 0x1::error;
+use 0x1::execution_config;
+use 0x1::fixed_point32;
+use 0x1::gas_schedule;
+use 0x1::reconfiguration;
+use 0x1::simple_map;
+use 0x1::stake;
+use 0x1::staking_config;
+use 0x1::staking_contract;
+use 0x1::state_storage;
+use 0x1::storage_gas;
+use 0x1::timestamp;
+use 0x1::transaction_fee;
+use 0x1::transaction_validation;
+use 0x1::vector;
+use 0x1::version;
+use 0x1::vesting;
+
+ + + + + +## Struct `AccountMap` + + + +
struct AccountMap has drop
+
+ + + +
+Fields + + +
+
+account_address: address +
+
+ +
+
+balance: u64 +
+
+ +
+
+ + +
+ + + +## Struct `EmployeeAccountMap` + + + +
struct EmployeeAccountMap has copy, drop
+
+ + + +
+Fields + + +
+
+accounts: vector<address> +
+
+ +
+
+validator: genesis::ValidatorConfigurationWithCommission +
+
+ +
+
+vesting_schedule_numerator: vector<u64> +
+
+ +
+
+vesting_schedule_denominator: u64 +
+
+ +
+
+beneficiary_resetter: address +
+
+ +
+
+ + +
+ + + +## Struct `ValidatorConfiguration` + + + +
struct ValidatorConfiguration has copy, drop
+
+ + + +
+Fields + + +
+
+owner_address: address +
+
+ +
+
+operator_address: address +
+
+ +
+
+voter_address: address +
+
+ +
+
+stake_amount: u64 +
+
+ +
+
+consensus_pubkey: vector<u8> +
+
+ +
+
+proof_of_possession: vector<u8> +
+
+ +
+
+network_addresses: vector<u8> +
+
+ +
+
+full_node_network_addresses: vector<u8> +
+
+ +
+
+ + +
+ + + +## Struct `ValidatorConfigurationWithCommission` + + + +
struct ValidatorConfigurationWithCommission has copy, drop
+
+ + + +
+Fields + + +
+
+validator_config: genesis::ValidatorConfiguration +
+
+ +
+
+commission_percentage: u64 +
+
+ +
+
+join_during_genesis: bool +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const EACCOUNT_DOES_NOT_EXIST: u64 = 2;
+
+ + + + + + + +
const EDUPLICATE_ACCOUNT: u64 = 1;
+
+ + + + + +## Function `initialize` + +Genesis step 1: Initialize aptos framework account and core modules on chain. + + +
fun initialize(gas_schedule: vector<u8>, chain_id: u8, initial_version: u64, consensus_config: vector<u8>, execution_config: vector<u8>, epoch_interval_microsecs: u64, minimum_stake: u64, maximum_stake: u64, recurring_lockup_duration_secs: u64, allow_validator_set_change: bool, rewards_rate: u64, rewards_rate_denominator: u64, voting_power_increase_limit: u64)
+
+ + + +
+Implementation + + +
fun initialize(
+    gas_schedule: vector<u8>,
+    chain_id: u8,
+    initial_version: u64,
+    consensus_config: vector<u8>,
+    execution_config: vector<u8>,
+    epoch_interval_microsecs: u64,
+    minimum_stake: u64,
+    maximum_stake: u64,
+    recurring_lockup_duration_secs: u64,
+    allow_validator_set_change: bool,
+    rewards_rate: u64,
+    rewards_rate_denominator: u64,
+    voting_power_increase_limit: u64,
+) {
+    // Initialize the aptos framework account. This is the account where system resources and modules will be
+    // deployed to. This will be entirely managed by on-chain governance and no entities have the key or privileges
+    // to use this account.
+    let (aptos_framework_account, aptos_framework_signer_cap) = account::create_framework_reserved_account(@aptos_framework);
+    // Initialize account configs on aptos framework account.
+    account::initialize(&aptos_framework_account);
+
+    transaction_validation::initialize(
+        &aptos_framework_account,
+        b"script_prologue",
+        b"module_prologue",
+        b"multi_agent_script_prologue",
+        b"epilogue",
+    );
+
+    // Give the decentralized on-chain governance control over the core framework account.
+    aptos_governance::store_signer_cap(&aptos_framework_account, @aptos_framework, aptos_framework_signer_cap);
+
+    // put reserved framework reserved accounts under aptos governance
+    let framework_reserved_addresses = vector<address>[@0x2, @0x3, @0x4, @0x5, @0x6, @0x7, @0x8, @0x9, @0xa];
+    while (!vector::is_empty(&framework_reserved_addresses)) {
+        let address = vector::pop_back<address>(&mut framework_reserved_addresses);
+        let (_, framework_signer_cap) = account::create_framework_reserved_account(address);
+        aptos_governance::store_signer_cap(&aptos_framework_account, address, framework_signer_cap);
+    };
+
+    consensus_config::initialize(&aptos_framework_account, consensus_config);
+    execution_config::set(&aptos_framework_account, execution_config);
+    version::initialize(&aptos_framework_account, initial_version);
+    stake::initialize(&aptos_framework_account);
+    staking_config::initialize(
+        &aptos_framework_account,
+        minimum_stake,
+        maximum_stake,
+        recurring_lockup_duration_secs,
+        allow_validator_set_change,
+        rewards_rate,
+        rewards_rate_denominator,
+        voting_power_increase_limit,
+    );
+    storage_gas::initialize(&aptos_framework_account);
+    gas_schedule::initialize(&aptos_framework_account, gas_schedule);
+
+    // Ensure we can create aggregators for supply, but not enable it for common use just yet.
+    aggregator_factory::initialize_aggregator_factory(&aptos_framework_account);
+    coin::initialize_supply_config(&aptos_framework_account);
+
+    chain_id::initialize(&aptos_framework_account, chain_id);
+    reconfiguration::initialize(&aptos_framework_account);
+    block::initialize(&aptos_framework_account, epoch_interval_microsecs);
+    state_storage::initialize(&aptos_framework_account);
+    timestamp::set_time_has_started(&aptos_framework_account);
+}
+
+ + + +
+ + + +## Function `initialize_aptos_coin` + +Genesis step 2: Initialize Aptos coin. + + +
fun initialize_aptos_coin(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
fun initialize_aptos_coin(aptos_framework: &signer) {
+    let (burn_cap, mint_cap) = aptos_coin::initialize(aptos_framework);
+
+    coin::create_coin_conversion_map(aptos_framework);
+    coin::create_pairing<AptosCoin>(aptos_framework);
+
+    // Give stake module MintCapability<AptosCoin> so it can mint rewards.
+    stake::store_aptos_coin_mint_cap(aptos_framework, mint_cap);
+    // Give transaction_fee module BurnCapability<AptosCoin> so it can burn gas.
+    transaction_fee::store_aptos_coin_burn_cap(aptos_framework, burn_cap);
+    // Give transaction_fee module MintCapability<AptosCoin> so it can mint refunds.
+    transaction_fee::store_aptos_coin_mint_cap(aptos_framework, mint_cap);
+}
+
+ + + +
+ + + +## Function `initialize_core_resources_and_aptos_coin` + +Only called for testnets and e2e tests. + + +
fun initialize_core_resources_and_aptos_coin(aptos_framework: &signer, core_resources_auth_key: vector<u8>)
+
+ + + +
+Implementation + + +
fun initialize_core_resources_and_aptos_coin(
+    aptos_framework: &signer,
+    core_resources_auth_key: vector<u8>,
+) {
+    let (burn_cap, mint_cap) = aptos_coin::initialize(aptos_framework);
+
+    coin::create_coin_conversion_map(aptos_framework);
+    coin::create_pairing<AptosCoin>(aptos_framework);
+
+    // Give stake module MintCapability<AptosCoin> so it can mint rewards.
+    stake::store_aptos_coin_mint_cap(aptos_framework, mint_cap);
+    // Give transaction_fee module BurnCapability<AptosCoin> so it can burn gas.
+    transaction_fee::store_aptos_coin_burn_cap(aptos_framework, burn_cap);
+    // Give transaction_fee module MintCapability<AptosCoin> so it can mint refunds.
+    transaction_fee::store_aptos_coin_mint_cap(aptos_framework, mint_cap);
+
+    let core_resources = account::create_account(@core_resources);
+    account::rotate_authentication_key_internal(&core_resources, core_resources_auth_key);
+    aptos_account::register_apt(&core_resources); // registers APT store
+    aptos_coin::configure_accounts_for_test(aptos_framework, &core_resources, mint_cap);
+}
+
+ + + +
+ + + +## Function `create_accounts` + + + +
fun create_accounts(aptos_framework: &signer, accounts: vector<genesis::AccountMap>)
+
+ + + +
+Implementation + + +
fun create_accounts(aptos_framework: &signer, accounts: vector<AccountMap>) {
+    let unique_accounts = vector::empty();
+    vector::for_each_ref(&accounts, |account_map| {
+        let account_map: &AccountMap = account_map;
+        assert!(
+            !vector::contains(&unique_accounts, &account_map.account_address),
+            error::already_exists(EDUPLICATE_ACCOUNT),
+        );
+        vector::push_back(&mut unique_accounts, account_map.account_address);
+
+        create_account(
+            aptos_framework,
+            account_map.account_address,
+            account_map.balance,
+        );
+    });
+}
+
+ + + +
+ + + +## Function `create_account` + +This creates and funds an account if it doesn't exist. +If it exists, it just returns the signer. + + +
fun create_account(aptos_framework: &signer, account_address: address, balance: u64): signer
+
+ + + +
+Implementation + + +
fun create_account(aptos_framework: &signer, account_address: address, balance: u64): signer {
+    if (account::exists_at(account_address)) {
+        create_signer(account_address)
+    } else {
+        let account = account::create_account(account_address);
+        coin::register<AptosCoin>(&account);
+        aptos_coin::mint(aptos_framework, account_address, balance);
+        account
+    }
+}
+
+ + + +
+ + + +## Function `create_employee_validators` + + + +
fun create_employee_validators(employee_vesting_start: u64, employee_vesting_period_duration: u64, employees: vector<genesis::EmployeeAccountMap>)
+
+ + + +
+Implementation + + +
fun create_employee_validators(
+    employee_vesting_start: u64,
+    employee_vesting_period_duration: u64,
+    employees: vector<EmployeeAccountMap>,
+) {
+    let unique_accounts = vector::empty();
+
+    vector::for_each_ref(&employees, |employee_group| {
+        let j = 0;
+        let employee_group: &EmployeeAccountMap = employee_group;
+        let num_employees_in_group = vector::length(&employee_group.accounts);
+
+        let buy_ins = simple_map::create();
+
+        while (j < num_employees_in_group) {
+            let account = vector::borrow(&employee_group.accounts, j);
+            assert!(
+                !vector::contains(&unique_accounts, account),
+                error::already_exists(EDUPLICATE_ACCOUNT),
+            );
+            vector::push_back(&mut unique_accounts, *account);
+
+            let employee = create_signer(*account);
+            let total = coin::balance<AptosCoin>(*account);
+            let coins = coin::withdraw<AptosCoin>(&employee, total);
+            simple_map::add(&mut buy_ins, *account, coins);
+
+            j = j + 1;
+        };
+
+        let j = 0;
+        let num_vesting_events = vector::length(&employee_group.vesting_schedule_numerator);
+        let schedule = vector::empty();
+
+        while (j < num_vesting_events) {
+            let numerator = vector::borrow(&employee_group.vesting_schedule_numerator, j);
+            let event = fixed_point32::create_from_rational(*numerator, employee_group.vesting_schedule_denominator);
+            vector::push_back(&mut schedule, event);
+
+            j = j + 1;
+        };
+
+        let vesting_schedule = vesting::create_vesting_schedule(
+            schedule,
+            employee_vesting_start,
+            employee_vesting_period_duration,
+        );
+
+        let admin = employee_group.validator.validator_config.owner_address;
+        let admin_signer = &create_signer(admin);
+        let contract_address = vesting::create_vesting_contract(
+            admin_signer,
+            &employee_group.accounts,
+            buy_ins,
+            vesting_schedule,
+            admin,
+            employee_group.validator.validator_config.operator_address,
+            employee_group.validator.validator_config.voter_address,
+            employee_group.validator.commission_percentage,
+            x"",
+        );
+        let pool_address = vesting::stake_pool_address(contract_address);
+
+        if (employee_group.beneficiary_resetter != @0x0) {
+            vesting::set_beneficiary_resetter(admin_signer, contract_address, employee_group.beneficiary_resetter);
+        };
+
+        let validator = &employee_group.validator.validator_config;
+        assert!(
+            account::exists_at(validator.owner_address),
+            error::not_found(EACCOUNT_DOES_NOT_EXIST),
+        );
+        assert!(
+            account::exists_at(validator.operator_address),
+            error::not_found(EACCOUNT_DOES_NOT_EXIST),
+        );
+        assert!(
+            account::exists_at(validator.voter_address),
+            error::not_found(EACCOUNT_DOES_NOT_EXIST),
+        );
+        if (employee_group.validator.join_during_genesis) {
+            initialize_validator(pool_address, validator);
+        };
+    });
+}
+
+ + + +
+ + + +## Function `create_initialize_validators_with_commission` + + + +
fun create_initialize_validators_with_commission(aptos_framework: &signer, use_staking_contract: bool, validators: vector<genesis::ValidatorConfigurationWithCommission>)
+
+ + + +
+Implementation + + +
fun create_initialize_validators_with_commission(
+    aptos_framework: &signer,
+    use_staking_contract: bool,
+    validators: vector<ValidatorConfigurationWithCommission>,
+) {
+    vector::for_each_ref(&validators, |validator| {
+        let validator: &ValidatorConfigurationWithCommission = validator;
+        create_initialize_validator(aptos_framework, validator, use_staking_contract);
+    });
+
+    // Destroy the aptos framework account's ability to mint coins now that we're done with setting up the initial
+    // validators.
+    aptos_coin::destroy_mint_cap(aptos_framework);
+
+    stake::on_new_epoch();
+}
+
+ + + +
+ + + +## Function `create_initialize_validators` + +Sets up the initial validator set for the network. +The validator "owner" accounts, and their authentication +addresses (and keys) are encoded in the owners vector. +Each validator signs consensus messages with the private key corresponding to the Ed25519 +public key in consensus_pubkeys. +Finally, each validator must specify the network address +(see types/src/network_address/mod.rs) for itself and its full nodes. + +Network address fields are a vector per account, where each entry is a vector of addresses +encoded in a single BCS byte array. + + +
fun create_initialize_validators(aptos_framework: &signer, validators: vector<genesis::ValidatorConfiguration>)
+
+ + + +
+Implementation + + +
fun create_initialize_validators(aptos_framework: &signer, validators: vector<ValidatorConfiguration>) {
+    let validators_with_commission = vector::empty();
+    vector::for_each_reverse(validators, |validator| {
+        let validator_with_commission = ValidatorConfigurationWithCommission {
+            validator_config: validator,
+            commission_percentage: 0,
+            join_during_genesis: true,
+        };
+        vector::push_back(&mut validators_with_commission, validator_with_commission);
+    });
+
+    create_initialize_validators_with_commission(aptos_framework, false, validators_with_commission);
+}
+
+ + + +
+ + + +## Function `create_initialize_validator` + + + +
fun create_initialize_validator(aptos_framework: &signer, commission_config: &genesis::ValidatorConfigurationWithCommission, use_staking_contract: bool)
+
+ + + +
+Implementation + + +
fun create_initialize_validator(
+    aptos_framework: &signer,
+    commission_config: &ValidatorConfigurationWithCommission,
+    use_staking_contract: bool,
+) {
+    let validator = &commission_config.validator_config;
+
+    let owner = &create_account(aptos_framework, validator.owner_address, validator.stake_amount);
+    create_account(aptos_framework, validator.operator_address, 0);
+    create_account(aptos_framework, validator.voter_address, 0);
+
+    // Initialize the stake pool and join the validator set.
+    let pool_address = if (use_staking_contract) {
+        staking_contract::create_staking_contract(
+            owner,
+            validator.operator_address,
+            validator.voter_address,
+            validator.stake_amount,
+            commission_config.commission_percentage,
+            x"",
+        );
+        staking_contract::stake_pool_address(validator.owner_address, validator.operator_address)
+    } else {
+        stake::initialize_stake_owner(
+            owner,
+            validator.stake_amount,
+            validator.operator_address,
+            validator.voter_address,
+        );
+        validator.owner_address
+    };
+
+    if (commission_config.join_during_genesis) {
+        initialize_validator(pool_address, validator);
+    };
+}
+
+ + + +
+ + + +## Function `initialize_validator` + + + +
fun initialize_validator(pool_address: address, validator: &genesis::ValidatorConfiguration)
+
+ + + +
+Implementation + + +
fun initialize_validator(pool_address: address, validator: &ValidatorConfiguration) {
+    let operator = &create_signer(validator.operator_address);
+
+    stake::rotate_consensus_key(
+        operator,
+        pool_address,
+        validator.consensus_pubkey,
+        validator.proof_of_possession,
+    );
+    stake::update_network_and_fullnode_addresses(
+        operator,
+        pool_address,
+        validator.network_addresses,
+        validator.full_node_network_addresses,
+    );
+    stake::join_validator_set_internal(operator, pool_address);
+}
+
+ + + +
+ + + +## Function `set_genesis_end` + +The last step of genesis. + + +
fun set_genesis_end(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
fun set_genesis_end(aptos_framework: &signer) {
+    chain_status::set_genesis_end(aptos_framework);
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1All the core resources and modules should be created during genesis and owned by the Aptos framework account.CriticalResources created during genesis initialization: GovernanceResponsbility, ConsensusConfig, ExecutionConfig, Version, SetVersionCapability, ValidatorSet, ValidatorPerformance, StakingConfig, StorageGasConfig, StorageGas, GasScheduleV2, AggregatorFactory, SupplyConfig, ChainId, Configuration, BlockResource, StateStorageUsage, CurrentTimeMicroseconds. If some of the resources were to be owned by a malicious account, it could lead to the compromise of the chain, as these are core resources. It should be formally verified by a post condition to ensure that all the critical resources are owned by the Aptos framework.Formally verified via initialize.
2Addresses ranging from 0x0 - 0xa should be reserved for the framework and part of aptos governance.CriticalThe function genesis::initialize calls account::create_framework_reserved_account for addresses 0x0, 0x2, 0x3, 0x4, ..., 0xa which creates an account and authentication_key for them. This should be formally verified by ensuring that at the beginning of the genesis::initialize function no Account resource exists for the reserved addresses, and at the end of the function, an Account resource exists.Formally verified via initialize.
3The Aptos coin should be initialized during genesis and only the Aptos framework account should own the mint and burn capabilities for the APT token.CriticalBoth mint and burn capabilities are wrapped inside the stake::AptosCoinCapabilities and transaction_fee::AptosCoinCapabilities resources which are stored under the aptos framework account.Formally verified via initialize_aptos_coin.
4An initial set of validators should exist before the end of genesis.LowTo ensure that there will be a set of validators available to validate the genesis block, the length of the ValidatorSet.active_validators vector should be > 0.Formally verified via set_genesis_end.
5The end of genesis should be marked on chain.LowThe end of genesis is marked, on chain, via the chain_status::GenesisEndMarker resource. The ownership of this resource marks the operating state of the chain.Formally verified via set_genesis_end.
+ + + + + +### Module-level Specification + + +
pragma verify = true;
+
+ + + + + +### Function `initialize` + + +
fun initialize(gas_schedule: vector<u8>, chain_id: u8, initial_version: u64, consensus_config: vector<u8>, execution_config: vector<u8>, epoch_interval_microsecs: u64, minimum_stake: u64, maximum_stake: u64, recurring_lockup_duration_secs: u64, allow_validator_set_change: bool, rewards_rate: u64, rewards_rate_denominator: u64, voting_power_increase_limit: u64)
+
+ + + + +
pragma aborts_if_is_partial;
+include InitalizeRequires;
+// This enforces high-level requirement 2:
+aborts_if exists<account::Account>(@0x0);
+aborts_if exists<account::Account>(@0x2);
+aborts_if exists<account::Account>(@0x3);
+aborts_if exists<account::Account>(@0x4);
+aborts_if exists<account::Account>(@0x5);
+aborts_if exists<account::Account>(@0x6);
+aborts_if exists<account::Account>(@0x7);
+aborts_if exists<account::Account>(@0x8);
+aborts_if exists<account::Account>(@0x9);
+aborts_if exists<account::Account>(@0xa);
+ensures exists<account::Account>(@0x0);
+ensures exists<account::Account>(@0x2);
+ensures exists<account::Account>(@0x3);
+ensures exists<account::Account>(@0x4);
+ensures exists<account::Account>(@0x5);
+ensures exists<account::Account>(@0x6);
+ensures exists<account::Account>(@0x7);
+ensures exists<account::Account>(@0x8);
+ensures exists<account::Account>(@0x9);
+ensures exists<account::Account>(@0xa);
+// This enforces high-level requirement 1:
+ensures exists<aptos_governance::GovernanceResponsbility>(@aptos_framework);
+ensures exists<consensus_config::ConsensusConfig>(@aptos_framework);
+ensures exists<execution_config::ExecutionConfig>(@aptos_framework);
+ensures exists<version::Version>(@aptos_framework);
+ensures exists<stake::ValidatorSet>(@aptos_framework);
+ensures exists<stake::ValidatorPerformance>(@aptos_framework);
+ensures exists<storage_gas::StorageGasConfig>(@aptos_framework);
+ensures exists<storage_gas::StorageGas>(@aptos_framework);
+ensures exists<gas_schedule::GasScheduleV2>(@aptos_framework);
+ensures exists<aggregator_factory::AggregatorFactory>(@aptos_framework);
+ensures exists<coin::SupplyConfig>(@aptos_framework);
+ensures exists<chain_id::ChainId>(@aptos_framework);
+ensures exists<reconfiguration::Configuration>(@aptos_framework);
+ensures exists<block::BlockResource>(@aptos_framework);
+ensures exists<state_storage::StateStorageUsage>(@aptos_framework);
+ensures exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+ensures exists<account::Account>(@aptos_framework);
+ensures exists<version::SetVersionCapability>(@aptos_framework);
+ensures exists<staking_config::StakingConfig>(@aptos_framework);
+
+ + + + + +### Function `initialize_aptos_coin` + + +
fun initialize_aptos_coin(aptos_framework: &signer)
+
+ + + + +
// This enforces high-level requirement 3:
+requires !exists<stake::AptosCoinCapabilities>(@aptos_framework);
+ensures exists<stake::AptosCoinCapabilities>(@aptos_framework);
+requires exists<transaction_fee::AptosCoinCapabilities>(@aptos_framework);
+ensures exists<transaction_fee::AptosCoinCapabilities>(@aptos_framework);
+
+ + + + + +### Function `create_initialize_validators_with_commission` + + +
fun create_initialize_validators_with_commission(aptos_framework: &signer, use_staking_contract: bool, validators: vector<genesis::ValidatorConfigurationWithCommission>)
+
+ + + + +
pragma verify_duration_estimate = 120;
+include stake::ResourceRequirement;
+include stake::GetReconfigStartTimeRequirement;
+include CompareTimeRequires;
+include aptos_coin::ExistsAptosCoin;
+
+ + + + + +### Function `create_initialize_validators` + + +
fun create_initialize_validators(aptos_framework: &signer, validators: vector<genesis::ValidatorConfiguration>)
+
+ + + + +
pragma verify_duration_estimate = 120;
+include stake::ResourceRequirement;
+include stake::GetReconfigStartTimeRequirement;
+include CompareTimeRequires;
+include aptos_coin::ExistsAptosCoin;
+
+ + + + + +### Function `create_initialize_validator` + + +
fun create_initialize_validator(aptos_framework: &signer, commission_config: &genesis::ValidatorConfigurationWithCommission, use_staking_contract: bool)
+
+ + + + +
include stake::ResourceRequirement;
+
+ + + + + +### Function `set_genesis_end` + + +
fun set_genesis_end(aptos_framework: &signer)
+
+ + + + +
pragma delegate_invariants_to_caller;
+// This enforces high-level requirement 4:
+requires len(global<stake::ValidatorSet>(@aptos_framework).active_validators) >= 1;
+// This enforces high-level requirement 5:
+let addr = std::signer::address_of(aptos_framework);
+aborts_if addr != @aptos_framework;
+aborts_if exists<chain_status::GenesisEndMarker>(@aptos_framework);
+ensures global<chain_status::GenesisEndMarker>(@aptos_framework) == chain_status::GenesisEndMarker {};
+
+ + + + + + + +
schema InitalizeRequires {
+    execution_config: vector<u8>;
+    requires !exists<account::Account>(@aptos_framework);
+    requires chain_status::is_operating();
+    requires len(execution_config) > 0;
+    requires exists<staking_config::StakingRewardsConfig>(@aptos_framework);
+    requires exists<stake::ValidatorFees>(@aptos_framework);
+    requires exists<coin::CoinInfo<AptosCoin>>(@aptos_framework);
+    include CompareTimeRequires;
+    include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply;
+}
+
+ + + + + + + +
schema CompareTimeRequires {
+    let staking_rewards_config = global<staking_config::StakingRewardsConfig>(@aptos_framework);
+    requires staking_rewards_config.last_rewards_rate_period_start_in_secs <= timestamp::spec_now_seconds();
+}
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/governance_proposal.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/governance_proposal.md new file mode 100644 index 0000000000000..e67b67aad759d --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/governance_proposal.md @@ -0,0 +1,178 @@ + + + +# Module `0x1::governance_proposal` + +Define the GovernanceProposal that will be used as part of on-chain governance by AptosGovernance. + +This is separate from the AptosGovernance module to avoid circular dependency between AptosGovernance and Stake. + + +- [Struct `GovernanceProposal`](#0x1_governance_proposal_GovernanceProposal) +- [Function `create_proposal`](#0x1_governance_proposal_create_proposal) +- [Function `create_empty_proposal`](#0x1_governance_proposal_create_empty_proposal) +- [Specification](#@Specification_0) + - [Function `create_proposal`](#@Specification_0_create_proposal) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `create_empty_proposal`](#@Specification_0_create_empty_proposal) + + +
+ + + + + +## Struct `GovernanceProposal` + + + +
struct GovernanceProposal has drop, store
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Function `create_proposal` + +Create and return a GovernanceProposal resource. Can only be called by AptosGovernance + + +
public(friend) fun create_proposal(): governance_proposal::GovernanceProposal
+
+ + + +
+Implementation + + +
public(friend) fun create_proposal(): GovernanceProposal {
+    GovernanceProposal {}
+}
+
+ + + +
+ + + +## Function `create_empty_proposal` + +Useful for AptosGovernance to create an empty proposal as proof. + + +
public(friend) fun create_empty_proposal(): governance_proposal::GovernanceProposal
+
+ + + +
+Implementation + + +
public(friend) fun create_empty_proposal(): GovernanceProposal {
+    create_proposal()
+}
+
+ + + +
+ + + +## Specification + + + + +### Function `create_proposal` + + +
public(friend) fun create_proposal(): governance_proposal::GovernanceProposal
+
+ + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1Creating a proposal should never abort but should always return a governance proposal resource.MediumBoth create_proposal and create_empty_proposal functions return a GovernanceProposal resource.Enforced via create_proposal and create_empty_proposal.
2The governance proposal module should only be accessible to the aptos governance.MediumBoth create_proposal and create_empty_proposal functions are only available to the friend module aptos_framework::aptos_governance.Enforced via friend module relationship.
+ + + + + + +### Module-level Specification + + +
aborts_if false;
+// This enforces high-level requirement 1:
+ensures result == GovernanceProposal {};
+
+ + + + + +### Function `create_empty_proposal` + + +
public(friend) fun create_empty_proposal(): governance_proposal::GovernanceProposal
+
+ + + + +
aborts_if false;
+// This enforces high-level requirement 1:
+ensures result == GovernanceProposal {};
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/guid.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/guid.md new file mode 100644 index 0000000000000..de8f0986edfd6 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/guid.md @@ -0,0 +1,522 @@ + + + +# Module `0x1::guid` + +A module for generating globally unique identifiers + + +- [Struct `GUID`](#0x1_guid_GUID) +- [Struct `ID`](#0x1_guid_ID) +- [Constants](#@Constants_0) +- [Function `create`](#0x1_guid_create) +- [Function `create_id`](#0x1_guid_create_id) +- [Function `id`](#0x1_guid_id) +- [Function `creator_address`](#0x1_guid_creator_address) +- [Function `id_creator_address`](#0x1_guid_id_creator_address) +- [Function `creation_num`](#0x1_guid_creation_num) +- [Function `id_creation_num`](#0x1_guid_id_creation_num) +- [Function `eq_id`](#0x1_guid_eq_id) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `create`](#@Specification_1_create) + - [Function `create_id`](#@Specification_1_create_id) + - [Function `id`](#@Specification_1_id) + - [Function `creator_address`](#@Specification_1_creator_address) + - [Function `id_creator_address`](#@Specification_1_id_creator_address) + - [Function `creation_num`](#@Specification_1_creation_num) + - [Function `id_creation_num`](#@Specification_1_id_creation_num) + - [Function `eq_id`](#@Specification_1_eq_id) + + +
+ + + + + +## Struct `GUID` + +A globally unique identifier derived from the sender's address and a counter + + +
struct GUID has drop, store
+
+ + + +
+Fields + + +
+
+id: guid::ID +
+
+ +
+
+ + +
+ + + +## Struct `ID` + +A non-privileged identifier that can be freely created by anyone. Useful for looking up GUIDs. + + +
struct ID has copy, drop, store
+
+ + + +
+Fields + + +
+
+creation_num: u64 +
+
+ If creation_num is i, this is the i+1th GUID created by addr +
+
+addr: address +
+
+ Address that created the GUID +
+
+ + +
+ + + +## Constants + + + + +GUID generator must be published ahead of first usage of create_with_capability function. + + +
const EGUID_GENERATOR_NOT_PUBLISHED: u64 = 0;
+
+ + + + + +## Function `create` + +Create and return a new GUID from a trusted module. + + +
public(friend) fun create(addr: address, creation_num_ref: &mut u64): guid::GUID
+
+ + + +
+Implementation + + +
public(friend) fun create(addr: address, creation_num_ref: &mut u64): GUID {
+    let creation_num = *creation_num_ref;
+    *creation_num_ref = creation_num + 1;
+    GUID {
+        id: ID {
+            creation_num,
+            addr,
+        }
+    }
+}
+
+ + + +
+ + + +## Function `create_id` + +Create a non-privileged id from addr and creation_num + + +
public fun create_id(addr: address, creation_num: u64): guid::ID
+
+ + + +
+Implementation + + +
public fun create_id(addr: address, creation_num: u64): ID {
+    ID { creation_num, addr }
+}
+
+ + + +
+ + + +## Function `id` + +Get the non-privileged ID associated with a GUID + + +
public fun id(guid: &guid::GUID): guid::ID
+
+ + + +
+Implementation + + +
public fun id(guid: &GUID): ID {
+    guid.id
+}
+
+ + + +
+ + + +## Function `creator_address` + +Return the account address that created the GUID + + +
public fun creator_address(guid: &guid::GUID): address
+
+ + + +
+Implementation + + +
public fun creator_address(guid: &GUID): address {
+    guid.id.addr
+}
+
+ + + +
+ + + +## Function `id_creator_address` + +Return the account address that created the guid::ID + + +
public fun id_creator_address(id: &guid::ID): address
+
+ + + +
+Implementation + + +
public fun id_creator_address(id: &ID): address {
+    id.addr
+}
+
+ + + +
+ + + +## Function `creation_num` + +Return the creation number associated with the GUID + + +
public fun creation_num(guid: &guid::GUID): u64
+
+ + + +
+Implementation + + +
public fun creation_num(guid: &GUID): u64 {
+    guid.id.creation_num
+}
+
+ + + +
+ + + +## Function `id_creation_num` + +Return the creation number associated with the guid::ID + + +
public fun id_creation_num(id: &guid::ID): u64
+
+ + + +
+Implementation + + +
public fun id_creation_num(id: &ID): u64 {
+    id.creation_num
+}
+
+ + + +
+ + + +## Function `eq_id` + +Return true if the GUID's ID is id + + +
public fun eq_id(guid: &guid::GUID, id: &guid::ID): bool
+
+ + + +
+Implementation + + +
public fun eq_id(guid: &GUID, id: &ID): bool {
+    &guid.id == id
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1The creation of GUID constructs a unique GUID by combining an address with an incremented creation number.LowThe create function generates a new GUID by combining an address with an incremented creation number, effectively creating a unique identifier.Enforced via create.
2The operations on GUID and ID, such as construction, field access, and equality comparison, should not abort.LowThe following functions will never abort: (1) create_id, (2) id, (3) creator_address, (4) id_creator_address, (5) creation_num, (6) id_creation_num, and (7) eq_id.Verified via create_id, id, creator_address, id_creator_address, creation_num, id_creation_num, and eq_id.
3The creation number should increment by 1 with each new creation.LowAn account can only own up to MAX_U64 resources. Not incrementing the guid_creation_num constantly could lead to shrinking the space for new resources.Enforced via create.
4The creation number and address of an ID / GUID must be immutable.MediumThe addr and creation_num values are meant to be constant and never updated as they are unique and used for identification.Audited: This is enforced through missing functionality to update the creation_num or addr.
+ + + + + +### Module-level Specification + + +
pragma verify = true;
+pragma aborts_if_is_strict;
+
+ + + + + +### Function `create` + + +
public(friend) fun create(addr: address, creation_num_ref: &mut u64): guid::GUID
+
+ + + + +
aborts_if creation_num_ref + 1 > MAX_U64;
+// This enforces high-level requirement 1:
+ensures result.id.creation_num == old(creation_num_ref);
+// This enforces high-level requirement 3:
+ensures creation_num_ref == old(creation_num_ref) + 1;
+
+ + + + + +### Function `create_id` + + +
public fun create_id(addr: address, creation_num: u64): guid::ID
+
+ + + + +
// This enforces high-level requirement 2:
+aborts_if false;
+
+ + + + + +### Function `id` + + +
public fun id(guid: &guid::GUID): guid::ID
+
+ + + + +
// This enforces high-level requirement 2:
+aborts_if false;
+
+ + + + + +### Function `creator_address` + + +
public fun creator_address(guid: &guid::GUID): address
+
+ + + + +
// This enforces high-level requirement 2:
+aborts_if false;
+
+ + + + + +### Function `id_creator_address` + + +
public fun id_creator_address(id: &guid::ID): address
+
+ + + + +
// This enforces high-level requirement 2:
+aborts_if false;
+
+ + + + + +### Function `creation_num` + + +
public fun creation_num(guid: &guid::GUID): u64
+
+ + + + +
// This enforces high-level requirement 2:
+aborts_if false;
+
+ + + + + +### Function `id_creation_num` + + +
public fun id_creation_num(id: &guid::ID): u64
+
+ + + + +
// This enforces high-level requirement 2:
+aborts_if false;
+
+ + + + + +### Function `eq_id` + + +
public fun eq_id(guid: &guid::GUID, id: &guid::ID): bool
+
+ + + + +
// This enforces high-level requirement 2:
+aborts_if false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/jwk_consensus_config.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/jwk_consensus_config.md new file mode 100644 index 0000000000000..69e93270bcdf3 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/jwk_consensus_config.md @@ -0,0 +1,377 @@ + + + +# Module `0x1::jwk_consensus_config` + +Structs and functions related to JWK consensus configurations. + + +- [Resource `JWKConsensusConfig`](#0x1_jwk_consensus_config_JWKConsensusConfig) +- [Struct `ConfigOff`](#0x1_jwk_consensus_config_ConfigOff) +- [Struct `OIDCProvider`](#0x1_jwk_consensus_config_OIDCProvider) +- [Struct `ConfigV1`](#0x1_jwk_consensus_config_ConfigV1) +- [Constants](#@Constants_0) +- [Function `initialize`](#0x1_jwk_consensus_config_initialize) +- [Function `set_for_next_epoch`](#0x1_jwk_consensus_config_set_for_next_epoch) +- [Function `on_new_epoch`](#0x1_jwk_consensus_config_on_new_epoch) +- [Function `new_off`](#0x1_jwk_consensus_config_new_off) +- [Function `new_v1`](#0x1_jwk_consensus_config_new_v1) +- [Function `new_oidc_provider`](#0x1_jwk_consensus_config_new_oidc_provider) +- [Specification](#@Specification_1) + - [Function `on_new_epoch`](#@Specification_1_on_new_epoch) + + +
use 0x1::config_buffer;
+use 0x1::copyable_any;
+use 0x1::error;
+use 0x1::option;
+use 0x1::simple_map;
+use 0x1::string;
+use 0x1::system_addresses;
+
+ + + + + +## Resource `JWKConsensusConfig` + +The configuration of the JWK consensus feature. + + +
struct JWKConsensusConfig has drop, store, key
+
+ + + +
+Fields + + +
+
+variant: copyable_any::Any +
+
+ A config variant packed as an Any. + Currently the variant type is one of the following. + - ConfigOff + - ConfigV1 +
+
+ + +
+ + + +## Struct `ConfigOff` + +A JWK consensus config variant indicating JWK consensus should not run. + + +
struct ConfigOff has copy, drop, store
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `OIDCProvider` + + + +
struct OIDCProvider has copy, drop, store
+
+ + + +
+Fields + + +
+
+name: string::String +
+
+ +
+
+config_url: string::String +
+
+ +
+
+ + +
+ + + +## Struct `ConfigV1` + +A JWK consensus config variant indicating JWK consensus should run to watch a given list of OIDC providers. + + +
struct ConfigV1 has copy, drop, store
+
+ + + +
+Fields + + +
+
+oidc_providers: vector<jwk_consensus_config::OIDCProvider> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +ConfigV1 creation failed with duplicated providers given. + + +
const EDUPLICATE_PROVIDERS: u64 = 1;
+
+ + + + + +## Function `initialize` + +Initialize the configuration. Used in genesis or governance. + + +
public fun initialize(framework: &signer, config: jwk_consensus_config::JWKConsensusConfig)
+
+ + + +
+Implementation + + +
public fun initialize(framework: &signer, config: JWKConsensusConfig) {
+    system_addresses::assert_aptos_framework(framework);
+    if (!exists<JWKConsensusConfig>(@aptos_framework)) {
+        move_to(framework, config);
+    }
+}
+
+ + + +
+ + + +## Function `set_for_next_epoch` + +This can be called by on-chain governance to update JWK consensus configs for the next epoch. +Example usage: +``` +use aptos_framework::jwk_consensus_config; +use aptos_framework::aptos_governance; +// ... +let config = jwk_consensus_config::new_v1(vector[]); +jwk_consensus_config::set_for_next_epoch(&framework_signer, config); +aptos_governance::reconfigure(&framework_signer); +``` + + +
public fun set_for_next_epoch(framework: &signer, config: jwk_consensus_config::JWKConsensusConfig)
+
+ + + +
+Implementation + + +
public fun set_for_next_epoch(framework: &signer, config: JWKConsensusConfig) {
+    system_addresses::assert_aptos_framework(framework);
+    config_buffer::upsert(config);
+}
+
+ + + +
+ + + +## Function `on_new_epoch` + +Only used in reconfigurations to apply the pending JWKConsensusConfig, if there is any. + + +
public(friend) fun on_new_epoch(framework: &signer)
+
+ + + +
+Implementation + + +
public(friend) fun on_new_epoch(framework: &signer) acquires JWKConsensusConfig {
+    system_addresses::assert_aptos_framework(framework);
+    if (config_buffer::does_exist<JWKConsensusConfig>()) {
+        let new_config = config_buffer::extract<JWKConsensusConfig>();
+        if (exists<JWKConsensusConfig>(@aptos_framework)) {
+            *borrow_global_mut<JWKConsensusConfig>(@aptos_framework) = new_config;
+        } else {
+            move_to(framework, new_config);
+        };
+    }
+}
+
+ + + +
+ + + +## Function `new_off` + +Construct a JWKConsensusConfig of variant ConfigOff. + + +
public fun new_off(): jwk_consensus_config::JWKConsensusConfig
+
+ + + +
+Implementation + + +
public fun new_off(): JWKConsensusConfig {
+    JWKConsensusConfig {
+        variant: copyable_any::pack( ConfigOff {} )
+    }
+}
+
+ + + +
+ + + +## Function `new_v1` + +Construct a JWKConsensusConfig of variant ConfigV1. + +Abort if the given provider list contains duplicated provider names. + + +
public fun new_v1(oidc_providers: vector<jwk_consensus_config::OIDCProvider>): jwk_consensus_config::JWKConsensusConfig
+
+ + + +
+Implementation + + +
public fun new_v1(oidc_providers: vector<OIDCProvider>): JWKConsensusConfig {
+    let name_set = simple_map::new<String, u64>();
+    vector::for_each_ref(&oidc_providers, |provider| {
+        let provider: &OIDCProvider = provider;
+        let (_, old_value) = simple_map::upsert(&mut name_set, provider.name, 0);
+        if (option::is_some(&old_value)) {
+            abort(error::invalid_argument(EDUPLICATE_PROVIDERS))
+        }
+    });
+    JWKConsensusConfig {
+        variant: copyable_any::pack( ConfigV1 { oidc_providers } )
+    }
+}
+
+ + + +
+ + + +## Function `new_oidc_provider` + +Construct an OIDCProvider object. + + +
public fun new_oidc_provider(name: string::String, config_url: string::String): jwk_consensus_config::OIDCProvider
+
+ + + +
+Implementation + + +
public fun new_oidc_provider(name: String, config_url: String): OIDCProvider {
+    OIDCProvider { name, config_url }
+}
+
+ + + +
+ + + +## Specification + + + + +### Function `on_new_epoch` + + +
public(friend) fun on_new_epoch(framework: &signer)
+
+ + + + +
requires @aptos_framework == std::signer::address_of(framework);
+include config_buffer::OnNewEpochRequirement<JWKConsensusConfig>;
+aborts_if false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/jwks.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/jwks.md new file mode 100644 index 0000000000000..347eeed64b94c --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/jwks.md @@ -0,0 +1,1784 @@ + + + +# Module `0x1::jwks` + +JWK functions and structs. + +Note: An important design constraint for this module is that the JWK consensus Rust code is unable to +spawn a VM and make a Move function call. Instead, the JWK consensus Rust code will have to directly +write some of the resources in this file. As a result, the structs in this file are declared so as to +have a simple layout which is easily accessible in Rust. + + +- [Struct `OIDCProvider`](#0x1_jwks_OIDCProvider) +- [Resource `SupportedOIDCProviders`](#0x1_jwks_SupportedOIDCProviders) +- [Struct `UnsupportedJWK`](#0x1_jwks_UnsupportedJWK) +- [Struct `RSA_JWK`](#0x1_jwks_RSA_JWK) +- [Struct `JWK`](#0x1_jwks_JWK) +- [Struct `ProviderJWKs`](#0x1_jwks_ProviderJWKs) +- [Struct `AllProvidersJWKs`](#0x1_jwks_AllProvidersJWKs) +- [Resource `ObservedJWKs`](#0x1_jwks_ObservedJWKs) +- [Struct `ObservedJWKsUpdated`](#0x1_jwks_ObservedJWKsUpdated) +- [Struct `Patch`](#0x1_jwks_Patch) +- [Struct `PatchRemoveAll`](#0x1_jwks_PatchRemoveAll) +- [Struct `PatchRemoveIssuer`](#0x1_jwks_PatchRemoveIssuer) +- [Struct `PatchRemoveJWK`](#0x1_jwks_PatchRemoveJWK) +- [Struct `PatchUpsertJWK`](#0x1_jwks_PatchUpsertJWK) +- [Resource `Patches`](#0x1_jwks_Patches) +- [Resource `PatchedJWKs`](#0x1_jwks_PatchedJWKs) +- [Resource `FederatedJWKs`](#0x1_jwks_FederatedJWKs) +- [Constants](#@Constants_0) +- [Function `patch_federated_jwks`](#0x1_jwks_patch_federated_jwks) +- [Function `get_patched_jwk`](#0x1_jwks_get_patched_jwk) +- [Function `try_get_patched_jwk`](#0x1_jwks_try_get_patched_jwk) +- [Function `upsert_oidc_provider`](#0x1_jwks_upsert_oidc_provider) +- [Function 
`upsert_oidc_provider_for_next_epoch`](#0x1_jwks_upsert_oidc_provider_for_next_epoch) +- [Function `remove_oidc_provider`](#0x1_jwks_remove_oidc_provider) +- [Function `remove_oidc_provider_for_next_epoch`](#0x1_jwks_remove_oidc_provider_for_next_epoch) +- [Function `on_new_epoch`](#0x1_jwks_on_new_epoch) +- [Function `set_patches`](#0x1_jwks_set_patches) +- [Function `new_patch_remove_all`](#0x1_jwks_new_patch_remove_all) +- [Function `new_patch_remove_issuer`](#0x1_jwks_new_patch_remove_issuer) +- [Function `new_patch_remove_jwk`](#0x1_jwks_new_patch_remove_jwk) +- [Function `new_patch_upsert_jwk`](#0x1_jwks_new_patch_upsert_jwk) +- [Function `new_rsa_jwk`](#0x1_jwks_new_rsa_jwk) +- [Function `new_unsupported_jwk`](#0x1_jwks_new_unsupported_jwk) +- [Function `initialize`](#0x1_jwks_initialize) +- [Function `remove_oidc_provider_internal`](#0x1_jwks_remove_oidc_provider_internal) +- [Function `upsert_into_observed_jwks`](#0x1_jwks_upsert_into_observed_jwks) +- [Function `remove_issuer_from_observed_jwks`](#0x1_jwks_remove_issuer_from_observed_jwks) +- [Function `regenerate_patched_jwks`](#0x1_jwks_regenerate_patched_jwks) +- [Function `try_get_jwk_by_issuer`](#0x1_jwks_try_get_jwk_by_issuer) +- [Function `try_get_jwk_by_id`](#0x1_jwks_try_get_jwk_by_id) +- [Function `get_jwk_id`](#0x1_jwks_get_jwk_id) +- [Function `upsert_provider_jwks`](#0x1_jwks_upsert_provider_jwks) +- [Function `remove_issuer`](#0x1_jwks_remove_issuer) +- [Function `upsert_jwk`](#0x1_jwks_upsert_jwk) +- [Function `remove_jwk`](#0x1_jwks_remove_jwk) +- [Function `apply_patch`](#0x1_jwks_apply_patch) +- [Specification](#@Specification_1) + - [Function `on_new_epoch`](#@Specification_1_on_new_epoch) + + +
use 0x1::bcs;
+use 0x1::chain_status;
+use 0x1::comparator;
+use 0x1::config_buffer;
+use 0x1::copyable_any;
+use 0x1::error;
+use 0x1::event;
+use 0x1::option;
+use 0x1::reconfiguration;
+use 0x1::signer;
+use 0x1::string;
+use 0x1::system_addresses;
+use 0x1::vector;
+
+ + + + + +## Struct `OIDCProvider` + +An OIDC provider. + + +
struct OIDCProvider has copy, drop, store
+
+ + + +
+Fields + + +
+
+name: vector<u8> +
+
+ The utf-8 encoded issuer string. E.g., b"https://www.facebook.com". +
+
+config_url: vector<u8> +
+
+
 + The utf-8 encoded OpenID configuration URL of the provider. + E.g., b"https://www.facebook.com/.well-known/openid-configuration/". +
+
+ + +
+ + + +## Resource `SupportedOIDCProviders` + +A list of OIDC providers whose JWKs should be watched by validators. Maintained by governance proposals. + + +
struct SupportedOIDCProviders has copy, drop, store, key
+
+ + + +
+Fields + + +
+
+providers: vector<jwks::OIDCProvider> +
+
+ +
+
+ + +

 + + + +## Struct `UnsupportedJWK` + +A JWK variant that represents the JWKs which were observed but not yet supported by Aptos. +Observing UnsupportedJWKs means the providers adopted a new key type/format, and the system should be updated. + + +
struct UnsupportedJWK has copy, drop, store
+
+ + + +
+Fields + + +
+
+id: vector<u8> +
+
+ +
+
+payload: vector<u8> +
+
+ +
+
+ + +
+ + + +## Struct `RSA_JWK` + +A JWK variant where kty is RSA. + + +
struct RSA_JWK has copy, drop, store
+
+ + + +
+Fields + + +
+
+kid: string::String +
+
+ +
+
+kty: string::String +
+
+ +
+
+alg: string::String +
+
+ +
+
+e: string::String +
+
+ +
+
+n: string::String +
+
+ +
+
+ + +
+ + + +## Struct `JWK` + +A JSON web key. + + +
struct JWK has copy, drop, store
+
+ + + +
+Fields + + +
+
+variant: copyable_any::Any +
+
+ A JWK variant packed as an Any. + Currently the variant type is one of the following. + - RSA_JWK + - UnsupportedJWK +
+
+ + +
+ + + +## Struct `ProviderJWKs` + +A provider and its JWKs. + + +
struct ProviderJWKs has copy, drop, store
+
+ + + +
+Fields + + +
+
+issuer: vector<u8> +
+
+ The utf-8 encoding of the issuer string (e.g., "https://www.facebook.com"). +
+
+version: u64 +
+
+
 + A version number is needed by JWK consensus to dedup the updates. + e.g., when on chain version = 5, multiple nodes can propose an update with version = 6. + Bumped every time the JWKs for the current issuer are updated. + The Rust authenticator only uses the latest version. +
+
+jwks: vector<jwks::JWK> +
+
+ Vector of JWK's sorted by their unique ID (from get_jwk_id) in dictionary order. +
+
+ + +
+ + + +## Struct `AllProvidersJWKs` + +Multiple ProviderJWKs objects, indexed by issuer and key ID. + + +
struct AllProvidersJWKs has copy, drop, store
+
+ + + +
+Fields + + +
+
+entries: vector<jwks::ProviderJWKs> +
+
+ Vector of ProviderJWKs sorted by ProviderJWKs::issuer in dictionary order. +
+
+ + +
+ + + +## Resource `ObservedJWKs` + +The AllProvidersJWKs that validators observed and agreed on. + + +
struct ObservedJWKs has copy, drop, store, key
+
+ + + +
+Fields + + +
+
+jwks: jwks::AllProvidersJWKs +
+
+ +
+
+ + +
+ + + +## Struct `ObservedJWKsUpdated` + +When ObservedJWKs is updated, this event is sent to resync the JWK consensus state in all validators. + + +
#[event]
+struct ObservedJWKsUpdated has drop, store
+
+ + + +
+Fields + + +
+
+epoch: u64 +
+
+ +
+
+jwks: jwks::AllProvidersJWKs +
+
+ +
+
+ + +

 + + + +## Struct `Patch` + +A small edit or patch that is applied to an AllProvidersJWKs to obtain PatchedJWKs. + + +
struct Patch has copy, drop, store
+
+ + + +
+Fields + + +
+
+variant: copyable_any::Any +
+
+ A Patch variant packed as an Any. + Currently the variant type is one of the following. + - PatchRemoveAll + - PatchRemoveIssuer + - PatchRemoveJWK + - PatchUpsertJWK +
+
+ + +
+ + + +## Struct `PatchRemoveAll` + +A Patch variant to remove all JWKs. + + +
struct PatchRemoveAll has copy, drop, store
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `PatchRemoveIssuer` + +A Patch variant to remove an issuer and all its JWKs. + + +
struct PatchRemoveIssuer has copy, drop, store
+
+ + + +
+Fields + + +
+
+issuer: vector<u8> +
+
+ +
+
+ + +
+ + + +## Struct `PatchRemoveJWK` + +A Patch variant to remove a specific JWK of an issuer. + + +
struct PatchRemoveJWK has copy, drop, store
+
+ + + +
+Fields + + +
+
+issuer: vector<u8> +
+
+ +
+
+jwk_id: vector<u8> +
+
+ +
+
+ + +
+ + + +## Struct `PatchUpsertJWK` + +A Patch variant to upsert a JWK for an issuer. + + +
struct PatchUpsertJWK has copy, drop, store
+
+ + + +
+Fields + + +
+
+issuer: vector<u8> +
+
+ +
+
+jwk: jwks::JWK +
+
+ +
+
+ + +
+ + + +## Resource `Patches` + +A sequence of Patch objects that are applied *one by one* to the ObservedJWKs. + +Maintained by governance proposals. + + +
struct Patches has key
+
+ + + +
+Fields + + +
+
+patches: vector<jwks::Patch> +
+
+ +
+
+ + +
+ + + +## Resource `PatchedJWKs` + +The result of applying the Patches to the ObservedJWKs. +This is what applications should consume. + + +
struct PatchedJWKs has drop, key
+
+ + + +
+Fields + + +
+
+jwks: jwks::AllProvidersJWKs +
+
+ +
+
+ + +
+ + + +## Resource `FederatedJWKs` + +JWKs for federated keyless accounts are stored in this resource. + + +
struct FederatedJWKs has drop, key
+
+ + + +
+Fields + + +
+
+jwks: jwks::AllProvidersJWKs +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const EFEDERATED_JWKS_TOO_LARGE: u64 = 8;
+
+ + + + + + + +
const EINSTALL_FEDERATED_JWKS_AT_APTOS_FRAMEWORK: u64 = 7;
+
+ + + + + + + +
const EISSUER_NOT_FOUND: u64 = 5;
+
+ + + + + + + +
const EJWK_ID_NOT_FOUND: u64 = 6;
+
+ + + + + + + +
const ENATIVE_INCORRECT_VERSION: u64 = 259;
+
+ + + + + + + +
const ENATIVE_MISSING_RESOURCE_OBSERVED_JWKS: u64 = 258;
+
+ + + + + + + +
const ENATIVE_MISSING_RESOURCE_VALIDATOR_SET: u64 = 257;
+
+ + + + + + + +
const ENATIVE_MULTISIG_VERIFICATION_FAILED: u64 = 260;
+
+ + + + + + + +
const ENATIVE_NOT_ENOUGH_VOTING_POWER: u64 = 261;
+
+ + + + + + + +
const EUNEXPECTED_EPOCH: u64 = 1;
+
+ + + + + + + +
const EUNEXPECTED_VERSION: u64 = 2;
+
+ + + + + + + +
const EUNKNOWN_JWK_VARIANT: u64 = 4;
+
+ + + + + + + +
const EUNKNOWN_PATCH_VARIANT: u64 = 3;
+

 + + + + + +We limit the size of a FederatedJWKs resource installed by a dapp owner for federated keyless accounts. +Note: If too large, validators waste work reading it for invalid TXN signatures. + + +
const MAX_FEDERATED_JWKS_SIZE_BYTES: u64 = 2048;
+
+ + + + + +## Function `patch_federated_jwks` + +Called by a federated keyless dapp owner to install the JWKs for the federated OIDC provider (e.g., Auth0, AWS +Cognito, etc). + +For type-safety, we explicitly use a struct FederatedJWKs { jwks: AllProviderJWKs } instead of +reusing PatchedJWKs { jwks: AllProviderJWKs }, which is a JWK-consensus-specific struct. We'd +need to be careful how we read it in Rust (but BCS serialization should be the same). + + +
public fun patch_federated_jwks(jwk_owner: &signer, patches: vector<jwks::Patch>)
+
+ + + +
+Implementation + + +
public fun patch_federated_jwks(jwk_owner: &signer, patches: vector<Patch>) acquires FederatedJWKs {
+    // Prevents accidental calls in 0x1::jwks that install federated JWKs at the Aptos framework address.
+    assert!(!system_addresses::is_aptos_framework_address(signer::address_of(jwk_owner)),
+        error::invalid_argument(EINSTALL_FEDERATED_JWKS_AT_APTOS_FRAMEWORK)
+    );
+
+    let jwk_addr = signer::address_of(jwk_owner);
+    if (!exists<FederatedJWKs>(jwk_addr)) {
+        move_to(jwk_owner, FederatedJWKs { jwks: AllProvidersJWKs { entries: vector[] } });
+    };
+
+    let fed_jwks = borrow_global_mut<FederatedJWKs>(jwk_addr);
+    vector::for_each_ref(&patches, |obj|{
+        let patch: &Patch = obj;
+        apply_patch(&mut fed_jwks.jwks, *patch);
+    });
+
+    // TODO: Can we check the size more efficiently instead of serializing it via BCS?
+    let num_bytes = vector::length(&bcs::to_bytes(fed_jwks));
+    assert!(num_bytes < MAX_FEDERATED_JWKS_SIZE_BYTES, error::invalid_argument(EFEDERATED_JWKS_TOO_LARGE));
+}
+
+ + + +
+ + + +## Function `get_patched_jwk` + +Get a JWK by issuer and key ID from the PatchedJWKs. +Abort if such a JWK does not exist. +More convenient to call from Rust, since it does not wrap the JWK in an Option. + + +
public fun get_patched_jwk(issuer: vector<u8>, jwk_id: vector<u8>): jwks::JWK
+
+ + + +
+Implementation + + +
public fun get_patched_jwk(issuer: vector<u8>, jwk_id: vector<u8>): JWK acquires PatchedJWKs {
+    option::extract(&mut try_get_patched_jwk(issuer, jwk_id))
+}
+
+ + + +
+ + + +## Function `try_get_patched_jwk` + +Get a JWK by issuer and key ID from the PatchedJWKs, if it exists. +More convenient to call from Move, since it does not abort. + + +
public fun try_get_patched_jwk(issuer: vector<u8>, jwk_id: vector<u8>): option::Option<jwks::JWK>
+
+ + + +
+Implementation + + +
public fun try_get_patched_jwk(issuer: vector<u8>, jwk_id: vector<u8>): Option<JWK> acquires PatchedJWKs {
+    let jwks = &borrow_global<PatchedJWKs>(@aptos_framework).jwks;
+    try_get_jwk_by_issuer(jwks, issuer, jwk_id)
+}
+
+ + + +
+ + + +## Function `upsert_oidc_provider` + +Deprecated by upsert_oidc_provider_for_next_epoch(). + +TODO: update all the tests that reference this function, then disable this function. + + +
public fun upsert_oidc_provider(fx: &signer, name: vector<u8>, config_url: vector<u8>): option::Option<vector<u8>>
+
+ + + +
+Implementation + + +
public fun upsert_oidc_provider(fx: &signer, name: vector<u8>, config_url: vector<u8>): Option<vector<u8>> acquires SupportedOIDCProviders {
+    system_addresses::assert_aptos_framework(fx);
+    chain_status::assert_genesis();
+
+    let provider_set = borrow_global_mut<SupportedOIDCProviders>(@aptos_framework);
+
+    let old_config_url= remove_oidc_provider_internal(provider_set, name);
+    vector::push_back(&mut provider_set.providers, OIDCProvider { name, config_url });
+    old_config_url
+}
+
+ + + +
+ + + +## Function `upsert_oidc_provider_for_next_epoch` + +Used in on-chain governances to update the supported OIDC providers, effective starting next epoch. +Example usage: +``` +aptos_framework::jwks::upsert_oidc_provider_for_next_epoch( +&framework_signer, +b"https://accounts.google.com", +b"https://accounts.google.com/.well-known/openid-configuration" +); +aptos_framework::aptos_governance::reconfigure(&framework_signer); +``` + + +
public fun upsert_oidc_provider_for_next_epoch(fx: &signer, name: vector<u8>, config_url: vector<u8>): option::Option<vector<u8>>
+
+ + + +
+Implementation + + +
public fun upsert_oidc_provider_for_next_epoch(fx: &signer, name: vector<u8>, config_url: vector<u8>): Option<vector<u8>> acquires SupportedOIDCProviders {
+    system_addresses::assert_aptos_framework(fx);
+
+    let provider_set = if (config_buffer::does_exist<SupportedOIDCProviders>()) {
+        config_buffer::extract<SupportedOIDCProviders>()
+    } else {
+        *borrow_global_mut<SupportedOIDCProviders>(@aptos_framework)
+    };
+
+    let old_config_url = remove_oidc_provider_internal(&mut provider_set, name);
+    vector::push_back(&mut provider_set.providers, OIDCProvider { name, config_url });
+    config_buffer::upsert(provider_set);
+    old_config_url
+}
+
+ + + +
+ + + +## Function `remove_oidc_provider` + +Deprecated by remove_oidc_provider_for_next_epoch(). + +TODO: update all the tests that reference this function, then disable this function. + + +
public fun remove_oidc_provider(fx: &signer, name: vector<u8>): option::Option<vector<u8>>
+
+ + + +
+Implementation + + +
public fun remove_oidc_provider(fx: &signer, name: vector<u8>): Option<vector<u8>> acquires SupportedOIDCProviders {
+    system_addresses::assert_aptos_framework(fx);
+    chain_status::assert_genesis();
+
+    let provider_set = borrow_global_mut<SupportedOIDCProviders>(@aptos_framework);
+    remove_oidc_provider_internal(provider_set, name)
+}
+
+ + + +
+ + + +## Function `remove_oidc_provider_for_next_epoch` + +Used in on-chain governances to update the supported OIDC providers, effective starting next epoch. +Example usage: +``` +aptos_framework::jwks::remove_oidc_provider_for_next_epoch( +&framework_signer, +b"https://accounts.google.com", +); +aptos_framework::aptos_governance::reconfigure(&framework_signer); +``` + + +
public fun remove_oidc_provider_for_next_epoch(fx: &signer, name: vector<u8>): option::Option<vector<u8>>
+
+ + + +
+Implementation + + +
public fun remove_oidc_provider_for_next_epoch(fx: &signer, name: vector<u8>): Option<vector<u8>> acquires SupportedOIDCProviders {
+    system_addresses::assert_aptos_framework(fx);
+
+    let provider_set = if (config_buffer::does_exist<SupportedOIDCProviders>()) {
+        config_buffer::extract<SupportedOIDCProviders>()
+    } else {
+        *borrow_global_mut<SupportedOIDCProviders>(@aptos_framework)
+    };
+    let ret = remove_oidc_provider_internal(&mut provider_set, name);
+    config_buffer::upsert(provider_set);
+    ret
+}
+
+ + + +
+ + + +## Function `on_new_epoch` + +Only used in reconfigurations to apply the pending SupportedOIDCProviders, if there is any. + + +
public(friend) fun on_new_epoch(framework: &signer)
+
+ + + +
+Implementation + + +
public(friend) fun on_new_epoch(framework: &signer) acquires SupportedOIDCProviders {
+    system_addresses::assert_aptos_framework(framework);
+    if (config_buffer::does_exist<SupportedOIDCProviders>()) {
+        let new_config = config_buffer::extract<SupportedOIDCProviders>();
+        if (exists<SupportedOIDCProviders>(@aptos_framework)) {
+            *borrow_global_mut<SupportedOIDCProviders>(@aptos_framework) = new_config;
+        } else {
+            move_to(framework, new_config);
+        }
+    }
+}
+
+ + + +
+ + + +## Function `set_patches` + +Set the Patches. Only called in governance proposals. + + +
public fun set_patches(fx: &signer, patches: vector<jwks::Patch>)
+
+ + + +
+Implementation + + +
public fun set_patches(fx: &signer, patches: vector<Patch>) acquires Patches, PatchedJWKs, ObservedJWKs {
+    system_addresses::assert_aptos_framework(fx);
+    borrow_global_mut<Patches>(@aptos_framework).patches = patches;
+    regenerate_patched_jwks();
+}
+
+ + + +
+ + + +## Function `new_patch_remove_all` + +Create a Patch that removes all entries. + + +
public fun new_patch_remove_all(): jwks::Patch
+
+ + + +
+Implementation + + +
public fun new_patch_remove_all(): Patch {
+    Patch {
+        variant: copyable_any::pack(PatchRemoveAll {}),
+    }
+}
+
+ + + +
+ + + +## Function `new_patch_remove_issuer` + +Create a Patch that removes the entry of a given issuer, if exists. + + +
public fun new_patch_remove_issuer(issuer: vector<u8>): jwks::Patch
+
+ + + +
+Implementation + + +
public fun new_patch_remove_issuer(issuer: vector<u8>): Patch {
+    Patch {
+        variant: copyable_any::pack(PatchRemoveIssuer { issuer }),
+    }
+}
+
+ + + +

 + + + +## Function `new_patch_remove_jwk` + +Create a Patch that removes the entry of a given key ID under a given issuer, if exists. + + +
public fun new_patch_remove_jwk(issuer: vector<u8>, jwk_id: vector<u8>): jwks::Patch
+
+ + + +
+Implementation + + +
public fun new_patch_remove_jwk(issuer: vector<u8>, jwk_id: vector<u8>): Patch {
+    Patch {
+        variant: copyable_any::pack(PatchRemoveJWK { issuer, jwk_id })
+    }
+}
+
+ + + +
+ + + +## Function `new_patch_upsert_jwk` + +Create a Patch that upserts a JWK into an issuer's JWK set. + + +
public fun new_patch_upsert_jwk(issuer: vector<u8>, jwk: jwks::JWK): jwks::Patch
+
+ + + +
+Implementation + + +
public fun new_patch_upsert_jwk(issuer: vector<u8>, jwk: JWK): Patch {
+    Patch {
+        variant: copyable_any::pack(PatchUpsertJWK { issuer, jwk })
+    }
+}
+
+ + + +
+ + + +## Function `new_rsa_jwk` + +Create a JWK of variant RSA_JWK. + + +
public fun new_rsa_jwk(kid: string::String, alg: string::String, e: string::String, n: string::String): jwks::JWK
+
+ + + +
+Implementation + + +
public fun new_rsa_jwk(kid: String, alg: String, e: String, n: String): JWK {
+    JWK {
+        variant: copyable_any::pack(RSA_JWK {
+            kid,
+            kty: utf8(b"RSA"),
+            e,
+            n,
+            alg,
+        }),
+    }
+}
+
+ + + +
+ + + +## Function `new_unsupported_jwk` + +Create a JWK of variant UnsupportedJWK. + + +
public fun new_unsupported_jwk(id: vector<u8>, payload: vector<u8>): jwks::JWK
+
+ + + +
+Implementation + + +
public fun new_unsupported_jwk(id: vector<u8>, payload: vector<u8>): JWK {
+    JWK {
+        variant: copyable_any::pack(UnsupportedJWK { id, payload })
+    }
+}
+
+ + + +
+ + + +## Function `initialize` + +Initialize some JWK resources. Should only be invoked by genesis. + + +
public fun initialize(fx: &signer)
+
+ + + +
+Implementation + + +
public fun initialize(fx: &signer) {
+    system_addresses::assert_aptos_framework(fx);
+    move_to(fx, SupportedOIDCProviders { providers: vector[] });
+    move_to(fx, ObservedJWKs { jwks: AllProvidersJWKs { entries: vector[] } });
+    move_to(fx, Patches { patches: vector[] });
+    move_to(fx, PatchedJWKs { jwks: AllProvidersJWKs { entries: vector[] } });
+}
+
+ + + +
+ + + +## Function `remove_oidc_provider_internal` + +Helper function that removes an OIDC provider from the SupportedOIDCProviders. +Returns the old config URL of the provider, if any, as an Option. + + +
fun remove_oidc_provider_internal(provider_set: &mut jwks::SupportedOIDCProviders, name: vector<u8>): option::Option<vector<u8>>
+
+ + + +
+Implementation + + +
fun remove_oidc_provider_internal(provider_set: &mut SupportedOIDCProviders, name: vector<u8>): Option<vector<u8>> {
+    let (name_exists, idx) = vector::find(&provider_set.providers, |obj| {
+        let provider: &OIDCProvider = obj;
+        provider.name == name
+    });
+
+    if (name_exists) {
+        let old_provider = vector::swap_remove(&mut provider_set.providers, idx);
+        option::some(old_provider.config_url)
+    } else {
+        option::none()
+    }
+}
+
+ + + +
+ + + +## Function `upsert_into_observed_jwks` + +Only used by validators to publish their observed JWK update. + +NOTE: It is assumed verification has been done to ensure each update is quorum-certified, +and its version equals to the on-chain version + 1. + + +
public fun upsert_into_observed_jwks(fx: &signer, provider_jwks_vec: vector<jwks::ProviderJWKs>)
+
+ + + +
+Implementation + + +
public fun upsert_into_observed_jwks(fx: &signer, provider_jwks_vec: vector<ProviderJWKs>) acquires ObservedJWKs, PatchedJWKs, Patches {
+    system_addresses::assert_aptos_framework(fx);
+    let observed_jwks = borrow_global_mut<ObservedJWKs>(@aptos_framework);
+    vector::for_each(provider_jwks_vec, |obj| {
+        let provider_jwks: ProviderJWKs = obj;
+        upsert_provider_jwks(&mut observed_jwks.jwks, provider_jwks);
+    });
+
+    let epoch = reconfiguration::current_epoch();
+    emit(ObservedJWKsUpdated { epoch, jwks: observed_jwks.jwks });
+    regenerate_patched_jwks();
+}
+
+ + + +
+ + + +## Function `remove_issuer_from_observed_jwks` + +Only used by governance to delete an issuer from ObservedJWKs, if it exists. + +Return the potentially existing ProviderJWKs of the given issuer. + + +
public fun remove_issuer_from_observed_jwks(fx: &signer, issuer: vector<u8>): option::Option<jwks::ProviderJWKs>
+
+ + + +
+Implementation + + +
public fun remove_issuer_from_observed_jwks(fx: &signer, issuer: vector<u8>): Option<ProviderJWKs> acquires ObservedJWKs, PatchedJWKs, Patches {
+    system_addresses::assert_aptos_framework(fx);
+    let observed_jwks = borrow_global_mut<ObservedJWKs>(@aptos_framework);
+    let old_value = remove_issuer(&mut observed_jwks.jwks, issuer);
+
+    let epoch = reconfiguration::current_epoch();
+    emit(ObservedJWKsUpdated { epoch, jwks: observed_jwks.jwks });
+    regenerate_patched_jwks();
+
+    old_value
+}
+
+ + + +
+ + + +## Function `regenerate_patched_jwks` + +Regenerate PatchedJWKs from ObservedJWKs and Patches and save the result. + + +
fun regenerate_patched_jwks()
+
+ + + +
+Implementation + + +
fun regenerate_patched_jwks() acquires PatchedJWKs, Patches, ObservedJWKs {
+    let jwks = borrow_global<ObservedJWKs>(@aptos_framework).jwks;
+    let patches = borrow_global<Patches>(@aptos_framework);
+    vector::for_each_ref(&patches.patches, |obj|{
+        let patch: &Patch = obj;
+        apply_patch(&mut jwks, *patch);
+    });
+    *borrow_global_mut<PatchedJWKs>(@aptos_framework) = PatchedJWKs { jwks };
+}
+
+ + + +
+ + + +## Function `try_get_jwk_by_issuer` + +Get a JWK by issuer and key ID from an AllProvidersJWKs, if it exists. + + +
fun try_get_jwk_by_issuer(jwks: &jwks::AllProvidersJWKs, issuer: vector<u8>, jwk_id: vector<u8>): option::Option<jwks::JWK>
+
+ + + +
+Implementation + + +
fun try_get_jwk_by_issuer(jwks: &AllProvidersJWKs, issuer: vector<u8>, jwk_id: vector<u8>): Option<JWK> {
+    let (issuer_found, index) = vector::find(&jwks.entries, |obj| {
+        let provider_jwks: &ProviderJWKs = obj;
+        issuer == provider_jwks.issuer
+    });
+
+    if (issuer_found) {
+        try_get_jwk_by_id(vector::borrow(&jwks.entries, index), jwk_id)
+    } else {
+        option::none()
+    }
+}
+
+ + + +
+ + + +## Function `try_get_jwk_by_id` + +Get a JWK by key ID from a ProviderJWKs, if it exists. + + +
fun try_get_jwk_by_id(provider_jwks: &jwks::ProviderJWKs, jwk_id: vector<u8>): option::Option<jwks::JWK>
+
+ + + +
+Implementation + + +
fun try_get_jwk_by_id(provider_jwks: &ProviderJWKs, jwk_id: vector<u8>): Option<JWK> {
+    let (jwk_id_found, index) = vector::find(&provider_jwks.jwks, |obj|{
+        let jwk: &JWK = obj;
+        jwk_id == get_jwk_id(jwk)
+    });
+
+    if (jwk_id_found) {
+        option::some(*vector::borrow(&provider_jwks.jwks, index))
+    } else {
+        option::none()
+    }
+}
+
+ + + +
+ + + +## Function `get_jwk_id` + +Get the ID of a JWK. + + +
fun get_jwk_id(jwk: &jwks::JWK): vector<u8>
+
+ + + +
+Implementation + + +
fun get_jwk_id(jwk: &JWK): vector<u8> {
+    let variant_type_name = *string::bytes(copyable_any::type_name(&jwk.variant));
+    if (variant_type_name == b"0x1::jwks::RSA_JWK") {
+        let rsa = copyable_any::unpack<RSA_JWK>(jwk.variant);
+        *string::bytes(&rsa.kid)
+    } else if (variant_type_name == b"0x1::jwks::UnsupportedJWK") {
+        let unsupported = copyable_any::unpack<UnsupportedJWK>(jwk.variant);
+        unsupported.id
+    } else {
+        abort(error::invalid_argument(EUNKNOWN_JWK_VARIANT))
+    }
+}
+
+ + + +
+ + + +## Function `upsert_provider_jwks` + +Upsert a ProviderJWKs into an AllProvidersJWKs. If this upsert replaced an existing entry, return it. +Maintains the sorted-by-issuer invariant in AllProvidersJWKs. + + +
fun upsert_provider_jwks(jwks: &mut jwks::AllProvidersJWKs, provider_jwks: jwks::ProviderJWKs): option::Option<jwks::ProviderJWKs>
+
+ + + +
+Implementation + + +
fun upsert_provider_jwks(jwks: &mut AllProvidersJWKs, provider_jwks: ProviderJWKs): Option<ProviderJWKs> {
+    // NOTE: Using a linear-time search here because we do not expect too many providers.
+    let found = false;
+    let index = 0;
+    let num_entries = vector::length(&jwks.entries);
+    while (index < num_entries) {
+        let cur_entry = vector::borrow(&jwks.entries, index);
+        let comparison = compare_u8_vector(provider_jwks.issuer, cur_entry.issuer);
+        if (is_greater_than(&comparison)) {
+            index = index + 1;
+        } else {
+            found = is_equal(&comparison);
+            break
+        }
+    };
+
+    // Now if `found == true`, `index` points to the JWK we want to update/remove; otherwise, `index` points to
+    // where we want to insert.
+    let ret = if (found) {
+        let entry = vector::borrow_mut(&mut jwks.entries, index);
+        let old_entry = option::some(*entry);
+        *entry = provider_jwks;
+        old_entry
+    } else {
+        vector::insert(&mut jwks.entries, index, provider_jwks);
+        option::none()
+    };
+
+    ret
+}
+
+ + + +
+ + + +## Function `remove_issuer` + +Remove the entry of an issuer from a AllProvidersJWKs and return the entry, if exists. +Maintains the sorted-by-issuer invariant in AllProvidersJWKs. + + +
fun remove_issuer(jwks: &mut jwks::AllProvidersJWKs, issuer: vector<u8>): option::Option<jwks::ProviderJWKs>
+
+ + + +
+Implementation + + +
fun remove_issuer(jwks: &mut AllProvidersJWKs, issuer: vector<u8>): Option<ProviderJWKs> {
+    let (found, index) = vector::find(&jwks.entries, |obj| {
+        let provider_jwk_set: &ProviderJWKs = obj;
+        provider_jwk_set.issuer == issuer
+    });
+
+    let ret = if (found) {
+        option::some(vector::remove(&mut jwks.entries, index))
+    } else {
+        option::none()
+    };
+
+    ret
+}
+
+ + + +
+ + + +## Function `upsert_jwk` + +Upsert a JWK into a ProviderJWKs. If this upsert replaced an existing entry, return it. + + +
fun upsert_jwk(set: &mut jwks::ProviderJWKs, jwk: jwks::JWK): option::Option<jwks::JWK>
+
+ + + +
+Implementation + + +
fun upsert_jwk(set: &mut ProviderJWKs, jwk: JWK): Option<JWK> {
+    let found = false;
+    let index = 0;
+    let num_entries = vector::length(&set.jwks);
+    while (index < num_entries) {
+        let cur_entry = vector::borrow(&set.jwks, index);
+        let comparison = compare_u8_vector(get_jwk_id(&jwk), get_jwk_id(cur_entry));
+        if (is_greater_than(&comparison)) {
+            index = index + 1;
+        } else {
+            found = is_equal(&comparison);
+            break
+        }
+    };
+
+    // Now if `found == true`, `index` points to the JWK we want to update/remove; otherwise, `index` points to
+    // where we want to insert.
+    let ret = if (found) {
+        let entry = vector::borrow_mut(&mut set.jwks, index);
+        let old_entry = option::some(*entry);
+        *entry = jwk;
+        old_entry
+    } else {
+        vector::insert(&mut set.jwks, index, jwk);
+        option::none()
+    };
+
+    ret
+}
+
+ + + +
+ + + +## Function `remove_jwk` + +Remove the entry of a key ID from a ProviderJWKs and return the entry, if it exists. + + +
fun remove_jwk(jwks: &mut jwks::ProviderJWKs, jwk_id: vector<u8>): option::Option<jwks::JWK>
+
+ + + +
+Implementation + + +
fun remove_jwk(jwks: &mut ProviderJWKs, jwk_id: vector<u8>): Option<JWK> {
+    let (found, index) = vector::find(&jwks.jwks, |obj| {
+        let jwk: &JWK = obj;
+        jwk_id == get_jwk_id(jwk)
+    });
+
+    let ret = if (found) {
+        option::some(vector::remove(&mut jwks.jwks, index))
+    } else {
+        option::none()
+    };
+
+    ret
+}
+
+ + + +
+ + + +## Function `apply_patch` + +Modify an AllProvidersJWKs object with a Patch. +Maintains the sorted-by-issuer invariant in AllProvidersJWKs. + + +
fun apply_patch(jwks: &mut jwks::AllProvidersJWKs, patch: jwks::Patch)
+
+ + + +
+Implementation + + +
fun apply_patch(jwks: &mut AllProvidersJWKs, patch: Patch) {
+    let variant_type_name = *string::bytes(copyable_any::type_name(&patch.variant));
+    if (variant_type_name == b"0x1::jwks::PatchRemoveAll") {
+        jwks.entries = vector[];
+    } else if (variant_type_name == b"0x1::jwks::PatchRemoveIssuer") {
+        let cmd = copyable_any::unpack<PatchRemoveIssuer>(patch.variant);
+        remove_issuer(jwks, cmd.issuer);
+    } else if (variant_type_name == b"0x1::jwks::PatchRemoveJWK") {
+        let cmd = copyable_any::unpack<PatchRemoveJWK>(patch.variant);
+        // TODO: This is inefficient: we remove the issuer, modify its JWKs & and reinsert the updated issuer. Why
+        // not just update it in place?
+        let existing_jwk_set = remove_issuer(jwks, cmd.issuer);
+        if (option::is_some(&existing_jwk_set)) {
+            let jwk_set = option::extract(&mut existing_jwk_set);
+            remove_jwk(&mut jwk_set, cmd.jwk_id);
+            upsert_provider_jwks(jwks, jwk_set);
+        };
+    } else if (variant_type_name == b"0x1::jwks::PatchUpsertJWK") {
+        let cmd = copyable_any::unpack<PatchUpsertJWK>(patch.variant);
+        // TODO: This is inefficient: we remove the issuer, modify its JWKs & and reinsert the updated issuer. Why
+        // not just update it in place?
+        let existing_jwk_set = remove_issuer(jwks, cmd.issuer);
+        let jwk_set = if (option::is_some(&existing_jwk_set)) {
+            option::extract(&mut existing_jwk_set)
+        } else {
+            ProviderJWKs {
+                version: 0,
+                issuer: cmd.issuer,
+                jwks: vector[],
+            }
+        };
+        upsert_jwk(&mut jwk_set, cmd.jwk);
+        upsert_provider_jwks(jwks, jwk_set);
+    } else {
+        abort(std::error::invalid_argument(EUNKNOWN_PATCH_VARIANT))
+    }
+}
+
+ + + +
+ + + +## Specification + + + + +### Function `on_new_epoch` + + +
public(friend) fun on_new_epoch(framework: &signer)
+
+ + + + +
requires @aptos_framework == std::signer::address_of(framework);
+include config_buffer::OnNewEpochRequirement<SupportedOIDCProviders>;
+aborts_if false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/keyless_account.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/keyless_account.md new file mode 100644 index 0000000000000..d3d37f1abce46 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/keyless_account.md @@ -0,0 +1,800 @@ + + + +# Module `0x1::keyless_account` + +This module is responsible for configuring keyless blockchain accounts which were introduced in +[AIP-61](https://github.com/aptos-foundation/AIPs/blob/main/aips/aip-61.md). + + +- [Struct `Group`](#0x1_keyless_account_Group) +- [Resource `Groth16VerificationKey`](#0x1_keyless_account_Groth16VerificationKey) +- [Resource `Configuration`](#0x1_keyless_account_Configuration) +- [Constants](#@Constants_0) +- [Function `new_groth16_verification_key`](#0x1_keyless_account_new_groth16_verification_key) +- [Function `new_configuration`](#0x1_keyless_account_new_configuration) +- [Function `validate_groth16_vk`](#0x1_keyless_account_validate_groth16_vk) +- [Function `update_groth16_verification_key`](#0x1_keyless_account_update_groth16_verification_key) +- [Function `update_configuration`](#0x1_keyless_account_update_configuration) +- [Function `update_training_wheels`](#0x1_keyless_account_update_training_wheels) +- [Function `update_max_exp_horizon`](#0x1_keyless_account_update_max_exp_horizon) +- [Function `remove_all_override_auds`](#0x1_keyless_account_remove_all_override_auds) +- [Function `add_override_aud`](#0x1_keyless_account_add_override_aud) +- [Function `set_groth16_verification_key_for_next_epoch`](#0x1_keyless_account_set_groth16_verification_key_for_next_epoch) +- [Function `set_configuration_for_next_epoch`](#0x1_keyless_account_set_configuration_for_next_epoch) +- [Function `update_training_wheels_for_next_epoch`](#0x1_keyless_account_update_training_wheels_for_next_epoch) +- [Function 
`update_max_exp_horizon_for_next_epoch`](#0x1_keyless_account_update_max_exp_horizon_for_next_epoch) +- [Function `remove_all_override_auds_for_next_epoch`](#0x1_keyless_account_remove_all_override_auds_for_next_epoch) +- [Function `add_override_aud_for_next_epoch`](#0x1_keyless_account_add_override_aud_for_next_epoch) +- [Function `on_new_epoch`](#0x1_keyless_account_on_new_epoch) +- [Specification](#@Specification_1) + + +
use 0x1::bn254_algebra;
+use 0x1::chain_status;
+use 0x1::config_buffer;
+use 0x1::crypto_algebra;
+use 0x1::ed25519;
+use 0x1::option;
+use 0x1::signer;
+use 0x1::string;
+use 0x1::system_addresses;
+
+ + + + + +## Struct `Group` + + + +
#[resource_group(#[scope = global])]
+struct Group
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Resource `Groth16VerificationKey` + +The 288-byte Groth16 verification key (VK) for the ZK relation that implements keyless accounts + + +
#[resource_group_member(#[group = 0x1::keyless_account::Group])]
+struct Groth16VerificationKey has drop, store, key
+
+ + + +
+Fields + + +
+
+alpha_g1: vector<u8> +
+
+ 32-byte serialization of alpha * G, where G is the generator of G1. +
+
+beta_g2: vector<u8> +
+
+ 64-byte serialization of alpha * H, where H is the generator of G2. +
+
+gamma_g2: vector<u8> +
+
+ 64-byte serialization of gamma * H, where H is the generator of G2. +
+
+delta_g2: vector<u8> +
+
+ 64-byte serialization of delta * H, where H is the generator of G2. +
+
+gamma_abc_g1: vector<vector<u8>> +
+
+ \forall i \in {0, ..., \ell}, 64-byte serialization of gamma^{-1} * (beta * a_i + alpha * b_i + c_i) * H, where + H is the generator of G1 and \ell is 1 for the ZK relation. +
+
+ + +
+ + + +## Resource `Configuration` + + + +
#[resource_group_member(#[group = 0x1::keyless_account::Group])]
+struct Configuration has copy, drop, store, key
+
+ + + +
+Fields + + +
+
+override_aud_vals: vector<string::String> +
+
+ An override aud for the identity of a recovery service, which will help users recover their keyless accounts + associated with dapps or wallets that have disappeared. + IMPORTANT: This recovery service **cannot** on its own take over user accounts; a user must first sign in + via OAuth in the recovery service in order to allow it to rotate any of that user's keyless accounts. +
+
+max_signatures_per_txn: u16 +
+
+ No transaction can have more than this many keyless signatures. +
+
+max_exp_horizon_secs: u64 +
+
+ How far in the future from the JWT issued at time the EPK expiry can be set. +
+
+training_wheels_pubkey: option::Option<vector<u8>> +
+
+ The training wheels PK, if training wheels are on +
+
+max_commited_epk_bytes: u16 +
+
+ The max length of an ephemeral public key supported in our circuit (93 bytes) +
+
+max_iss_val_bytes: u16 +
+
+ The max length of the value of the JWT's iss field supported in our circuit (e.g., "https://accounts.google.com") +
+
+max_extra_field_bytes: u16 +
+
+ The max length of the JWT field name and value (e.g., "max_age":"18") supported in our circuit +
+
+max_jwt_header_b64_bytes: u32 +
+
+ The max length of the base64url-encoded JWT header in bytes supported in our circuit +
+
+ + +
+ + + +## Constants + + + + +A serialized BN254 G1 point is invalid. + + +
const E_INVALID_BN254_G1_SERIALIZATION: u64 = 2;
+
+ + + + + +A serialized BN254 G2 point is invalid. + + +
const E_INVALID_BN254_G2_SERIALIZATION: u64 = 3;
+
+ + + + + +The training wheels PK needs to be 32 bytes long. + + +
const E_TRAINING_WHEELS_PK_WRONG_SIZE: u64 = 1;
+
+ + + + + +## Function `new_groth16_verification_key` + + + +
public fun new_groth16_verification_key(alpha_g1: vector<u8>, beta_g2: vector<u8>, gamma_g2: vector<u8>, delta_g2: vector<u8>, gamma_abc_g1: vector<vector<u8>>): keyless_account::Groth16VerificationKey
+
+ + + +
+Implementation + + +
public fun new_groth16_verification_key(alpha_g1: vector<u8>,
+                                        beta_g2: vector<u8>,
+                                        gamma_g2: vector<u8>,
+                                        delta_g2: vector<u8>,
+                                        gamma_abc_g1: vector<vector<u8>>
+): Groth16VerificationKey {
+    Groth16VerificationKey {
+        alpha_g1,
+        beta_g2,
+        gamma_g2,
+        delta_g2,
+        gamma_abc_g1,
+    }
+}
+
+ + + +
+ + + +## Function `new_configuration` + + + +
public fun new_configuration(override_aud_val: vector<string::String>, max_signatures_per_txn: u16, max_exp_horizon_secs: u64, training_wheels_pubkey: option::Option<vector<u8>>, max_commited_epk_bytes: u16, max_iss_val_bytes: u16, max_extra_field_bytes: u16, max_jwt_header_b64_bytes: u32): keyless_account::Configuration
+
+ + + +
+Implementation + + +
public fun new_configuration(
+    override_aud_val: vector<String>,
+    max_signatures_per_txn: u16,
+    max_exp_horizon_secs: u64,
+    training_wheels_pubkey: Option<vector<u8>>,
+    max_commited_epk_bytes: u16,
+    max_iss_val_bytes: u16,
+    max_extra_field_bytes: u16,
+    max_jwt_header_b64_bytes: u32
+): Configuration {
+    Configuration {
+        override_aud_vals: override_aud_val,
+        max_signatures_per_txn,
+        max_exp_horizon_secs,
+        training_wheels_pubkey,
+        max_commited_epk_bytes,
+        max_iss_val_bytes,
+        max_extra_field_bytes,
+        max_jwt_header_b64_bytes,
+    }
+}
+
+ + + +
+ + + +## Function `validate_groth16_vk` + +Pre-validate the VK to actively-prevent incorrect VKs from being set on-chain. + + +
fun validate_groth16_vk(vk: &keyless_account::Groth16VerificationKey)
+
+ + + +
+Implementation + + +
fun validate_groth16_vk(vk: &Groth16VerificationKey) {
+    // Could be leveraged to speed up the VM deserialization of the VK by 2x, since it can assume the points are valid.
+    assert!(option::is_some(&crypto_algebra::deserialize<bn254_algebra::G1, bn254_algebra::FormatG1Compr>(&vk.alpha_g1)), E_INVALID_BN254_G1_SERIALIZATION);
+    assert!(option::is_some(&crypto_algebra::deserialize<bn254_algebra::G2, bn254_algebra::FormatG2Compr>(&vk.beta_g2)), E_INVALID_BN254_G2_SERIALIZATION);
+    assert!(option::is_some(&crypto_algebra::deserialize<bn254_algebra::G2, bn254_algebra::FormatG2Compr>(&vk.gamma_g2)), E_INVALID_BN254_G2_SERIALIZATION);
+    assert!(option::is_some(&crypto_algebra::deserialize<bn254_algebra::G2, bn254_algebra::FormatG2Compr>(&vk.delta_g2)), E_INVALID_BN254_G2_SERIALIZATION);
+    for (i in 0..vector::length(&vk.gamma_abc_g1)) {
+        assert!(option::is_some(&crypto_algebra::deserialize<bn254_algebra::G1, bn254_algebra::FormatG1Compr>(vector::borrow(&vk.gamma_abc_g1, i))), E_INVALID_BN254_G1_SERIALIZATION);
+    };
+}
+
+ + + +
+ + + +## Function `update_groth16_verification_key` + +Sets the Groth16 verification key, only callable during genesis. To call during governance proposals, use +set_groth16_verification_key_for_next_epoch. + +WARNING: See set_groth16_verification_key_for_next_epoch for caveats. + + +
public fun update_groth16_verification_key(fx: &signer, vk: keyless_account::Groth16VerificationKey)
+
+ + + +
+Implementation + + +
public fun update_groth16_verification_key(fx: &signer, vk: Groth16VerificationKey) {
+    system_addresses::assert_aptos_framework(fx);
+    chain_status::assert_genesis();
+    // There should not be a previous resource set here.
+    move_to(fx, vk);
+}
+
+ + + +
+ + + +## Function `update_configuration` + +Sets the keyless configuration, only callable during genesis. To call during governance proposals, use +set_configuration_for_next_epoch. + +WARNING: See set_configuration_for_next_epoch for caveats. + + +
public fun update_configuration(fx: &signer, config: keyless_account::Configuration)
+
+ + + +
+Implementation + + +
public fun update_configuration(fx: &signer, config: Configuration) {
+    system_addresses::assert_aptos_framework(fx);
+    chain_status::assert_genesis();
+    // There should not be a previous resource set here.
+    move_to(fx, config);
+}
+
+ + + +
+ + + +## Function `update_training_wheels` + + + +
#[deprecated]
+public fun update_training_wheels(fx: &signer, pk: option::Option<vector<u8>>)
+
+ + + +
+Implementation + + +
public fun update_training_wheels(fx: &signer, pk: Option<vector<u8>>) acquires Configuration {
+    system_addresses::assert_aptos_framework(fx);
+    chain_status::assert_genesis();
+
+    if (option::is_some(&pk)) {
+        assert!(vector::length(option::borrow(&pk)) == 32, E_TRAINING_WHEELS_PK_WRONG_SIZE)
+    };
+
+    let config = borrow_global_mut<Configuration>(signer::address_of(fx));
+    config.training_wheels_pubkey = pk;
+}
+
+ + + +
+ + + +## Function `update_max_exp_horizon` + + + +
#[deprecated]
+public fun update_max_exp_horizon(fx: &signer, max_exp_horizon_secs: u64)
+
+ + + +
+Implementation + + +
public fun update_max_exp_horizon(fx: &signer, max_exp_horizon_secs: u64) acquires Configuration {
+    system_addresses::assert_aptos_framework(fx);
+    chain_status::assert_genesis();
+
+    let config = borrow_global_mut<Configuration>(signer::address_of(fx));
+    config.max_exp_horizon_secs = max_exp_horizon_secs;
+}
+
+ + + +
+ + + +## Function `remove_all_override_auds` + + + +
#[deprecated]
+public fun remove_all_override_auds(fx: &signer)
+
+ + + +
+Implementation + + +
public fun remove_all_override_auds(fx: &signer) acquires Configuration {
+    system_addresses::assert_aptos_framework(fx);
+    chain_status::assert_genesis();
+
+    let config = borrow_global_mut<Configuration>(signer::address_of(fx));
+    config.override_aud_vals = vector[];
+}
+
+ + + +
+ + + +## Function `add_override_aud` + + + +
#[deprecated]
+public fun add_override_aud(fx: &signer, aud: string::String)
+
+ + + +
+Implementation + + +
public fun add_override_aud(fx: &signer, aud: String) acquires Configuration {
+    system_addresses::assert_aptos_framework(fx);
+    chain_status::assert_genesis();
+
+    let config = borrow_global_mut<Configuration>(signer::address_of(fx));
+    vector::push_back(&mut config.override_aud_vals, aud);
+}
+
+ + + +
+ + + +## Function `set_groth16_verification_key_for_next_epoch` + +Queues up a change to the Groth16 verification key. The change will only be effective after reconfiguration. +Only callable via governance proposal. + +WARNING: To mitigate against DoS attacks, a VK change should be done together with a training wheels PK change, +so that old ZKPs for the old VK cannot be replayed as potentially-valid ZKPs. + +WARNING: If a malicious key is set, this would lead to stolen funds. + + +
public fun set_groth16_verification_key_for_next_epoch(fx: &signer, vk: keyless_account::Groth16VerificationKey)
+
+ + + +
+Implementation + + +
public fun set_groth16_verification_key_for_next_epoch(fx: &signer, vk: Groth16VerificationKey) {
+    system_addresses::assert_aptos_framework(fx);
+    config_buffer::upsert<Groth16VerificationKey>(vk);
+}
+
+ + + +
+ + + +## Function `set_configuration_for_next_epoch` + +Queues up a change to the keyless configuration. The change will only be effective after reconfiguration. Only +callable via governance proposal. + +WARNING: A malicious Configuration could lead to DoS attacks, create liveness issues, or enable a malicious +recovery service provider to phish users' accounts. + + +
public fun set_configuration_for_next_epoch(fx: &signer, config: keyless_account::Configuration)
+
+ + + +
+Implementation + + +
public fun set_configuration_for_next_epoch(fx: &signer, config: Configuration) {
+    system_addresses::assert_aptos_framework(fx);
+    config_buffer::upsert<Configuration>(config);
+}
+
+ + + +
+ + + +## Function `update_training_wheels_for_next_epoch` + +Convenience method to queue up a change to the training wheels PK. The change will only be effective after +reconfiguration. Only callable via governance proposal. + +WARNING: If a malicious key is set, this *could* lead to stolen funds. + + +
public fun update_training_wheels_for_next_epoch(fx: &signer, pk: option::Option<vector<u8>>)
+
+ + + +
+Implementation + + +
public fun update_training_wheels_for_next_epoch(fx: &signer, pk: Option<vector<u8>>) acquires Configuration {
+    system_addresses::assert_aptos_framework(fx);
+
+    // If a PK is being set, validate it first.
+    if (option::is_some(&pk)) {
+        let bytes = *option::borrow(&pk);
+        let vpk = ed25519::new_validated_public_key_from_bytes(bytes);
+        assert!(option::is_some(&vpk), E_TRAINING_WHEELS_PK_WRONG_SIZE)
+    };
+
+    let config = if (config_buffer::does_exist<Configuration>()) {
+        config_buffer::extract<Configuration>()
+    } else {
+        *borrow_global<Configuration>(signer::address_of(fx))
+    };
+
+    config.training_wheels_pubkey = pk;
+
+    set_configuration_for_next_epoch(fx, config);
+}
+
+ + + +
+ + + +## Function `update_max_exp_horizon_for_next_epoch` + +Convenience method to queue up a change to the max expiration horizon. The change will only be effective after +reconfiguration. Only callable via governance proposal. + + +
public fun update_max_exp_horizon_for_next_epoch(fx: &signer, max_exp_horizon_secs: u64)
+
+ + + +
+Implementation + + +
public fun update_max_exp_horizon_for_next_epoch(fx: &signer, max_exp_horizon_secs: u64) acquires Configuration {
+    system_addresses::assert_aptos_framework(fx);
+
+    let config = if (config_buffer::does_exist<Configuration>()) {
+        config_buffer::extract<Configuration>()
+    } else {
+        *borrow_global<Configuration>(signer::address_of(fx))
+    };
+
+    config.max_exp_horizon_secs = max_exp_horizon_secs;
+
+    set_configuration_for_next_epoch(fx, config);
+}
+
+ + + +
+ + + +## Function `remove_all_override_auds_for_next_epoch` + +Convenience method to queue up clearing the set of override aud's. The change will only be effective after +reconfiguration. Only callable via governance proposal. + +WARNING: When no override aud is set, recovery of keyless accounts associated with applications that disappeared +is no longer possible. + + +
public fun remove_all_override_auds_for_next_epoch(fx: &signer)
+
+ + + +
+Implementation + + +
public fun remove_all_override_auds_for_next_epoch(fx: &signer) acquires Configuration {
+    system_addresses::assert_aptos_framework(fx);
+
+    let config = if (config_buffer::does_exist<Configuration>()) {
+        config_buffer::extract<Configuration>()
+    } else {
+        *borrow_global<Configuration>(signer::address_of(fx))
+    };
+
+    config.override_aud_vals = vector[];
+
+    set_configuration_for_next_epoch(fx, config);
+}
+
+ + + +
+ + + +## Function `add_override_aud_for_next_epoch` + +Convenience method to queue up an append to the set of override aud's. The change will only be effective +after reconfiguration. Only callable via governance proposal. + +WARNING: If a malicious override aud is set, this *could* lead to stolen funds. + + +
public fun add_override_aud_for_next_epoch(fx: &signer, aud: string::String)
+
+ + + +
+Implementation + + +
public fun add_override_aud_for_next_epoch(fx: &signer, aud: String) acquires Configuration {
+    system_addresses::assert_aptos_framework(fx);
+
+    let config = if (config_buffer::does_exist<Configuration>()) {
+        config_buffer::extract<Configuration>()
+    } else {
+        *borrow_global<Configuration>(signer::address_of(fx))
+    };
+
+    vector::push_back(&mut config.override_aud_vals, aud);
+
+    set_configuration_for_next_epoch(fx, config);
+}
+
+ + + +
+ + + +## Function `on_new_epoch` + +Only used in reconfigurations to apply the queued up configuration changes, if there are any. + + +
public(friend) fun on_new_epoch(fx: &signer)
+
+ + + +
+Implementation + + +
public(friend) fun on_new_epoch(fx: &signer) acquires Groth16VerificationKey, Configuration {
+    system_addresses::assert_aptos_framework(fx);
+
+    if (config_buffer::does_exist<Groth16VerificationKey>()) {
+        let vk = config_buffer::extract();
+        if (exists<Groth16VerificationKey>(@aptos_framework)) {
+            *borrow_global_mut<Groth16VerificationKey>(@aptos_framework) = vk;
+        } else {
+            move_to(fx, vk);
+        }
+    };
+
+    if (config_buffer::does_exist<Configuration>()) {
+        let config = config_buffer::extract();
+        if (exists<Configuration>(@aptos_framework)) {
+            *borrow_global_mut<Configuration>(@aptos_framework) = config;
+        } else {
+            move_to(fx, config);
+        }
+    };
+}
+
+ + + +
+ + + +## Specification + + + +
pragma verify=false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/managed_coin.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/managed_coin.md new file mode 100644 index 0000000000000..b6d1c90017f0f --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/managed_coin.md @@ -0,0 +1,426 @@ + + + +# Module `0x1::managed_coin` + +ManagedCoin is built to make a simple walkthrough of the Coins module. +It contains scripts you will need to initialize, mint, burn, transfer coins. +By utilizing this current module, a developer can create his own coin and care less about mint and burn capabilities, + + +- [Resource `Capabilities`](#0x1_managed_coin_Capabilities) +- [Constants](#@Constants_0) +- [Function `burn`](#0x1_managed_coin_burn) +- [Function `initialize`](#0x1_managed_coin_initialize) +- [Function `mint`](#0x1_managed_coin_mint) +- [Function `register`](#0x1_managed_coin_register) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `burn`](#@Specification_1_burn) + - [Function `initialize`](#@Specification_1_initialize) + - [Function `mint`](#@Specification_1_mint) + - [Function `register`](#@Specification_1_register) + + +
use 0x1::coin;
+use 0x1::error;
+use 0x1::signer;
+use 0x1::string;
+
+ + + + + +## Resource `Capabilities` + +Capabilities resource storing mint and burn capabilities. +The resource is stored on the account that initialized coin CoinType. + + +
struct Capabilities<CoinType> has key
+
+ + + +
+Fields + + +
+
+burn_cap: coin::BurnCapability<CoinType> +
+
+ +
+
+freeze_cap: coin::FreezeCapability<CoinType> +
+
+ +
+
+mint_cap: coin::MintCapability<CoinType> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Account has no capabilities (burn/mint). + + +
const ENO_CAPABILITIES: u64 = 1;
+
+ + + + + +## Function `burn` + +Withdraw an amount of coin CoinType from account and burn it. + + +
public entry fun burn<CoinType>(account: &signer, amount: u64)
+
+ + + +
+Implementation + + +
public entry fun burn<CoinType>(
+    account: &signer,
+    amount: u64,
+) acquires Capabilities {
+    let account_addr = signer::address_of(account);
+
+    assert!(
+        exists<Capabilities<CoinType>>(account_addr),
+        error::not_found(ENO_CAPABILITIES),
+    );
+
+    let capabilities = borrow_global<Capabilities<CoinType>>(account_addr);
+
+    let to_burn = coin::withdraw<CoinType>(account, amount);
+    coin::burn(to_burn, &capabilities.burn_cap);
+}
+
+ + + +
+ + + +## Function `initialize` + +Initialize new coin CoinType in Aptos Blockchain. +Mint and Burn Capabilities will be stored under account in Capabilities resource. + + +
public entry fun initialize<CoinType>(account: &signer, name: vector<u8>, symbol: vector<u8>, decimals: u8, monitor_supply: bool)
+
+ + + +
+Implementation + + +
public entry fun initialize<CoinType>(
+    account: &signer,
+    name: vector<u8>,
+    symbol: vector<u8>,
+    decimals: u8,
+    monitor_supply: bool,
+) {
+    let (burn_cap, freeze_cap, mint_cap) = coin::initialize<CoinType>(
+        account,
+        string::utf8(name),
+        string::utf8(symbol),
+        decimals,
+        monitor_supply,
+    );
+
+    move_to(account, Capabilities<CoinType> {
+        burn_cap,
+        freeze_cap,
+        mint_cap,
+    });
+}
+
+ + + +
+ + + +## Function `mint` + +Create new coins CoinType and deposit them into dst_addr's account. + + +
public entry fun mint<CoinType>(account: &signer, dst_addr: address, amount: u64)
+
+ + + +
+Implementation + + +
public entry fun mint<CoinType>(
+    account: &signer,
+    dst_addr: address,
+    amount: u64,
+) acquires Capabilities {
+    let account_addr = signer::address_of(account);
+
+    assert!(
+        exists<Capabilities<CoinType>>(account_addr),
+        error::not_found(ENO_CAPABILITIES),
+    );
+
+    let capabilities = borrow_global<Capabilities<CoinType>>(account_addr);
+    let coins_minted = coin::mint(amount, &capabilities.mint_cap);
+    coin::deposit(dst_addr, coins_minted);
+}
+
+ + + +
+ + + +## Function `register` + +Creating a resource that stores balance of CoinType on user's account, withdraw and deposit event handlers. +Required if user wants to start accepting deposits of CoinType in his account. + + +
public entry fun register<CoinType>(account: &signer)
+
+ + + +
+Implementation + + +
public entry fun register<CoinType>(account: &signer) {
+    coin::register<CoinType>(account);
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1The initializing account should hold the capabilities to operate the coin.CriticalThe capabilities are stored under the initializing account under the Capabilities resource, which is distinct for a distinct type of coin.Enforced via initialize.
2A new coin should be properly initialized.HighIn the initialize function, a new coin is initialized via the coin module with the specified properties.Enforced via initialize_internal.
3Minting/Burning should only be done by the account who hold the valid capabilities.HighThe mint and burn capabilities are moved under the initializing account and retrieved, while minting/burningEnforced via: initialize, burn, mint.
4If the total supply of coins is being monitored, burn and mint operations will appropriately adjust the total supply.HighThe coin::burn and coin::mint functions, when tracking the supply, adjusts the total coin supply accordingly.Enforced via TotalSupplyNoChange.
5Before burning coins, exact amount of coins are withdrawn.HighAfter utilizing the coin::withdraw function to withdraw coins, they are then burned, and the function ensures the precise return of the initially specified coin amount.Enforced via burn_from.
6Minted coins are deposited to the provided destination address.HighAfter the coins are minted via coin::mint they are deposited into the coinstore of the destination address.Enforced via mint.
+ + + + + + +### Module-level Specification + + +
pragma verify = true;
+pragma aborts_if_is_strict;
+
+ + + + + +### Function `burn` + + +
public entry fun burn<CoinType>(account: &signer, amount: u64)
+
+ + + + +
pragma verify = false;
+let account_addr = signer::address_of(account);
+aborts_if !exists<Capabilities<CoinType>>(account_addr);
+let coin_store = global<coin::CoinStore<CoinType>>(account_addr);
+let balance = coin_store.coin.value;
+// This enforces high-level requirement 3 and high-level requirement 4:
+aborts_if !exists<coin::CoinStore<CoinType>>(account_addr);
+aborts_if coin_store.frozen;
+aborts_if balance < amount;
+let addr =  type_info::type_of<CoinType>().account_address;
+let maybe_supply = global<coin::CoinInfo<CoinType>>(addr).supply;
+aborts_if amount == 0;
+aborts_if !exists<coin::CoinInfo<CoinType>>(addr);
+include coin::CoinSubAbortsIf<CoinType> { amount:amount };
+ensures coin::supply<CoinType> == old(coin::supply<CoinType>) - amount;
+
+ + + + + +### Function `initialize` + + +
public entry fun initialize<CoinType>(account: &signer, name: vector<u8>, symbol: vector<u8>, decimals: u8, monitor_supply: bool)
+
+ + +Make sure name and symbol are legal length. +Only the creator of CoinType can initialize. +The 'name' and 'symbol' should be valid utf8 bytes +The Capabilities should not be under the signer before creating; +The Capabilities should be under the signer after creating; + + +
include coin::InitializeInternalSchema<CoinType>;
+aborts_if !string::spec_internal_check_utf8(name);
+aborts_if !string::spec_internal_check_utf8(symbol);
+aborts_if exists<Capabilities<CoinType>>(signer::address_of(account));
+// This enforces high-level requirement 1 and high-level requirement 3:
+ensures exists<Capabilities<CoinType>>(signer::address_of(account));
+
+ + + + + +### Function `mint` + + +
public entry fun mint<CoinType>(account: &signer, dst_addr: address, amount: u64)
+
+ + +The Capabilities should not exist in the signer address. +The dst_addr should not be frozen. + + +
pragma verify = false;
+let account_addr = signer::address_of(account);
+// This enforces high-level requirement 3:
+aborts_if !exists<Capabilities<CoinType>>(account_addr);
+let addr = type_info::type_of<CoinType>().account_address;
+aborts_if (amount != 0) && !exists<coin::CoinInfo<CoinType>>(addr);
+let coin_store = global<coin::CoinStore<CoinType>>(dst_addr);
+aborts_if !exists<coin::CoinStore<CoinType>>(dst_addr);
+aborts_if coin_store.frozen;
+include coin::CoinAddAbortsIf<CoinType>;
+ensures coin::supply<CoinType> == old(coin::supply<CoinType>) + amount;
+// This enforces high-level requirement 6:
+ensures global<coin::CoinStore<CoinType>>(dst_addr).coin.value == old(global<coin::CoinStore<CoinType>>(dst_addr)).coin.value + amount;
+
+ + + + + +### Function `register` + + +
public entry fun register<CoinType>(account: &signer)
+
+ + +An account can only be registered once. +Updating Account.guid_creation_num will not overflow. + + +
pragma verify = false;
+let account_addr = signer::address_of(account);
+let acc = global<account::Account>(account_addr);
+aborts_if !exists<coin::CoinStore<CoinType>>(account_addr) && acc.guid_creation_num + 2 >= account::MAX_GUID_CREATION_NUM;
+aborts_if !exists<coin::CoinStore<CoinType>>(account_addr) && acc.guid_creation_num + 2 > MAX_U64;
+aborts_if !exists<coin::CoinStore<CoinType>>(account_addr) && !exists<account::Account>(account_addr);
+aborts_if !exists<coin::CoinStore<CoinType>>(account_addr) && !type_info::spec_is_struct<CoinType>();
+ensures exists<coin::CoinStore<CoinType>>(account_addr);
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/multisig_account.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/multisig_account.md new file mode 100644 index 0000000000000..601d661ed1af5 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/multisig_account.md @@ -0,0 +1,4127 @@ + + + +# Module `0x1::multisig_account` + +Enhanced multisig account standard on Aptos. This is different from the native multisig scheme support enforced via +the account's auth key. + +This module allows creating a flexible and powerful multisig account with seamless support for updating owners +without changing the auth key. Users can choose to store transaction payloads waiting for owner signatures on chain +or off chain (primary consideration is decentralization/transparency vs gas cost). + +The multisig account is a resource account underneath. By default, it has no auth key and can only be controlled via +the special multisig transaction flow. However, owners can create a transaction to change the auth key to match a +private key off chain if so desired. + +Transactions need to be executed in order of creation, similar to transactions for a normal Aptos account (enforced +with account nonce). + +The flow is like below: +1. Owners can create a new multisig account by calling create (signer is default single owner) or with +create_with_owners where multiple initial owner addresses can be specified. This is different (and easier) from +the native multisig scheme where the owners' public keys have to be specified. Here, only addresses are needed. +2. Owners can be added/removed any time by calling add_owners or remove_owners. The transactions to do still need +to follow the k-of-n scheme specified for the multisig account. +3. To create a new transaction, an owner can call create_transaction with the transaction payload. 
This will store +the full transaction payload on chain, which adds decentralization (censorship is not possible as the data is +available on chain) and makes it easier to fetch all transactions waiting for execution. If saving gas is desired, +an owner can alternatively call create_transaction_with_hash where only the payload hash is stored. Later execution +will be verified using the hash. Only owners can create transactions and a transaction id (incrementing id) will be +assigned. +4. To approve or reject a transaction, other owners can call approve() or reject() with the transaction id. +5. If there are enough approvals, any owner can execute the transaction using the special MultisigTransaction type +with the transaction id if the full payload is already stored on chain or with the transaction payload if only a +hash is stored. Transaction execution will first check with this module that the transaction payload has gotten +enough signatures. If so, it will be executed as the multisig account. The owner who executes will pay for gas. +6. If there are enough rejections, any owner can finalize the rejection by calling execute_rejected_transaction(). + +Note that this multisig account model is not designed to be used with a large number of owners. The more owners there +are, the more expensive voting on transactions will become. If a large number of owners is desired, such as in a +flat governance structure, clients are encouraged to write their own modules on top of this multisig account module +and implement the governance voting logic on top. 
+ + +- [Resource `MultisigAccount`](#0x1_multisig_account_MultisigAccount) +- [Struct `MultisigTransaction`](#0x1_multisig_account_MultisigTransaction) +- [Struct `ExecutionError`](#0x1_multisig_account_ExecutionError) +- [Struct `MultisigAccountCreationMessage`](#0x1_multisig_account_MultisigAccountCreationMessage) +- [Struct `MultisigAccountCreationWithAuthKeyRevocationMessage`](#0x1_multisig_account_MultisigAccountCreationWithAuthKeyRevocationMessage) +- [Struct `AddOwnersEvent`](#0x1_multisig_account_AddOwnersEvent) +- [Struct `AddOwners`](#0x1_multisig_account_AddOwners) +- [Struct `RemoveOwnersEvent`](#0x1_multisig_account_RemoveOwnersEvent) +- [Struct `RemoveOwners`](#0x1_multisig_account_RemoveOwners) +- [Struct `UpdateSignaturesRequiredEvent`](#0x1_multisig_account_UpdateSignaturesRequiredEvent) +- [Struct `UpdateSignaturesRequired`](#0x1_multisig_account_UpdateSignaturesRequired) +- [Struct `CreateTransactionEvent`](#0x1_multisig_account_CreateTransactionEvent) +- [Struct `CreateTransaction`](#0x1_multisig_account_CreateTransaction) +- [Struct `VoteEvent`](#0x1_multisig_account_VoteEvent) +- [Struct `Vote`](#0x1_multisig_account_Vote) +- [Struct `ExecuteRejectedTransactionEvent`](#0x1_multisig_account_ExecuteRejectedTransactionEvent) +- [Struct `ExecuteRejectedTransaction`](#0x1_multisig_account_ExecuteRejectedTransaction) +- [Struct `TransactionExecutionSucceededEvent`](#0x1_multisig_account_TransactionExecutionSucceededEvent) +- [Struct `TransactionExecutionSucceeded`](#0x1_multisig_account_TransactionExecutionSucceeded) +- [Struct `TransactionExecutionFailedEvent`](#0x1_multisig_account_TransactionExecutionFailedEvent) +- [Struct `TransactionExecutionFailed`](#0x1_multisig_account_TransactionExecutionFailed) +- [Struct `MetadataUpdatedEvent`](#0x1_multisig_account_MetadataUpdatedEvent) +- [Struct `MetadataUpdated`](#0x1_multisig_account_MetadataUpdated) +- [Constants](#@Constants_0) +- [Function `metadata`](#0x1_multisig_account_metadata) +- [Function 
`num_signatures_required`](#0x1_multisig_account_num_signatures_required) +- [Function `owners`](#0x1_multisig_account_owners) +- [Function `is_owner`](#0x1_multisig_account_is_owner) +- [Function `get_transaction`](#0x1_multisig_account_get_transaction) +- [Function `get_pending_transactions`](#0x1_multisig_account_get_pending_transactions) +- [Function `get_next_transaction_payload`](#0x1_multisig_account_get_next_transaction_payload) +- [Function `can_be_executed`](#0x1_multisig_account_can_be_executed) +- [Function `can_execute`](#0x1_multisig_account_can_execute) +- [Function `can_be_rejected`](#0x1_multisig_account_can_be_rejected) +- [Function `can_reject`](#0x1_multisig_account_can_reject) +- [Function `get_next_multisig_account_address`](#0x1_multisig_account_get_next_multisig_account_address) +- [Function `last_resolved_sequence_number`](#0x1_multisig_account_last_resolved_sequence_number) +- [Function `next_sequence_number`](#0x1_multisig_account_next_sequence_number) +- [Function `vote`](#0x1_multisig_account_vote) +- [Function `available_transaction_queue_capacity`](#0x1_multisig_account_available_transaction_queue_capacity) +- [Function `create_with_existing_account`](#0x1_multisig_account_create_with_existing_account) +- [Function `create_with_existing_account_and_revoke_auth_key`](#0x1_multisig_account_create_with_existing_account_and_revoke_auth_key) +- [Function `create`](#0x1_multisig_account_create) +- [Function `create_with_owners`](#0x1_multisig_account_create_with_owners) +- [Function `create_with_owners_then_remove_bootstrapper`](#0x1_multisig_account_create_with_owners_then_remove_bootstrapper) +- [Function `create_with_owners_internal`](#0x1_multisig_account_create_with_owners_internal) +- [Function `add_owner`](#0x1_multisig_account_add_owner) +- [Function `add_owners`](#0x1_multisig_account_add_owners) +- [Function `add_owners_and_update_signatures_required`](#0x1_multisig_account_add_owners_and_update_signatures_required) +- [Function 
`remove_owner`](#0x1_multisig_account_remove_owner) +- [Function `remove_owners`](#0x1_multisig_account_remove_owners) +- [Function `swap_owner`](#0x1_multisig_account_swap_owner) +- [Function `swap_owners`](#0x1_multisig_account_swap_owners) +- [Function `swap_owners_and_update_signatures_required`](#0x1_multisig_account_swap_owners_and_update_signatures_required) +- [Function `update_signatures_required`](#0x1_multisig_account_update_signatures_required) +- [Function `update_metadata`](#0x1_multisig_account_update_metadata) +- [Function `update_metadata_internal`](#0x1_multisig_account_update_metadata_internal) +- [Function `create_transaction`](#0x1_multisig_account_create_transaction) +- [Function `create_transaction_with_hash`](#0x1_multisig_account_create_transaction_with_hash) +- [Function `approve_transaction`](#0x1_multisig_account_approve_transaction) +- [Function `reject_transaction`](#0x1_multisig_account_reject_transaction) +- [Function `vote_transanction`](#0x1_multisig_account_vote_transanction) +- [Function `vote_transaction`](#0x1_multisig_account_vote_transaction) +- [Function `vote_transactions`](#0x1_multisig_account_vote_transactions) +- [Function `execute_rejected_transaction`](#0x1_multisig_account_execute_rejected_transaction) +- [Function `execute_rejected_transactions`](#0x1_multisig_account_execute_rejected_transactions) +- [Function `validate_multisig_transaction`](#0x1_multisig_account_validate_multisig_transaction) +- [Function `successful_transaction_execution_cleanup`](#0x1_multisig_account_successful_transaction_execution_cleanup) +- [Function `failed_transaction_execution_cleanup`](#0x1_multisig_account_failed_transaction_execution_cleanup) +- [Function `transaction_execution_cleanup_common`](#0x1_multisig_account_transaction_execution_cleanup_common) +- [Function `remove_executed_transaction`](#0x1_multisig_account_remove_executed_transaction) +- [Function `add_transaction`](#0x1_multisig_account_add_transaction) +- [Function 
`create_multisig_account`](#0x1_multisig_account_create_multisig_account) +- [Function `create_multisig_account_seed`](#0x1_multisig_account_create_multisig_account_seed) +- [Function `validate_owners`](#0x1_multisig_account_validate_owners) +- [Function `assert_is_owner_internal`](#0x1_multisig_account_assert_is_owner_internal) +- [Function `assert_is_owner`](#0x1_multisig_account_assert_is_owner) +- [Function `num_approvals_and_rejections_internal`](#0x1_multisig_account_num_approvals_and_rejections_internal) +- [Function `num_approvals_and_rejections`](#0x1_multisig_account_num_approvals_and_rejections) +- [Function `has_voted_for_approval`](#0x1_multisig_account_has_voted_for_approval) +- [Function `has_voted_for_rejection`](#0x1_multisig_account_has_voted_for_rejection) +- [Function `assert_multisig_account_exists`](#0x1_multisig_account_assert_multisig_account_exists) +- [Function `assert_valid_sequence_number`](#0x1_multisig_account_assert_valid_sequence_number) +- [Function `assert_transaction_exists`](#0x1_multisig_account_assert_transaction_exists) +- [Function `update_owner_schema`](#0x1_multisig_account_update_owner_schema) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `metadata`](#@Specification_1_metadata) + - [Function `num_signatures_required`](#@Specification_1_num_signatures_required) + - [Function `owners`](#@Specification_1_owners) + - [Function `get_transaction`](#@Specification_1_get_transaction) + - [Function `get_next_transaction_payload`](#@Specification_1_get_next_transaction_payload) + - [Function `get_next_multisig_account_address`](#@Specification_1_get_next_multisig_account_address) + - [Function `last_resolved_sequence_number`](#@Specification_1_last_resolved_sequence_number) + - [Function `next_sequence_number`](#@Specification_1_next_sequence_number) + - [Function `vote`](#@Specification_1_vote) + + +
use 0x1::account;
+use 0x1::aptos_coin;
+use 0x1::bcs;
+use 0x1::chain_id;
+use 0x1::coin;
+use 0x1::create_signer;
+use 0x1::error;
+use 0x1::event;
+use 0x1::features;
+use 0x1::hash;
+use 0x1::option;
+use 0x1::signer;
+use 0x1::simple_map;
+use 0x1::string;
+use 0x1::table;
+use 0x1::timestamp;
+use 0x1::vector;
+
+ + + + + +## Resource `MultisigAccount` + +Represents a multisig account's configurations and transactions. +This will be stored in the multisig account (created as a resource account separate from any owner accounts). + + +
struct MultisigAccount has key
+
+ + + +
+Fields + + +
+
+owners: vector<address> +
+
+ +
+
+num_signatures_required: u64 +
+
+ +
+
+transactions: table::Table<u64, multisig_account::MultisigTransaction> +
+
+ +
+
+last_executed_sequence_number: u64 +
+
+ +
+
+next_sequence_number: u64 +
+
+ +
+
+signer_cap: option::Option<account::SignerCapability> +
+
+ +
+
+metadata: simple_map::SimpleMap<string::String, vector<u8>> +
+
+ +
+
+add_owners_events: event::EventHandle<multisig_account::AddOwnersEvent> +
+
+ +
+
+remove_owners_events: event::EventHandle<multisig_account::RemoveOwnersEvent> +
+
+ +
+
+update_signature_required_events: event::EventHandle<multisig_account::UpdateSignaturesRequiredEvent> +
+
+ +
+
+create_transaction_events: event::EventHandle<multisig_account::CreateTransactionEvent> +
+
+ +
+
+vote_events: event::EventHandle<multisig_account::VoteEvent> +
+
+ +
+
+execute_rejected_transaction_events: event::EventHandle<multisig_account::ExecuteRejectedTransactionEvent> +
+
+ +
+
+execute_transaction_events: event::EventHandle<multisig_account::TransactionExecutionSucceededEvent> +
+
+ +
+
+transaction_execution_failed_events: event::EventHandle<multisig_account::TransactionExecutionFailedEvent> +
+
+ +
+
+metadata_updated_events: event::EventHandle<multisig_account::MetadataUpdatedEvent> +
+
+ +
+
+ + +
+ + + +## Struct `MultisigTransaction` + +A transaction to be executed in a multisig account. +This must contain either the full transaction payload or its hash (stored as bytes). + + +
struct MultisigTransaction has copy, drop, store
+
+ + + +
+Fields + + +
+
+payload: option::Option<vector<u8>> +
+
+ +
+
+payload_hash: option::Option<vector<u8>> +
+
+ +
+
+votes: simple_map::SimpleMap<address, bool> +
+
+ +
+
+creator: address +
+
+ +
+
+creation_time_secs: u64 +
+
+ +
+
+ + +
+ + + +## Struct `ExecutionError` + +Contains information about execution failure. + + +
struct ExecutionError has copy, drop, store
+
+ + + +
+Fields + + +
+
+abort_location: string::String +
+
+ +
+
+error_type: string::String +
+
+ +
+
+error_code: u64 +
+
+ +
+
+ + +
+ + + +## Struct `MultisigAccountCreationMessage` + +Used only for verifying multisig account creation on top of existing accounts. + + +
struct MultisigAccountCreationMessage has copy, drop
+
+ + + +
+Fields + + +
+
+chain_id: u8 +
+
+ +
+
+account_address: address +
+
+ +
+
+sequence_number: u64 +
+
+ +
+
+owners: vector<address> +
+
+ +
+
+num_signatures_required: u64 +
+
+ +
+
+ + +
+ + + +## Struct `MultisigAccountCreationWithAuthKeyRevocationMessage` + +Used only for verifying multisig account creation on top of existing accounts and rotating the auth key to 0x0. + + +
struct MultisigAccountCreationWithAuthKeyRevocationMessage has copy, drop
+
+ + + +
+Fields + + +
+
+chain_id: u8 +
+
+ +
+
+account_address: address +
+
+ +
+
+sequence_number: u64 +
+
+ +
+
+owners: vector<address> +
+
+ +
+
+num_signatures_required: u64 +
+
+ +
+
+ + +
+ + + +## Struct `AddOwnersEvent` + +Event emitted when new owners are added to the multisig account. + + +
struct AddOwnersEvent has drop, store
+
+ + + +
+Fields + + +
+
+owners_added: vector<address> +
+
+ +
+
+ + +
+ + + +## Struct `AddOwners` + + + +
#[event]
+struct AddOwners has drop, store
+
+ + + +
+Fields + + +
+
+multisig_account: address +
+
+ +
+
+owners_added: vector<address> +
+
+ +
+
+ + +
+ + + +## Struct `RemoveOwnersEvent` + +Event emitted when new owners are removed from the multisig account. + + +
struct RemoveOwnersEvent has drop, store
+
+ + + +
+Fields + + +
+
+owners_removed: vector<address> +
+
+ +
+
+ + +
+ + + +## Struct `RemoveOwners` + + + +
#[event]
+struct RemoveOwners has drop, store
+
+ + + +
+Fields + + +
+
+multisig_account: address +
+
+ +
+
+owners_removed: vector<address> +
+
+ +
+
+ + +
+ + + +## Struct `UpdateSignaturesRequiredEvent` + +Event emitted when the number of signatures required is updated. + + +
struct UpdateSignaturesRequiredEvent has drop, store
+
+ + + +
+Fields + + +
+
+old_num_signatures_required: u64 +
+
+ +
+
+new_num_signatures_required: u64 +
+
+ +
+
+ + +
+ + + +## Struct `UpdateSignaturesRequired` + + + +
#[event]
+struct UpdateSignaturesRequired has drop, store
+
+ + + +
+Fields + + +
+
+multisig_account: address +
+
+ +
+
+old_num_signatures_required: u64 +
+
+ +
+
+new_num_signatures_required: u64 +
+
+ +
+
+ + +
+ + + +## Struct `CreateTransactionEvent` + +Event emitted when a transaction is created. + + +
struct CreateTransactionEvent has drop, store
+
+ + + +
+Fields + + +
+
+creator: address +
+
+ +
+
+sequence_number: u64 +
+
+ +
+
+transaction: multisig_account::MultisigTransaction +
+
+ +
+
+ + +
+ + + +## Struct `CreateTransaction` + + + +
#[event]
+struct CreateTransaction has drop, store
+
+ + + +
+Fields + + +
+
+multisig_account: address +
+
+ +
+
+creator: address +
+
+ +
+
+sequence_number: u64 +
+
+ +
+
+transaction: multisig_account::MultisigTransaction +
+
+ +
+
+ + +
+ + + +## Struct `VoteEvent` + +Event emitted when an owner approves or rejects a transaction. + + +
struct VoteEvent has drop, store
+
+ + + +
+Fields + + +
+
+owner: address +
+
+ +
+
+sequence_number: u64 +
+
+ +
+
+approved: bool +
+
+ +
+
+ + +
+ + + +## Struct `Vote` + + + +
#[event]
+struct Vote has drop, store
+
+ + + +
+Fields + + +
+
+multisig_account: address +
+
+ +
+
+owner: address +
+
+ +
+
+sequence_number: u64 +
+
+ +
+
+approved: bool +
+
+ +
+
+ + +
+ + + +## Struct `ExecuteRejectedTransactionEvent` + +Event emitted when a transaction is officially rejected because the number of rejections has reached the +number of signatures required. + + +
struct ExecuteRejectedTransactionEvent has drop, store
+
+ + + +
+Fields + + +
+
+sequence_number: u64 +
+
+ +
+
+num_rejections: u64 +
+
+ +
+
+executor: address +
+
+ +
+
+ + +
+ + + +## Struct `ExecuteRejectedTransaction` + + + +
#[event]
+struct ExecuteRejectedTransaction has drop, store
+
+ + + +
+Fields + + +
+
+multisig_account: address +
+
+ +
+
+sequence_number: u64 +
+
+ +
+
+num_rejections: u64 +
+
+ +
+
+executor: address +
+
+ +
+
+ + +
+ + + +## Struct `TransactionExecutionSucceededEvent` + +Event emitted when a transaction is executed. + + +
struct TransactionExecutionSucceededEvent has drop, store
+
+ + + +
+Fields + + +
+
+executor: address +
+
+ +
+
+sequence_number: u64 +
+
+ +
+
+transaction_payload: vector<u8> +
+
+ +
+
+num_approvals: u64 +
+
+ +
+
+ + +
+ + + +## Struct `TransactionExecutionSucceeded` + + + +
#[event]
+struct TransactionExecutionSucceeded has drop, store
+
+ + + +
+Fields + + +
+
+multisig_account: address +
+
+ +
+
+executor: address +
+
+ +
+
+sequence_number: u64 +
+
+ +
+
+transaction_payload: vector<u8> +
+
+ +
+
+num_approvals: u64 +
+
+ +
+
+ + +
+ + + +## Struct `TransactionExecutionFailedEvent` + +Event emitted when a transaction's execution failed. + + +
struct TransactionExecutionFailedEvent has drop, store
+
+ + + +
+Fields + + +
+
+executor: address +
+
+ +
+
+sequence_number: u64 +
+
+ +
+
+transaction_payload: vector<u8> +
+
+ +
+
+num_approvals: u64 +
+
+ +
+
+execution_error: multisig_account::ExecutionError +
+
+ +
+
+ + +
+ + + +## Struct `TransactionExecutionFailed` + + + +
#[event]
+struct TransactionExecutionFailed has drop, store
+
+ + + +
+Fields + + +
+
+multisig_account: address +
+
+ +
+
+executor: address +
+
+ +
+
+sequence_number: u64 +
+
+ +
+
+transaction_payload: vector<u8> +
+
+ +
+
+num_approvals: u64 +
+
+ +
+
+execution_error: multisig_account::ExecutionError +
+
+ +
+
+ + +
+ + + +## Struct `MetadataUpdatedEvent` + +Event emitted when the multisig account's metadata is updated. + + +
struct MetadataUpdatedEvent has drop, store
+
+ + + +
+Fields + + +
+
+old_metadata: simple_map::SimpleMap<string::String, vector<u8>> +
+
+ +
+
+new_metadata: simple_map::SimpleMap<string::String, vector<u8>> +
+
+ +
+
+ + +
+ + + +## Struct `MetadataUpdated` + + + +
#[event]
+struct MetadataUpdated has drop, store
+
+ + + +
+Fields + + +
+
+multisig_account: address +
+
+ +
+
+old_metadata: simple_map::SimpleMap<string::String, vector<u8>> +
+
+ +
+
+new_metadata: simple_map::SimpleMap<string::String, vector<u8>> +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const ZERO_AUTH_KEY: vector<u8> = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
+
+ + + + + +The salt used to create a resource account during multisig account creation. +This is used to avoid conflicts with other modules that also create resource accounts with the same owner +account. + + +
const DOMAIN_SEPARATOR: vector<u8> = [97, 112, 116, 111, 115, 95, 102, 114, 97, 109, 101, 119, 111, 114, 107, 58, 58, 109, 117, 108, 116, 105, 115, 105, 103, 95, 97, 99, 99, 111, 117, 110, 116];
+
+ + + + + +Specified account is not a multisig account. + + +
const EACCOUNT_NOT_MULTISIG: u64 = 2002;
+
+ + + + + +The specified metadata contains duplicate attributes (keys). + + +
const EDUPLICATE_METADATA_KEY: u64 = 16;
+
+ + + + + +Owner list cannot contain the same address more than once. + + +
const EDUPLICATE_OWNER: u64 = 1;
+
+ + + + + +Payload hash must be exactly 32 bytes (sha3-256). + + +
const EINVALID_PAYLOAD_HASH: u64 = 12;
+
+ + + + + +The sequence number provided is invalid. It must be between [1, next pending transaction - 1]. + + +
const EINVALID_SEQUENCE_NUMBER: u64 = 17;
+
+ + + + + +Number of signatures required must be more than zero and at most the total number of owners. + + +
const EINVALID_SIGNATURES_REQUIRED: u64 = 11;
+
+ + + + + +The number of pending transactions has exceeded the maximum allowed. + + +
const EMAX_PENDING_TRANSACTIONS_EXCEEDED: u64 = 19;
+
+ + + + + +Multisig accounts have not been enabled on this network yet. + + +
const EMULTISIG_ACCOUNTS_NOT_ENABLED_YET: u64 = 14;
+
+ + + + + +The multisig v2 enhancement feature is not enabled. + + +
const EMULTISIG_V2_ENHANCEMENT_NOT_ENABLED: u64 = 20;
+
+ + + + + +Transaction has not received enough approvals to be executed. + + +
const ENOT_ENOUGH_APPROVALS: u64 = 2009;
+
+ + + + + +Multisig account must have at least one owner. + + +
const ENOT_ENOUGH_OWNERS: u64 = 5;
+
+ + + + + +Transaction has not received enough rejections to be officially rejected. + + +
const ENOT_ENOUGH_REJECTIONS: u64 = 10;
+
+ + + + + +Account executing this operation is not an owner of the multisig account. + + +
const ENOT_OWNER: u64 = 2003;
+
+ + + + + +The number of metadata keys and values don't match. + + +
const ENUMBER_OF_METADATA_KEYS_AND_VALUES_DONT_MATCH: u64 = 15;
+
+ + + + + +Provided owners to remove and new owners overlap. + + +
const EOWNERS_TO_REMOVE_NEW_OWNERS_OVERLAP: u64 = 18;
+
+ + + + + +The multisig account itself cannot be an owner. + + +
const EOWNER_CANNOT_BE_MULTISIG_ACCOUNT_ITSELF: u64 = 13;
+
+ + + + + +Transaction payload cannot be empty. + + +
const EPAYLOAD_CANNOT_BE_EMPTY: u64 = 4;
+
+ + + + + +Provided target function does not match the payload stored in the on-chain transaction. + + +
const EPAYLOAD_DOES_NOT_MATCH: u64 = 2010;
+
+ + + + + +Provided target function does not match the hash stored in the on-chain transaction. + + +
const EPAYLOAD_DOES_NOT_MATCH_HASH: u64 = 2008;
+
+ + + + + +Transaction with specified id cannot be found. + + +
const ETRANSACTION_NOT_FOUND: u64 = 2006;
+
+ + + + + + + +
const MAX_PENDING_TRANSACTIONS: u64 = 20;
+
+ + + + + +## Function `metadata` + +Return the multisig account's metadata. + + +
#[view]
+public fun metadata(multisig_account: address): simple_map::SimpleMap<string::String, vector<u8>>
+
+ + + +
+Implementation + + +
public fun metadata(multisig_account: address): SimpleMap<String, vector<u8>> acquires MultisigAccount {
+    borrow_global<MultisigAccount>(multisig_account).metadata
+}
+
+ + + +
+ + + +## Function `num_signatures_required` + +Return the number of signatures required to execute or execute-reject a transaction in the provided +multisig account. + + +
#[view]
+public fun num_signatures_required(multisig_account: address): u64
+
+ + + +
+Implementation + + +
public fun num_signatures_required(multisig_account: address): u64 acquires MultisigAccount {
+    borrow_global<MultisigAccount>(multisig_account).num_signatures_required
+}
+
+ + + +
+ + + +## Function `owners` + +Return a vector of all of the provided multisig account's owners. + + +
#[view]
+public fun owners(multisig_account: address): vector<address>
+
+ + + +
+Implementation + + +
public fun owners(multisig_account: address): vector<address> acquires MultisigAccount {
+    borrow_global<MultisigAccount>(multisig_account).owners
+}
+
+ + + +
+ + + +## Function `is_owner` + +Return true if the provided owner is an owner of the provided multisig account. + + +
#[view]
+public fun is_owner(owner: address, multisig_account: address): bool
+
+ + + +
+Implementation + + +
public fun is_owner(owner: address, multisig_account: address): bool acquires MultisigAccount {
+    vector::contains(&borrow_global<MultisigAccount>(multisig_account).owners, &owner)
+}
+
+ + + +
+ + + +## Function `get_transaction` + +Return the transaction with the given transaction id. + + +
#[view]
+public fun get_transaction(multisig_account: address, sequence_number: u64): multisig_account::MultisigTransaction
+
+ + + +
+Implementation + + +
public fun get_transaction(
+    multisig_account: address,
+    sequence_number: u64,
+): MultisigTransaction acquires MultisigAccount {
+    let multisig_account_resource = borrow_global<MultisigAccount>(multisig_account);
+    assert!(
+        sequence_number > 0 && sequence_number < multisig_account_resource.next_sequence_number,
+        error::invalid_argument(EINVALID_SEQUENCE_NUMBER),
+    );
+    *table::borrow(&multisig_account_resource.transactions, sequence_number)
+}
+
+ + + +
+ + + +## Function `get_pending_transactions` + +Return all pending transactions. + + +
#[view]
+public fun get_pending_transactions(multisig_account: address): vector<multisig_account::MultisigTransaction>
+
+ + + +
+Implementation + + +
public fun get_pending_transactions(
+    multisig_account: address
+): vector<MultisigTransaction> acquires MultisigAccount {
+    let pending_transactions: vector<MultisigTransaction> = vector[];
+    let multisig_account = borrow_global<MultisigAccount>(multisig_account);
+    let i = multisig_account.last_executed_sequence_number + 1;
+    let next_sequence_number = multisig_account.next_sequence_number;
+    while (i < next_sequence_number) {
+        vector::push_back(&mut pending_transactions, *table::borrow(&multisig_account.transactions, i));
+        i = i + 1;
+    };
+    pending_transactions
+}
+
+ + + +
+ + + +## Function `get_next_transaction_payload` + +Return the payload for the next transaction in the queue. + + +
#[view]
+public fun get_next_transaction_payload(multisig_account: address, provided_payload: vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
public fun get_next_transaction_payload(
+    multisig_account: address, provided_payload: vector<u8>): vector<u8> acquires MultisigAccount {
+    let multisig_account_resource = borrow_global<MultisigAccount>(multisig_account);
+    let sequence_number = multisig_account_resource.last_executed_sequence_number + 1;
+    let transaction = table::borrow(&multisig_account_resource.transactions, sequence_number);
+
+    if (option::is_some(&transaction.payload)) {
+        *option::borrow(&transaction.payload)
+    } else {
+        provided_payload
+    }
+}
+
+ + + +
+ + + +## Function `can_be_executed` + +Return true if the transaction with given transaction id can be executed now. + + +
#[view]
+public fun can_be_executed(multisig_account: address, sequence_number: u64): bool
+
+ + + +
+Implementation + + +
public fun can_be_executed(multisig_account: address, sequence_number: u64): bool acquires MultisigAccount {
+    assert_valid_sequence_number(multisig_account, sequence_number);
+    let (num_approvals, _) = num_approvals_and_rejections(multisig_account, sequence_number);
+    sequence_number == last_resolved_sequence_number(multisig_account) + 1 &&
+        num_approvals >= num_signatures_required(multisig_account)
+}
+
+ + + +
+ + + +## Function `can_execute` + +Return true if the owner can execute the transaction with given transaction id now. + + +
#[view]
+public fun can_execute(owner: address, multisig_account: address, sequence_number: u64): bool
+
+ + + +
+Implementation + + +
public fun can_execute(owner: address, multisig_account: address, sequence_number: u64): bool acquires MultisigAccount {
+    assert_valid_sequence_number(multisig_account, sequence_number);
+    let (num_approvals, _) = num_approvals_and_rejections(multisig_account, sequence_number);
+    if (!has_voted_for_approval(multisig_account, sequence_number, owner)) {
+        num_approvals = num_approvals + 1;
+    };
+    is_owner(owner, multisig_account) &&
+        sequence_number == last_resolved_sequence_number(multisig_account) + 1 &&
+        num_approvals >= num_signatures_required(multisig_account)
+}
+
+ + + +
+ + + +## Function `can_be_rejected` + +Return true if the transaction with given transaction id can be officially rejected. + + +
#[view]
+public fun can_be_rejected(multisig_account: address, sequence_number: u64): bool
+
+ + + +
+Implementation + + +
public fun can_be_rejected(multisig_account: address, sequence_number: u64): bool acquires MultisigAccount {
+    assert_valid_sequence_number(multisig_account, sequence_number);
+    let (_, num_rejections) = num_approvals_and_rejections(multisig_account, sequence_number);
+    sequence_number == last_resolved_sequence_number(multisig_account) + 1 &&
+        num_rejections >= num_signatures_required(multisig_account)
+}
+
+ + + +
+ + + +## Function `can_reject` + +Return true if the owner can execute the "rejected" transaction with given transaction id now. + + +
#[view]
+public fun can_reject(owner: address, multisig_account: address, sequence_number: u64): bool
+
+ + + +
+Implementation + + +
public fun can_reject(owner: address, multisig_account: address, sequence_number: u64): bool acquires MultisigAccount {
+    assert_valid_sequence_number(multisig_account, sequence_number);
+    let (_, num_rejections) = num_approvals_and_rejections(multisig_account, sequence_number);
+    if (!has_voted_for_rejection(multisig_account, sequence_number, owner)) {
+        num_rejections = num_rejections + 1;
+    };
+    is_owner(owner, multisig_account) &&
+        sequence_number == last_resolved_sequence_number(multisig_account) + 1 &&
+        num_rejections >= num_signatures_required(multisig_account)
+}
+
+ + + +
+ + + +## Function `get_next_multisig_account_address` + +Return the predicted address for the next multisig account if created from the given creator address. + + +
#[view]
+public fun get_next_multisig_account_address(creator: address): address
+
+ + + +
+Implementation + + +
public fun get_next_multisig_account_address(creator: address): address {
+    let owner_nonce = account::get_sequence_number(creator);
+    create_resource_address(&creator, create_multisig_account_seed(to_bytes(&owner_nonce)))
+}
+
+ + + +
+ + + +## Function `last_resolved_sequence_number` + +Return the id of the last transaction that was executed (successful or failed) or removed. + + +
#[view]
+public fun last_resolved_sequence_number(multisig_account: address): u64
+
+ + + +
+Implementation + + +
public fun last_resolved_sequence_number(multisig_account: address): u64 acquires MultisigAccount {
+    let multisig_account_resource = borrow_global_mut<MultisigAccount>(multisig_account);
+    multisig_account_resource.last_executed_sequence_number
+}
+
+ + + +
+ + + +## Function `next_sequence_number` + +Return the id of the next transaction created. + + +
#[view]
+public fun next_sequence_number(multisig_account: address): u64
+
+ + + +
+Implementation + + +
public fun next_sequence_number(multisig_account: address): u64 acquires MultisigAccount {
+    let multisig_account_resource = borrow_global_mut<MultisigAccount>(multisig_account);
+    multisig_account_resource.next_sequence_number
+}
+
+ + + +
+ + + +## Function `vote` + +Return a bool tuple indicating whether an owner has voted and if so, whether they voted yes or no. + + +
#[view]
+public fun vote(multisig_account: address, sequence_number: u64, owner: address): (bool, bool)
+
+ + + +
+Implementation + + +
public fun vote(
+    multisig_account: address, sequence_number: u64, owner: address): (bool, bool) acquires MultisigAccount {
+    let multisig_account_resource = borrow_global_mut<MultisigAccount>(multisig_account);
+    assert!(
+        sequence_number > 0 && sequence_number < multisig_account_resource.next_sequence_number,
+        error::invalid_argument(EINVALID_SEQUENCE_NUMBER),
+    );
+    let transaction = table::borrow(&multisig_account_resource.transactions, sequence_number);
+    let votes = &transaction.votes;
+    let voted = simple_map::contains_key(votes, &owner);
+    let vote = voted && *simple_map::borrow(votes, &owner);
+    (voted, vote)
+}
+
+ + + +
+ + + +## Function `available_transaction_queue_capacity` + + + +
#[view]
+public fun available_transaction_queue_capacity(multisig_account: address): u64
+
+ + + +
+Implementation + + +
public fun available_transaction_queue_capacity(multisig_account: address): u64 acquires MultisigAccount {
+    let multisig_account_resource = borrow_global_mut<MultisigAccount>(multisig_account);
+    let num_pending_transactions = multisig_account_resource.next_sequence_number - multisig_account_resource.last_executed_sequence_number - 1;
+    if (num_pending_transactions > MAX_PENDING_TRANSACTIONS) {
+        0
+    } else {
+        MAX_PENDING_TRANSACTIONS - num_pending_transactions
+    }
+}
+
+ + + +
+ + + +## Function `create_with_existing_account` + +Creates a new multisig account on top of an existing account. + +This offers a migration path for an existing account with a multi-ed25519 auth key (native multisig account). +In order to ensure a malicious module cannot obtain backdoor control over an existing account, a signed message +with a valid signature from the account's auth key is required. + +Note that this does not revoke auth key-based control over the account. Owners should separately rotate the auth +key after they are fully migrated to the new multisig account. Alternatively, they can call +create_with_existing_account_and_revoke_auth_key instead. + + +
public entry fun create_with_existing_account(multisig_address: address, owners: vector<address>, num_signatures_required: u64, account_scheme: u8, account_public_key: vector<u8>, create_multisig_account_signed_message: vector<u8>, metadata_keys: vector<string::String>, metadata_values: vector<vector<u8>>)
+
+ + + +
+Implementation + + +
public entry fun create_with_existing_account(
+    multisig_address: address,
+    owners: vector<address>,
+    num_signatures_required: u64,
+    account_scheme: u8,
+    account_public_key: vector<u8>,
+    create_multisig_account_signed_message: vector<u8>,
+    metadata_keys: vector<String>,
+    metadata_values: vector<vector<u8>>,
+) acquires MultisigAccount {
+    // Migration path: layers a MultisigAccount resource on top of an existing account.
+    // Auth-key-based control is NOT revoked here; see
+    // create_with_existing_account_and_revoke_auth_key for the revoking variant.
+    // Verify that the `MultisigAccountCreationMessage` has the right information and is signed by the account
+    // owner's key.
+    let proof_challenge = MultisigAccountCreationMessage {
+        // Chain id, account address and current sequence number scope the signed
+        // message to this chain/account/state (presumably to prevent replay — this
+        // is the standard proof-challenge pattern; confirm in account module docs).
+        chain_id: chain_id::get(),
+        account_address: multisig_address,
+        sequence_number: account::get_sequence_number(multisig_address),
+        owners,
+        num_signatures_required,
+    };
+    account::verify_signed_message(
+        multisig_address,
+        account_scheme,
+        account_public_key,
+        create_multisig_account_signed_message,
+        proof_challenge,
+    );
+
+    // We create the signer for the multisig account here since this is required to add the MultisigAccount resource
+    // This should be safe and authorized because we have verified the signed message from the existing account
+    // that authorizes creating a multisig account with the specified owners and signature threshold.
+    let multisig_account = &create_signer(multisig_address);
+    create_with_owners_internal(
+        multisig_account,
+        owners,
+        num_signatures_required,
+        // No SignerCapability is stored for migrated accounts.
+        option::none<SignerCapability>(),
+        metadata_keys,
+        metadata_values,
+    );
+}
+
+ + + +
+ + + +## Function `create_with_existing_account_and_revoke_auth_key` + +Creates a new multisig account on top of an existing account and immediately rotates the original auth key to 0x0. + +Note: If the original account is a resource account, this does not revoke all control over it since, if any +SignerCapability of the resource account still exists, it can still be used to generate the signer for the +account. + + +
public entry fun create_with_existing_account_and_revoke_auth_key(multisig_address: address, owners: vector<address>, num_signatures_required: u64, account_scheme: u8, account_public_key: vector<u8>, create_multisig_account_signed_message: vector<u8>, metadata_keys: vector<string::String>, metadata_values: vector<vector<u8>>)
+
+ + + +
+Implementation + + +
public entry fun create_with_existing_account_and_revoke_auth_key(
+    multisig_address: address,
+    owners: vector<address>,
+    num_signatures_required: u64,
+    account_scheme: u8,
+    account_public_key: vector<u8>,
+    create_multisig_account_signed_message: vector<u8>,
+    metadata_keys: vector<String>,
+    metadata_values: vector<vector<u8>>,
+) acquires MultisigAccount {
+    // Same as create_with_existing_account, but additionally rotates the auth key to
+    // 0x0 and revokes any offered signer/rotation capabilities afterwards.
+    // Verify that the `MultisigAccountCreationMessage` has the right information and is signed by the account
+    // owner's key.
+    let proof_challenge = MultisigAccountCreationWithAuthKeyRevocationMessage {
+        // Chain id, account address and current sequence number scope the signed
+        // message to this chain/account/state (presumably to prevent replay — this
+        // is the standard proof-challenge pattern; confirm in account module docs).
+        chain_id: chain_id::get(),
+        account_address: multisig_address,
+        sequence_number: account::get_sequence_number(multisig_address),
+        owners,
+        num_signatures_required,
+    };
+    account::verify_signed_message(
+        multisig_address,
+        account_scheme,
+        account_public_key,
+        create_multisig_account_signed_message,
+        proof_challenge,
+    );
+
+    // We create the signer for the multisig account here since this is required to add the MultisigAccount resource
+    // This should be safe and authorized because we have verified the signed message from the existing account
+    // that authorizes creating a multisig account with the specified owners and signature threshold.
+    let multisig_account = &create_signer(multisig_address);
+    create_with_owners_internal(
+        multisig_account,
+        owners,
+        num_signatures_required,
+        option::none<SignerCapability>(),
+        metadata_keys,
+        metadata_values,
+    );
+
+    // Rotate the account's auth key to 0x0, which effectively revokes control via auth key.
+    let multisig_address = address_of(multisig_account);
+    account::rotate_authentication_key_internal(multisig_account, ZERO_AUTH_KEY);
+    // This also needs to revoke any signer capability or rotation capability that exists for the account to
+    // completely remove all access to the account.
+    // Per the module docs above: if this is a resource account, SignerCapabilities
+    // already handed out elsewhere can still produce the account's signer.
+    if (account::is_signer_capability_offered(multisig_address)) {
+        account::revoke_any_signer_capability(multisig_account);
+    };
+    if (account::is_rotation_capability_offered(multisig_address)) {
+        account::revoke_any_rotation_capability(multisig_account);
+    };
+}
+
+ + + +
+ + + +## Function `create` + +Creates a new multisig account and adds the signer as the single owner. + + +
public entry fun create(owner: &signer, num_signatures_required: u64, metadata_keys: vector<string::String>, metadata_values: vector<vector<u8>>)
+
+ + + +
+Implementation + + +
public entry fun create(
+    owner: &signer,
+    num_signatures_required: u64,
+    metadata_keys: vector<String>,
+    metadata_values: vector<vector<u8>>,
+) acquires MultisigAccount {
+    // No additional owners: create_with_owners appends the caller, so the signer
+    // becomes the sole owner of the new multisig account.
+    create_with_owners(owner, vector[], num_signatures_required, metadata_keys, metadata_values);
+}
+
+ + + +
+ + + +## Function `create_with_owners` + +Creates a new multisig account with the specified additional owner list and signatures required. + +@param additional_owners The owner account who calls this function cannot be in the additional_owners and there +cannot be any duplicate owners in the list. +@param num_signatures_required The number of signatures required to execute a transaction. Must be at least 1 and +at most the total number of owners. + + +
public entry fun create_with_owners(owner: &signer, additional_owners: vector<address>, num_signatures_required: u64, metadata_keys: vector<string::String>, metadata_values: vector<vector<u8>>)
+
+ + + +
+Implementation + + +
public entry fun create_with_owners(
+    owner: &signer,
+    additional_owners: vector<address>,
+    num_signatures_required: u64,
+    metadata_keys: vector<String>,
+    metadata_values: vector<vector<u8>>,
+) acquires MultisigAccount {
+    let (multisig_account, multisig_signer_cap) = create_multisig_account(owner);
+    // The caller is always added as an owner on top of additional_owners (per the doc
+    // above, additional_owners must not already contain the caller or any duplicates).
+    vector::push_back(&mut additional_owners, address_of(owner));
+    create_with_owners_internal(
+        &multisig_account,
+        additional_owners,
+        num_signatures_required,
+        // Store the SignerCapability of the freshly created account on the resource.
+        option::some(multisig_signer_cap),
+        metadata_keys,
+        metadata_values,
+    );
+}
+
+ + + +
+ + + +## Function `create_with_owners_then_remove_bootstrapper` + +Like create_with_owners, but removes the calling account after creation. + +This is for creating a vanity multisig account from a bootstrapping account that should not +be an owner after the vanity multisig address has been secured. + + +
public entry fun create_with_owners_then_remove_bootstrapper(bootstrapper: &signer, owners: vector<address>, num_signatures_required: u64, metadata_keys: vector<string::String>, metadata_values: vector<vector<u8>>)
+
+ + + +
+Implementation + + +
public entry fun create_with_owners_then_remove_bootstrapper(
+    bootstrapper: &signer,
+    owners: vector<address>,
+    num_signatures_required: u64,
+    metadata_keys: vector<String>,
+    metadata_values: vector<vector<u8>>,
+) acquires MultisigAccount {
+    let bootstrapper_address = address_of(bootstrapper);
+    // create_with_owners adds the bootstrapper as an owner; it is removed again below
+    // so it retains no ownership once the vanity multisig address is secured.
+    create_with_owners(
+        bootstrapper,
+        owners,
+        num_signatures_required,
+        metadata_keys,
+        metadata_values
+    );
+    // NOTE(review): assumes get_next_multisig_account_address(bootstrapper_address)
+    // still resolves to the account created just above at this point in the
+    // transaction — confirm against that function's implementation.
+    update_owner_schema(
+        get_next_multisig_account_address(bootstrapper_address),
+        vector[],
+        vector[bootstrapper_address],
+        option::none()
+    );
+}
+
+ + + +
+ + + +## Function `create_with_owners_internal` + + + +
fun create_with_owners_internal(multisig_account: &signer, owners: vector<address>, num_signatures_required: u64, multisig_account_signer_cap: option::Option<account::SignerCapability>, metadata_keys: vector<string::String>, metadata_values: vector<vector<u8>>)
+
+ + + +
+Implementation + + +
fun create_with_owners_internal(
+    multisig_account: &signer,
+    owners: vector<address>,
+    num_signatures_required: u64,
+    multisig_account_signer_cap: Option<SignerCapability>,
+    metadata_keys: vector<String>,
+    metadata_values: vector<vector<u8>>,
+) acquires MultisigAccount {
+    // Common creation path: validates inputs, publishes the MultisigAccount resource
+    // under `multisig_account`, then seeds the initial metadata.
+    assert!(features::multisig_accounts_enabled(), error::unavailable(EMULTISIG_ACCOUNTS_NOT_ENABLED_YET));
+    // Signature threshold must be in [1, number of owners].
+    assert!(
+        num_signatures_required > 0 && num_signatures_required <= vector::length(&owners),
+        error::invalid_argument(EINVALID_SIGNATURES_REQUIRED),
+    );
+
+    let multisig_address = address_of(multisig_account);
+    validate_owners(&owners, multisig_address);
+    move_to(multisig_account, MultisigAccount {
+        owners,
+        num_signatures_required,
+        transactions: table::new<u64, MultisigTransaction>(),
+        metadata: simple_map::create<String, vector<u8>>(),
+        // First transaction will start at id 1 instead of 0.
+        last_executed_sequence_number: 0,
+        next_sequence_number: 1,
+        signer_cap: multisig_account_signer_cap,
+        // Legacy event handles; module events may also be emitted elsewhere when the
+        // event-migration feature is enabled.
+        add_owners_events: new_event_handle<AddOwnersEvent>(multisig_account),
+        remove_owners_events: new_event_handle<RemoveOwnersEvent>(multisig_account),
+        update_signature_required_events: new_event_handle<UpdateSignaturesRequiredEvent>(multisig_account),
+        create_transaction_events: new_event_handle<CreateTransactionEvent>(multisig_account),
+        vote_events: new_event_handle<VoteEvent>(multisig_account),
+        execute_rejected_transaction_events: new_event_handle<ExecuteRejectedTransactionEvent>(multisig_account),
+        execute_transaction_events: new_event_handle<TransactionExecutionSucceededEvent>(multisig_account),
+        transaction_execution_failed_events: new_event_handle<TransactionExecutionFailedEvent>(multisig_account),
+        metadata_updated_events: new_event_handle<MetadataUpdatedEvent>(multisig_account),
+    });
+
+    // Seed initial metadata without emitting an update event (emit_event = false).
+    update_metadata_internal(multisig_account, metadata_keys, metadata_values, false);
+}
+
+ + + +
+ + + +## Function `add_owner` + +Similar to add_owners, but only allow adding one owner. + + +
entry fun add_owner(multisig_account: &signer, new_owner: address)
+
+ + + +
+Implementation + + +
entry fun add_owner(multisig_account: &signer, new_owner: address) acquires MultisigAccount {
+    // Single-owner convenience wrapper around add_owners.
+    add_owners(multisig_account, vector[new_owner]);
+}
+
+ + + +
+ + + +## Function `add_owners` + +Add new owners to the multisig account. This can only be invoked by the multisig account itself, through the +proposal flow. + +Note that this function is not public so it can only be invoked directly instead of via a module or script. This +ensures that a multisig transaction cannot lead to another module obtaining the multisig signer and using it to +maliciously alter the owners list. + + +
entry fun add_owners(multisig_account: &signer, new_owners: vector<address>)
+
+ + + +
+Implementation + + +
entry fun add_owners(
+    multisig_account: &signer, new_owners: vector<address>) acquires MultisigAccount {
+    // Self-governed (per the doc above): the signer must be the multisig account
+    // itself, reached via the proposal/execution flow. Adds owners, removes none,
+    // leaves the signature threshold unchanged.
+    update_owner_schema(
+        address_of(multisig_account),
+        new_owners,
+        vector[],
+        option::none()
+    );
+}
+
+ + + +
+ + + +## Function `add_owners_and_update_signatures_required` + +Add owners then update number of signatures required, in a single operation. + + +
entry fun add_owners_and_update_signatures_required(multisig_account: &signer, new_owners: vector<address>, new_num_signatures_required: u64)
+
+ + + +
+Implementation + + +
entry fun add_owners_and_update_signatures_required(
+    multisig_account: &signer,
+    new_owners: vector<address>,
+    new_num_signatures_required: u64
+) acquires MultisigAccount {
+    // Atomic schema update: add owners AND change the threshold in one operation.
+    update_owner_schema(
+        address_of(multisig_account),
+        new_owners,
+        vector[],
+        option::some(new_num_signatures_required)
+    );
+}
+
+ + + +
+ + + +## Function `remove_owner` + +Similar to remove_owners, but only allow removing one owner. + + +
entry fun remove_owner(multisig_account: &signer, owner_to_remove: address)
+
+ + + +
+Implementation + + +
entry fun remove_owner(
+    multisig_account: &signer, owner_to_remove: address) acquires MultisigAccount {
+    // Single-owner convenience wrapper around remove_owners.
+    remove_owners(multisig_account, vector[owner_to_remove]);
+}
+
+ + + +
+ + + +## Function `remove_owners` + +Remove owners from the multisig account. This can only be invoked by the multisig account itself, through the +proposal flow. + +This function skips any owners who are not in the multisig account's list of owners. +Note that this function is not public so it can only be invoked directly instead of via a module or script. This +ensures that a multisig transaction cannot lead to another module obtaining the multisig signer and using it to +maliciously alter the owners list. + + +
entry fun remove_owners(multisig_account: &signer, owners_to_remove: vector<address>)
+
+ + + +
+Implementation + + +
entry fun remove_owners(
+    multisig_account: &signer, owners_to_remove: vector<address>) acquires MultisigAccount {
+    // Self-governed (per the doc above). Per the doc, addresses that are not current
+    // owners are skipped. Removes owners only; threshold is unchanged.
+    update_owner_schema(
+        address_of(multisig_account),
+        vector[],
+        owners_to_remove,
+        option::none()
+    );
+}
+
+ + + +
+ + + +## Function `swap_owner` + +Swap an owner in for an old one, without changing required signatures. + + +
entry fun swap_owner(multisig_account: &signer, to_swap_in: address, to_swap_out: address)
+
+ + + +
+Implementation + + +
entry fun swap_owner(
+    multisig_account: &signer,
+    to_swap_in: address,
+    to_swap_out: address
+) acquires MultisigAccount {
+    // One-in/one-out owner swap; the signature threshold is unchanged.
+    update_owner_schema(
+        address_of(multisig_account),
+        vector[to_swap_in],
+        vector[to_swap_out],
+        option::none()
+    );
+}
+
+ + + +
+ + + +## Function `swap_owners` + +Swap owners in and out, without changing required signatures. + + +
entry fun swap_owners(multisig_account: &signer, to_swap_in: vector<address>, to_swap_out: vector<address>)
+
+ + + +
+Implementation + + +
entry fun swap_owners(
+    multisig_account: &signer,
+    to_swap_in: vector<address>,
+    to_swap_out: vector<address>
+) acquires MultisigAccount {
+    // Batch owner swap; the signature threshold is unchanged.
+    update_owner_schema(
+        address_of(multisig_account),
+        to_swap_in,
+        to_swap_out,
+        option::none()
+    );
+}
+
+ + + +
+ + + +## Function `swap_owners_and_update_signatures_required` + +Swap owners in and out, updating number of required signatures. + + +
entry fun swap_owners_and_update_signatures_required(multisig_account: &signer, new_owners: vector<address>, owners_to_remove: vector<address>, new_num_signatures_required: u64)
+
+ + + +
+Implementation + + +
entry fun swap_owners_and_update_signatures_required(
+    multisig_account: &signer,
+    new_owners: vector<address>,
+    owners_to_remove: vector<address>,
+    new_num_signatures_required: u64
+) acquires MultisigAccount {
+    // Atomic schema update: swap owners AND change the threshold in one operation.
+    update_owner_schema(
+        address_of(multisig_account),
+        new_owners,
+        owners_to_remove,
+        option::some(new_num_signatures_required)
+    );
+}
+
+ + + +
+ + + +## Function `update_signatures_required` + +Update the number of signatures required to execute transaction in the specified multisig account. + +This can only be invoked by the multisig account itself, through the proposal flow. +Note that this function is not public so it can only be invoked directly instead of via a module or script. This +ensures that a multisig transaction cannot lead to another module obtaining the multisig signer and using it to +maliciously alter the number of signatures required. + + +
entry fun update_signatures_required(multisig_account: &signer, new_num_signatures_required: u64)
+
+ + + +
+Implementation + + +
entry fun update_signatures_required(
+    multisig_account: &signer, new_num_signatures_required: u64) acquires MultisigAccount {
+    // Threshold-only change: no owners added or removed.
+    update_owner_schema(
+        address_of(multisig_account),
+        vector[],
+        vector[],
+        option::some(new_num_signatures_required)
+    );
+}
+
+ + + +
+ + + +## Function `update_metadata` + +Allow the multisig account to update its own metadata. Note that this overrides the entire existing metadata. +If any attributes are not specified in the metadata, they will be removed! + +This can only be invoked by the multisig account itself, through the proposal flow. +Note that this function is not public so it can only be invoked directly instead of via a module or script. This +ensures that a multisig transaction cannot lead to another module obtaining the multisig signer and using it to +maliciously alter the number of signatures required. + + +
entry fun update_metadata(multisig_account: &signer, keys: vector<string::String>, values: vector<vector<u8>>)
+
+ + + +
+Implementation + + +
entry fun update_metadata(
+    multisig_account: &signer, keys: vector<String>, values: vector<vector<u8>>) acquires MultisigAccount {
+    // Full overwrite of the metadata map (per the doc above, keys absent here are
+    // removed); emit_event = true so a MetadataUpdated event is emitted.
+    update_metadata_internal(multisig_account, keys, values, true);
+}
+
+ + + +
+ + + +## Function `update_metadata_internal` + + + +
fun update_metadata_internal(multisig_account: &signer, keys: vector<string::String>, values: vector<vector<u8>>, emit_event: bool)
+
+ + + +
+Implementation + + +
fun update_metadata_internal(
+    multisig_account: &signer,
+    keys: vector<String>,
+    values: vector<vector<u8>>,
+    emit_event: bool,
+) acquires MultisigAccount {
+    // keys and values are parallel vectors; their lengths must match.
+    let num_attributes = vector::length(&keys);
+    assert!(
+        num_attributes == vector::length(&values),
+        error::invalid_argument(ENUMBER_OF_METADATA_KEYS_AND_VALUES_DONT_MATCH),
+    );
+
+    let multisig_address = address_of(multisig_account);
+    assert_multisig_account_exists(multisig_address);
+    let multisig_account_resource = borrow_global_mut<MultisigAccount>(multisig_address);
+    // Keep the old map for the event payload, then rebuild metadata from scratch —
+    // this is a full overwrite, not a merge.
+    let old_metadata = multisig_account_resource.metadata;
+    multisig_account_resource.metadata = simple_map::create<String, vector<u8>>();
+    let metadata = &mut multisig_account_resource.metadata;
+    let i = 0;
+    while (i < num_attributes) {
+        let key = *vector::borrow(&keys, i);
+        let value = *vector::borrow(&values, i);
+        // Duplicate keys in the input are rejected rather than last-write-wins.
+        assert!(
+            !simple_map::contains_key(metadata, &key),
+            error::invalid_argument(EDUPLICATE_METADATA_KEY),
+        );
+
+        simple_map::add(metadata, key, value);
+        i = i + 1;
+    };
+
+    // emit_event is false on the account-creation path (initial metadata) and true
+    // for subsequent updates via update_metadata.
+    if (emit_event) {
+        // Module event only when the migration feature is on; the legacy
+        // event-handle event is always emitted.
+        if (std::features::module_event_migration_enabled()) {
+            emit(
+                MetadataUpdated {
+                    multisig_account: multisig_address,
+                    old_metadata,
+                    new_metadata: multisig_account_resource.metadata,
+                }
+            )
+        };
+        emit_event(
+            &mut multisig_account_resource.metadata_updated_events,
+            MetadataUpdatedEvent {
+                old_metadata,
+                new_metadata: multisig_account_resource.metadata,
+            }
+        );
+    };
+}
+
+ + + +
+ + + +## Function `create_transaction` + +Create a multisig transaction, which will have one approval initially (from the creator). + + +
public entry fun create_transaction(owner: &signer, multisig_account: address, payload: vector<u8>)
+
+ + + +
+Implementation + + +
public entry fun create_transaction(
+    owner: &signer,
+    multisig_account: address,
+    payload: vector<u8>,
+) acquires MultisigAccount {
+    // Stores the full payload on-chain (payload_hash is none); see
+    // create_transaction_with_hash for the hash-only, gas-saving variant.
+    assert!(vector::length(&payload) > 0, error::invalid_argument(EPAYLOAD_CANNOT_BE_EMPTY));
+
+    assert_multisig_account_exists(multisig_account);
+    assert_is_owner(owner, multisig_account);
+
+    let creator = address_of(owner);
+    let transaction = MultisigTransaction {
+        payload: option::some(payload),
+        payload_hash: option::none<vector<u8>>(),
+        // Votes start empty here; per the doc above the creator's initial approval is
+        // presumably recorded inside add_transaction — confirm there.
+        votes: simple_map::create<address, bool>(),
+        creator,
+        creation_time_secs: now_seconds(),
+    };
+    add_transaction(creator, multisig_account, transaction);
+}
+
+ + + +
+ + + +## Function `create_transaction_with_hash` + +Create a multisig transaction with a transaction hash instead of the full payload. +This means the payload will be stored off chain for gas saving. Later, during execution, the executor will need +to provide the full payload, which will be validated against the hash stored on-chain. + + +
public entry fun create_transaction_with_hash(owner: &signer, multisig_account: address, payload_hash: vector<u8>)
+
+ + + +
+Implementation + + +
public entry fun create_transaction_with_hash(
+    owner: &signer,
+    multisig_account: address,
+    payload_hash: vector<u8>,
+) acquires MultisigAccount {
+    // Per the doc above, the payload stays off-chain; at execution time the provided
+    // payload is validated against this hash.
+    // Payload hash is a sha3-256 hash, so it must be exactly 32 bytes.
+    assert!(vector::length(&payload_hash) == 32, error::invalid_argument(EINVALID_PAYLOAD_HASH));
+
+    assert_multisig_account_exists(multisig_account);
+    assert_is_owner(owner, multisig_account);
+
+    let creator = address_of(owner);
+    let transaction = MultisigTransaction {
+        payload: option::none<vector<u8>>(),
+        payload_hash: option::some(payload_hash),
+        // Votes start empty here; per the doc above the creator's initial approval is
+        // presumably recorded inside add_transaction — confirm there.
+        votes: simple_map::create<address, bool>(),
+        creator,
+        creation_time_secs: now_seconds(),
+    };
+    add_transaction(creator, multisig_account, transaction);
+}
+
+ + + +
+ + + +## Function `approve_transaction` + +Approve a multisig transaction. + + +
public entry fun approve_transaction(owner: &signer, multisig_account: address, sequence_number: u64)
+
+ + + +
+Implementation + + +
public entry fun approve_transaction(
+    owner: &signer, multisig_account: address, sequence_number: u64) acquires MultisigAccount {
+    // Convenience wrapper: an approval is a `true` vote.
+    vote_transanction(owner, multisig_account, sequence_number, true);
+}
+
+ + + +
+ + + +## Function `reject_transaction` + +Reject a multisig transaction. + + +
public entry fun reject_transaction(owner: &signer, multisig_account: address, sequence_number: u64)
+
+ + + +
+Implementation + + +
public entry fun reject_transaction(
+    owner: &signer, multisig_account: address, sequence_number: u64) acquires MultisigAccount {
+    // Convenience wrapper: a rejection is a `false` vote.
+    vote_transanction(owner, multisig_account, sequence_number, false);
+}
+
+ + + +
+ + + +## Function `vote_transanction` + +Generic function that can be used to either approve or reject a multisig transaction +Retained for backward compatibility: the function with the typographical error in its name +will continue to be an accessible entry point. + + +
public entry fun vote_transanction(owner: &signer, multisig_account: address, sequence_number: u64, approved: bool)
+
+ + + +
+Implementation + + +
public entry fun vote_transanction(
+    owner: &signer, multisig_account: address, sequence_number: u64, approved: bool) acquires MultisigAccount {
+    // NOTE: the misspelled name ("transanction") is intentionally retained as an
+    // accessible entry point for backward compatibility (see the doc above);
+    // vote_transaction is the correctly named, feature-gated wrapper.
+    assert_multisig_account_exists(multisig_account);
+    let multisig_account_resource = borrow_global_mut<MultisigAccount>(multisig_account);
+    assert_is_owner_internal(owner, multisig_account_resource);
+
+    assert!(
+        table::contains(&multisig_account_resource.transactions, sequence_number),
+        error::not_found(ETRANSACTION_NOT_FOUND),
+    );
+    let transaction = table::borrow_mut(&mut multisig_account_resource.transactions, sequence_number);
+    let votes = &mut transaction.votes;
+    let owner_addr = address_of(owner);
+
+    // Upsert: an owner may change a previously cast vote.
+    if (simple_map::contains_key(votes, &owner_addr)) {
+        *simple_map::borrow_mut(votes, &owner_addr) = approved;
+    } else {
+        simple_map::add(votes, owner_addr, approved);
+    };
+
+    // Module event only when the migration feature is on; the legacy event-handle
+    // event is always emitted.
+    if (std::features::module_event_migration_enabled()) {
+        emit(
+            Vote {
+                multisig_account,
+                owner: owner_addr,
+                sequence_number,
+                approved,
+            }
+        );
+    };
+    emit_event(
+        &mut multisig_account_resource.vote_events,
+        VoteEvent {
+            owner: owner_addr,
+            sequence_number,
+            approved,
+        }
+    );
+}
+
+ + + +
+ + + +## Function `vote_transaction` + +Generic function that can be used to either approve or reject a multisig transaction + + +
public entry fun vote_transaction(owner: &signer, multisig_account: address, sequence_number: u64, approved: bool)
+
+ + + +
+Implementation + + +
public entry fun vote_transaction(
+    owner: &signer, multisig_account: address, sequence_number: u64, approved: bool) acquires MultisigAccount {
+    // Correctly spelled, feature-gated alias for vote_transanction (which is kept
+    // under its misspelled name for backward compatibility).
+    assert!(features::multisig_v2_enhancement_feature_enabled(), error::invalid_state(EMULTISIG_V2_ENHANCEMENT_NOT_ENABLED));
+    vote_transanction(owner, multisig_account, sequence_number, approved);
+}
+
+ + + +
+ + + +## Function `vote_transactions` + +Generic function that can be used to either approve or reject a batch of transactions within a specified range. + + +
public entry fun vote_transactions(owner: &signer, multisig_account: address, starting_sequence_number: u64, final_sequence_number: u64, approved: bool)
+
+ + + +
+Implementation + + +
public entry fun vote_transactions(
+    owner: &signer, multisig_account: address, starting_sequence_number: u64, final_sequence_number: u64, approved: bool) acquires MultisigAccount {
+    assert!(features::multisig_v2_enhancement_feature_enabled(), error::invalid_state(EMULTISIG_V2_ENHANCEMENT_NOT_ENABLED));
+    // Casts the same vote on every transaction in the inclusive range
+    // [starting_sequence_number, final_sequence_number].
+    let sequence_number = starting_sequence_number;
+    while(sequence_number <= final_sequence_number) {
+        vote_transanction(owner, multisig_account, sequence_number, approved);
+        sequence_number = sequence_number + 1;
+    }
+}
+
+ + + +
+ + + +## Function `execute_rejected_transaction` + +Remove the next transaction if it has sufficient owner rejections. + + +
public entry fun execute_rejected_transaction(owner: &signer, multisig_account: address)
+
+ + + +
+Implementation + + +
public entry fun execute_rejected_transaction(
+    owner: &signer,
+    multisig_account: address,
+) acquires MultisigAccount {
+    // Removes the next unresolved transaction, provided it has gathered at least
+    // num_signatures_required rejections.
+    assert_multisig_account_exists(multisig_account);
+    assert_is_owner(owner, multisig_account);
+
+    // The transaction being resolved is always the one right after the last
+    // resolved sequence number.
+    let sequence_number = last_resolved_sequence_number(multisig_account) + 1;
+    let owner_addr = address_of(owner);
+    if (features::multisig_v2_enhancement_feature_enabled()) {
+        // Implicitly vote for rejection if the owner has not voted for rejection yet.
+        if (!has_voted_for_rejection(multisig_account, sequence_number, owner_addr)) {
+            reject_transaction(owner, multisig_account, sequence_number);
+        }
+    };
+
+    let multisig_account_resource = borrow_global_mut<MultisigAccount>(multisig_account);
+    let (_, num_rejections) = remove_executed_transaction(multisig_account_resource);
+    // Not enough rejections → abort; the whole transaction, including the removal
+    // above, reverts.
+    assert!(
+        num_rejections >= multisig_account_resource.num_signatures_required,
+        error::invalid_state(ENOT_ENOUGH_REJECTIONS),
+    );
+
+    // Module event only when the migration feature is on; the legacy event-handle
+    // event is always emitted. (address_of(owner) and owner_addr are the same value.)
+    if (std::features::module_event_migration_enabled()) {
+        emit(
+            ExecuteRejectedTransaction {
+                multisig_account,
+                sequence_number,
+                num_rejections,
+                executor: address_of(owner),
+            }
+        );
+    };
+    emit_event(
+        &mut multisig_account_resource.execute_rejected_transaction_events,
+        ExecuteRejectedTransactionEvent {
+            sequence_number,
+            num_rejections,
+            executor: owner_addr,
+        }
+    );
+}
+
+ + + +
+ + + +## Function `execute_rejected_transactions` + +Remove the next transactions until the final_sequence_number if they have sufficient owner rejections. + + +
public entry fun execute_rejected_transactions(owner: &signer, multisig_account: address, final_sequence_number: u64)
+
+ + + +
+Implementation + + +
public entry fun execute_rejected_transactions(
+    owner: &signer,
+    multisig_account: address,
+    final_sequence_number: u64,
+) acquires MultisigAccount {
+    assert!(features::multisig_v2_enhancement_feature_enabled(), error::invalid_state(EMULTISIG_V2_ENHANCEMENT_NOT_ENABLED));
+    assert!(last_resolved_sequence_number(multisig_account) < final_sequence_number, error::invalid_argument(EINVALID_SEQUENCE_NUMBER));
+    assert!(final_sequence_number < next_sequence_number(multisig_account), error::invalid_argument(EINVALID_SEQUENCE_NUMBER));
+    while(last_resolved_sequence_number(multisig_account) < final_sequence_number) {
+        execute_rejected_transaction(owner, multisig_account);
+    }
+}
+
+ + + +
+ + + +## Function `validate_multisig_transaction` + +Called by the VM as part of transaction prologue, which is invoked during mempool transaction validation and as +the first step of transaction execution. + +Transaction payload is optional if it's already stored on chain for the transaction. + + +
fun validate_multisig_transaction(owner: &signer, multisig_account: address, payload: vector<u8>)
+
+ + + +
+Implementation + + +
fun validate_multisig_transaction(
+    owner: &signer, multisig_account: address, payload: vector<u8>) acquires MultisigAccount {
+    assert_multisig_account_exists(multisig_account);
+    assert_is_owner(owner, multisig_account);
+    let sequence_number = last_resolved_sequence_number(multisig_account) + 1;
+    assert_transaction_exists(multisig_account, sequence_number);
+
+    if (features::multisig_v2_enhancement_feature_enabled()) {
+        assert!(
+            can_execute(address_of(owner), multisig_account, sequence_number),
+            error::invalid_argument(ENOT_ENOUGH_APPROVALS),
+        );
+    }
+    else {
+        assert!(
+            can_be_executed(multisig_account, sequence_number),
+            error::invalid_argument(ENOT_ENOUGH_APPROVALS),
+        );
+    };
+
+    // If the transaction payload is not stored on chain, verify that the provided payload matches the hashes stored
+    // on chain.
+    let multisig_account_resource = borrow_global<MultisigAccount>(multisig_account);
+    let transaction = table::borrow(&multisig_account_resource.transactions, sequence_number);
+    if (option::is_some(&transaction.payload_hash)) {
+        let payload_hash = option::borrow(&transaction.payload_hash);
+        assert!(
+            sha3_256(payload) == *payload_hash,
+            error::invalid_argument(EPAYLOAD_DOES_NOT_MATCH_HASH),
+        );
+    };
+
+    // If the transaction payload is stored on chain and there is a provided payload,
+    // verify that the provided payload matches the stored payload.
+    if (features::abort_if_multisig_payload_mismatch_enabled()
+        && option::is_some(&transaction.payload)
+        && !vector::is_empty(&payload)
+    ) {
+        let stored_payload = option::borrow(&transaction.payload);
+        assert!(
+            payload == *stored_payload,
+            error::invalid_argument(EPAYLOAD_DOES_NOT_MATCH),
+        );
+    }
+}
+
+ + + +
+ + + +## Function `successful_transaction_execution_cleanup` + +Post-execution cleanup for a successful multisig transaction execution. +This function is private so no other code can call this beside the VM itself as part of MultisigTransaction. + + +
fun successful_transaction_execution_cleanup(executor: address, multisig_account: address, transaction_payload: vector<u8>)
+
+ + + +
+Implementation + + +
fun successful_transaction_execution_cleanup(
+    executor: address,
+    multisig_account: address,
+    transaction_payload: vector<u8>,
+) acquires MultisigAccount {
+    let num_approvals = transaction_execution_cleanup_common(executor, multisig_account);
+    let multisig_account_resource = borrow_global_mut<MultisigAccount>(multisig_account);
+    if (std::features::module_event_migration_enabled()) {
+        emit(
+            TransactionExecutionSucceeded {
+                multisig_account,
+                sequence_number: multisig_account_resource.last_executed_sequence_number,
+                transaction_payload,
+                num_approvals,
+                executor,
+            }
+        );
+    };
+    emit_event(
+        &mut multisig_account_resource.execute_transaction_events,
+        TransactionExecutionSucceededEvent {
+            sequence_number: multisig_account_resource.last_executed_sequence_number,
+            transaction_payload,
+            num_approvals,
+            executor,
+        }
+    );
+}
+
+ + + +
+ + + +## Function `failed_transaction_execution_cleanup` + +Post-execution cleanup for a failed multisig transaction execution. +This function is private so no other code can call this beside the VM itself as part of MultisigTransaction. + + +
fun failed_transaction_execution_cleanup(executor: address, multisig_account: address, transaction_payload: vector<u8>, execution_error: multisig_account::ExecutionError)
+
+ + + +
+Implementation + + +
fun failed_transaction_execution_cleanup(
+    executor: address,
+    multisig_account: address,
+    transaction_payload: vector<u8>,
+    execution_error: ExecutionError,
+) acquires MultisigAccount {
+    let num_approvals = transaction_execution_cleanup_common(executor, multisig_account);
+    let multisig_account_resource = borrow_global_mut<MultisigAccount>(multisig_account);
+    if (std::features::module_event_migration_enabled()) {
+        emit(
+            TransactionExecutionFailed {
+                multisig_account,
+                executor,
+                sequence_number: multisig_account_resource.last_executed_sequence_number,
+                transaction_payload,
+                num_approvals,
+                execution_error,
+            }
+        );
+    };
+    emit_event(
+        &mut multisig_account_resource.transaction_execution_failed_events,
+        TransactionExecutionFailedEvent {
+            executor,
+            sequence_number: multisig_account_resource.last_executed_sequence_number,
+            transaction_payload,
+            num_approvals,
+            execution_error,
+        }
+    );
+}
+
+ + + +
+ + + +## Function `transaction_execution_cleanup_common` + + + +
fun transaction_execution_cleanup_common(executor: address, multisig_account: address): u64
+
+ + + +
+Implementation + + +
inline fun transaction_execution_cleanup_common(executor: address, multisig_account: address): u64 acquires MultisigAccount {
+    let sequence_number = last_resolved_sequence_number(multisig_account) + 1;
+    let implicit_approval = !has_voted_for_approval(multisig_account, sequence_number, executor);
+
+    let multisig_account_resource = borrow_global_mut<MultisigAccount>(multisig_account);
+    let (num_approvals, _) = remove_executed_transaction(multisig_account_resource);
+
+    if (features::multisig_v2_enhancement_feature_enabled() && implicit_approval) {
+        if (std::features::module_event_migration_enabled()) {
+            emit(
+                Vote {
+                    multisig_account,
+                    owner: executor,
+                    sequence_number,
+                    approved: true,
+                }
+            );
+        };
+        num_approvals = num_approvals + 1;
+        emit_event(
+            &mut multisig_account_resource.vote_events,
+            VoteEvent {
+                owner: executor,
+                sequence_number,
+                approved: true,
+            }
+        );
+    };
+
+    num_approvals
+}
+
+ + + +
+ + + +## Function `remove_executed_transaction` + + + +
fun remove_executed_transaction(multisig_account_resource: &mut multisig_account::MultisigAccount): (u64, u64)
+
+ + + +
+Implementation + + +
fun remove_executed_transaction(multisig_account_resource: &mut MultisigAccount): (u64, u64) {
+    let sequence_number = multisig_account_resource.last_executed_sequence_number + 1;
+    let transaction = table::remove(&mut multisig_account_resource.transactions, sequence_number);
+    multisig_account_resource.last_executed_sequence_number = sequence_number;
+    num_approvals_and_rejections_internal(&multisig_account_resource.owners, &transaction)
+}
+
+ + + +
+ + + +## Function `add_transaction` + + + +
fun add_transaction(creator: address, multisig_account: address, transaction: multisig_account::MultisigTransaction)
+
+ + + +
+Implementation + + +
inline fun add_transaction(
+    creator: address,
+    multisig_account: address,
+    transaction: MultisigTransaction
+) {
+    if (features::multisig_v2_enhancement_feature_enabled()) {
+        assert!(
+            available_transaction_queue_capacity(multisig_account) > 0,
+            error::invalid_state(EMAX_PENDING_TRANSACTIONS_EXCEEDED)
+        );
+    };
+
+    let multisig_account_resource = borrow_global_mut<MultisigAccount>(multisig_account);
+
+    // The transaction creator also automatically votes for the transaction.
+    simple_map::add(&mut transaction.votes, creator, true);
+
+    let sequence_number = multisig_account_resource.next_sequence_number;
+    multisig_account_resource.next_sequence_number = sequence_number + 1;
+    table::add(&mut multisig_account_resource.transactions, sequence_number, transaction);
+    if (std::features::module_event_migration_enabled()) {
+        emit(
+            CreateTransaction { multisig_account: multisig_account, creator, sequence_number, transaction }
+        );
+    };
+    emit_event(
+        &mut multisig_account_resource.create_transaction_events,
+        CreateTransactionEvent { creator, sequence_number, transaction },
+    );
+}
+
+ + + +
+ + + +## Function `create_multisig_account` + + + +
fun create_multisig_account(owner: &signer): (signer, account::SignerCapability)
+
+ + + +
+Implementation + + +
fun create_multisig_account(owner: &signer): (signer, SignerCapability) {
+    let owner_nonce = account::get_sequence_number(address_of(owner));
+    let (multisig_signer, multisig_signer_cap) =
+        account::create_resource_account(owner, create_multisig_account_seed(to_bytes(&owner_nonce)));
+    // Register the account to receive APT as this is not done by default as part of the resource account creation
+    // flow.
+    if (!coin::is_account_registered<AptosCoin>(address_of(&multisig_signer))) {
+        coin::register<AptosCoin>(&multisig_signer);
+    };
+
+    (multisig_signer, multisig_signer_cap)
+}
+
+ + + +
+ + + +## Function `create_multisig_account_seed` + + + +
fun create_multisig_account_seed(seed: vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
fun create_multisig_account_seed(seed: vector<u8>): vector<u8> {
+    // Generate a seed that will be used to create the resource account that hosts the multisig account.
+    let multisig_account_seed = vector::empty<u8>();
+    vector::append(&mut multisig_account_seed, DOMAIN_SEPARATOR);
+    vector::append(&mut multisig_account_seed, seed);
+
+    multisig_account_seed
+}
+
+ + + +
+ + + +## Function `validate_owners` + + + +
fun validate_owners(owners: &vector<address>, multisig_account: address)
+
+ + + +
+Implementation + + +
fun validate_owners(owners: &vector<address>, multisig_account: address) {
+    let distinct_owners: vector<address> = vector[];
+    vector::for_each_ref(owners, |owner| {
+        let owner = *owner;
+        assert!(owner != multisig_account, error::invalid_argument(EOWNER_CANNOT_BE_MULTISIG_ACCOUNT_ITSELF));
+        let (found, _) = vector::index_of(&distinct_owners, &owner);
+        assert!(!found, error::invalid_argument(EDUPLICATE_OWNER));
+        vector::push_back(&mut distinct_owners, owner);
+    });
+}
+
+ + + +
+ + + +## Function `assert_is_owner_internal` + + + +
fun assert_is_owner_internal(owner: &signer, multisig_account: &multisig_account::MultisigAccount)
+
+ + + +
+Implementation + + +
inline fun assert_is_owner_internal(owner: &signer, multisig_account: &MultisigAccount) {
+    assert!(
+        vector::contains(&multisig_account.owners, &address_of(owner)),
+        error::permission_denied(ENOT_OWNER),
+    );
+}
+
+ + + +
+ + + +## Function `assert_is_owner` + + + +
fun assert_is_owner(owner: &signer, multisig_account: address)
+
+ + + +
+Implementation + + +
inline fun assert_is_owner(owner: &signer, multisig_account: address) acquires MultisigAccount {
+    let multisig_account_resource = borrow_global<MultisigAccount>(multisig_account);
+    assert_is_owner_internal(owner, multisig_account_resource);
+}
+
+ + + +
+ + + +## Function `num_approvals_and_rejections_internal` + + + +
fun num_approvals_and_rejections_internal(owners: &vector<address>, transaction: &multisig_account::MultisigTransaction): (u64, u64)
+
+ + + +
+Implementation + + +
inline fun num_approvals_and_rejections_internal(owners: &vector<address>, transaction: &MultisigTransaction): (u64, u64) {
+    let num_approvals = 0;
+    let num_rejections = 0;
+
+    let votes = &transaction.votes;
+    vector::for_each_ref(owners, |owner| {
+        if (simple_map::contains_key(votes, owner)) {
+            if (*simple_map::borrow(votes, owner)) {
+                num_approvals = num_approvals + 1;
+            } else {
+                num_rejections = num_rejections + 1;
+            };
+        }
+    });
+
+    (num_approvals, num_rejections)
+}
+
+ + + +
+ + + +## Function `num_approvals_and_rejections` + + + +
fun num_approvals_and_rejections(multisig_account: address, sequence_number: u64): (u64, u64)
+
+ + + +
+Implementation + + +
inline fun num_approvals_and_rejections(multisig_account: address, sequence_number: u64): (u64, u64) acquires MultisigAccount {
+    let multisig_account_resource = borrow_global<MultisigAccount>(multisig_account);
+    let transaction = table::borrow(&multisig_account_resource.transactions, sequence_number);
+    num_approvals_and_rejections_internal(&multisig_account_resource.owners, transaction)
+}
+
+ + + +
+ + + +## Function `has_voted_for_approval` + + + +
fun has_voted_for_approval(multisig_account: address, sequence_number: u64, owner: address): bool
+
+ + + +
+Implementation + + +
inline fun has_voted_for_approval(multisig_account: address, sequence_number: u64, owner: address): bool acquires MultisigAccount {
+    let (voted, vote) = vote(multisig_account, sequence_number, owner);
+    voted && vote
+}
+
+ + + +
+ + + +## Function `has_voted_for_rejection` + + + +
fun has_voted_for_rejection(multisig_account: address, sequence_number: u64, owner: address): bool
+
+ + + +
+Implementation + + +
inline fun has_voted_for_rejection(multisig_account: address, sequence_number: u64, owner: address): bool acquires MultisigAccount {
+    let (voted, vote) = vote(multisig_account, sequence_number, owner);
+    voted && !vote
+}
+
+ + + +
+ + + +## Function `assert_multisig_account_exists` + + + +
fun assert_multisig_account_exists(multisig_account: address)
+
+ + + +
+Implementation + + +
inline fun assert_multisig_account_exists(multisig_account: address) {
+    assert!(exists<MultisigAccount>(multisig_account), error::invalid_state(EACCOUNT_NOT_MULTISIG));
+}
+
+ + + +
+ + + +## Function `assert_valid_sequence_number` + + + +
fun assert_valid_sequence_number(multisig_account: address, sequence_number: u64)
+
+ + + +
+Implementation + + +
inline fun assert_valid_sequence_number(multisig_account: address, sequence_number: u64) acquires MultisigAccount {
+    let multisig_account_resource = borrow_global<MultisigAccount>(multisig_account);
+    assert!(
+        sequence_number > 0 && sequence_number < multisig_account_resource.next_sequence_number,
+        error::invalid_argument(EINVALID_SEQUENCE_NUMBER),
+    );
+}
+
+ + + +
+ + + +## Function `assert_transaction_exists` + + + +
fun assert_transaction_exists(multisig_account: address, sequence_number: u64)
+
+ + + +
+Implementation + + +
inline fun assert_transaction_exists(multisig_account: address, sequence_number: u64) acquires MultisigAccount {
+    let multisig_account_resource = borrow_global<MultisigAccount>(multisig_account);
+    assert!(
+        table::contains(&multisig_account_resource.transactions, sequence_number),
+        error::not_found(ETRANSACTION_NOT_FOUND),
+    );
+}
+
+ + + +
+ + + +## Function `update_owner_schema` + +Add new owners, remove owners to remove, update signatures required. + + +
fun update_owner_schema(multisig_address: address, new_owners: vector<address>, owners_to_remove: vector<address>, optional_new_num_signatures_required: option::Option<u64>)
+
+ + + +
+Implementation + + +
fun update_owner_schema(
+    multisig_address: address,
+    new_owners: vector<address>,
+    owners_to_remove: vector<address>,
+    optional_new_num_signatures_required: Option<u64>,
+) acquires MultisigAccount {
+    assert_multisig_account_exists(multisig_address);
+    let multisig_account_ref_mut =
+        borrow_global_mut<MultisigAccount>(multisig_address);
+    // Verify no overlap between new owners and owners to remove.
+    vector::for_each_ref(&new_owners, |new_owner_ref| {
+        assert!(
+            !vector::contains(&owners_to_remove, new_owner_ref),
+            error::invalid_argument(EOWNERS_TO_REMOVE_NEW_OWNERS_OVERLAP)
+        )
+    });
+    // If new owners provided, try to add them and emit an event.
+    if (vector::length(&new_owners) > 0) {
+        vector::append(&mut multisig_account_ref_mut.owners, new_owners);
+        validate_owners(
+            &multisig_account_ref_mut.owners,
+            multisig_address
+        );
+        if (std::features::module_event_migration_enabled()) {
+            emit(AddOwners { multisig_account: multisig_address, owners_added: new_owners });
+        };
+        emit_event(
+            &mut multisig_account_ref_mut.add_owners_events,
+            AddOwnersEvent { owners_added: new_owners }
+        );
+    };
+    // If owners to remove provided, try to remove them.
+    if (vector::length(&owners_to_remove) > 0) {
+        let owners_ref_mut = &mut multisig_account_ref_mut.owners;
+        let owners_removed = vector[];
+        vector::for_each_ref(&owners_to_remove, |owner_to_remove_ref| {
+            let (found, index) =
+                vector::index_of(owners_ref_mut, owner_to_remove_ref);
+            if (found) {
+                vector::push_back(
+                    &mut owners_removed,
+                    vector::swap_remove(owners_ref_mut, index)
+                );
+            }
+        });
+        // Only emit event if owner(s) actually removed.
+        if (vector::length(&owners_removed) > 0) {
+            if (std::features::module_event_migration_enabled()) {
+                emit(
+                    RemoveOwners { multisig_account: multisig_address, owners_removed }
+                );
+            };
+            emit_event(
+                &mut multisig_account_ref_mut.remove_owners_events,
+                RemoveOwnersEvent { owners_removed }
+            );
+        }
+    };
+    // If new signature count provided, try to update count.
+    if (option::is_some(&optional_new_num_signatures_required)) {
+        let new_num_signatures_required =
+            option::extract(&mut optional_new_num_signatures_required);
+        assert!(
+            new_num_signatures_required > 0,
+            error::invalid_argument(EINVALID_SIGNATURES_REQUIRED)
+        );
+        let old_num_signatures_required =
+            multisig_account_ref_mut.num_signatures_required;
+        // Only apply update and emit event if a change indicated.
+        if (new_num_signatures_required != old_num_signatures_required) {
+            multisig_account_ref_mut.num_signatures_required =
+                new_num_signatures_required;
+            if (std::features::module_event_migration_enabled()) {
+                emit(
+                    UpdateSignaturesRequired {
+                        multisig_account: multisig_address,
+                        old_num_signatures_required,
+                        new_num_signatures_required,
+                    }
+                );
+            };
+            emit_event(
+                &mut multisig_account_ref_mut.update_signature_required_events,
+                UpdateSignaturesRequiredEvent {
+                    old_num_signatures_required,
+                    new_num_signatures_required,
+                }
+            );
+        }
+    };
+    // Verify number of owners.
+    let num_owners = vector::length(&multisig_account_ref_mut.owners);
+    assert!(
+        num_owners >= multisig_account_ref_mut.num_signatures_required,
+        error::invalid_state(ENOT_ENOUGH_OWNERS)
+    );
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1For every multi-signature account, the range of required signatures should always be in the range of one to the total number of owners.CriticalWhile creating a MultisigAccount, the function create_with_owners_internal checks that num_signatures_required is in the span from 1 to total count of owners.This has been audited.
2The list of owners for a multi-signature account should not contain any duplicate owners, and the multi-signature account itself cannot be listed as one of its owners.CriticalThe function validate_owners validates the owner vector to ensure that no duplicate entries exist.This has been audited.
3The current value of the next sequence number should not be present in the transaction table, until the next sequence number gets increased.MediumThe add_transaction function increases the next sequence number and only then adds the transaction with the old next sequence number to the transaction table.This has been audited.
4When the last executed sequence number is smaller than the next sequence number by only one unit, no transactions should exist in the multi-signature account's transactions list.HighThe get_pending_transactions function retrieves pending transactions by iterating through the transactions table, starting from the last_executed_sequence_number + 1 to the next_sequence_number.Audited that MultisigAccount.transactions is empty when last_executed_sequence_number == next_sequence_number - 1.
5The last executed sequence number is always smaller than the next sequence number.MediumWhen creating a new MultisigAccount, the last_executed_sequence_number and next_sequence_number are assigned with 0 and 1 respectively, and from there both these values increase monotonically when a transaction is executed and removed from the table and when new transaction are added respectively.This has been audited.
6The number of pending transactions should be equal to the difference between the next sequence number and the last executed sequence number.HighWhen a transaction is added, next_sequence_number is incremented. And when a transaction is removed after execution, last_executed_sequence_number is incremented.This has been audited.
7Only transactions with valid sequence number should be fetched.MediumFunctions such as: 1. get_transaction 2. can_be_executed 3. can_be_rejected 4. vote always validate the given sequence number and only then fetch the associated transaction.Audited that it aborts if the sequence number is not valid.
8The execution or rejection of a transaction should enforce that the minimum number of required signatures is less than or equal to the total number of approvals.CriticalThe functions can_be_executed and can_be_rejected perform validation on the number of votes required for execution or rejection.Audited that these functions return the correct value.
9The creation of a multi-signature account properly initializes the resources and then it gets published under the corresponding account.MediumWhen creating a MultisigAccount via one of the functions: create_with_existing_account, create_with_existing_account_and_revoke_auth_key, create_with_owners, create, the MultisigAccount data is initialized properly and published to the multisig_account (new or existing).Audited that the MultisigAccount is initialized properly.
10Creation of a multi-signature account on top of an existing account should revoke auth key and any previous offered capabilities or control.CriticalThe function create_with_existing_account_and_revoke_auth_key, after successfully creating the MultisigAccount, rotates the account to ZeroAuthKey and revokes any offered capabilities of that account.Audited that the account's auth key and the offered capabilities are revoked.
11Upon the creation of a multi-signature account from a bootstrapping account, the ownership of the resultant account should not pertain to the bootstrapping account.HighIn create_with_owners_then_remove_bootstrapper function after successful creation of the account the bootstrapping account is removed from the owner vector of the account.Audited that the bootstrapping account is not in the owners list.
12Performing any changes on the list of owners such as adding new owners, removing owners, swapping owners should ensure that the number of required signatures for the multi-signature account remains valid.CriticalThe following functions are used to modify the owners list and the required signatures of the account: add_owner, add_owners, add_owners_and_update_signatures_required, remove_owner, remove_owners, swap_owner, swap_owners, swap_owners_and_update_signatures_required, update_signatures_required. All of these functions use the update_owner_schema function to process these changes; the function validates the owner list while adding and verifies that the account has enough required signatures and updates the owner's schema.Audited that the owners are added successfully. (add_owner, add_owners, add_owners_and_update_signatures_required, swap_owner, swap_owners, swap_owners_and_update_signatures_required, update_owner_schema) Audited that the owners are removed successfully. (remove_owner, remove_owners, swap_owner, swap_owners, swap_owners_and_update_signatures_required, update_owner_schema) Audited that the num_signatures_required is updated successfully. (add_owners_and_update_signatures_required, swap_owners_and_update_signatures_required, update_signatures_required, update_owner_schema)
13The creation of a transaction should be limited to an account owner, which should be automatically considered a voter; additionally, the account's sequence should increase monotonically.CriticalThe following functions can only be called by the owners of the account and create a transaction and use the add_transaction function to give approval on behalf of the creator and increment the next_sequence_number and finally add the transaction to the MultisigAccount: create_transaction_with_hash, create_transaction.Audited it aborts if the caller is not in the owner's list of the account. (create_transaction_with_hash, create_transaction) Audited that the transaction is successfully stored in the MultisigAccount.(create_transaction_with_hash, create_transaction, add_transaction) Audited that the creators voted to approve the transaction. (create_transaction_with_hash, create_transaction, add_transaction) Audited that the next_sequence_number increases monotonically. (create_transaction_with_hash, create_transaction, add_transaction)
14Only owners are allowed to vote for a valid transaction.CriticalAny owner of the MultisigAccount can either approve (approve_transaction) or reject (reject_transaction) a transaction. Both these functions use a generic function to vote for the transaction which validates the caller and the transaction id and adds/updates the vote.Audited that it aborts if the caller is not in the owner's list (approve_transaction, reject_transaction, vote_transaction, assert_is_owner). Audited that it aborts if the transaction with the given sequence number doesn't exist in the account (approve_transaction, reject_transaction, vote_transaction). Audited that the vote is recorded as intended.
15Only owners are allowed to execute a valid transaction, if the number of approvals meets the k-of-n criteria, finally the executed transaction should be removed.CriticalFunctions execute_rejected_transaction and validate_multisig_transaction can only be called by the owner which validates the transaction and based on the number of approvals and rejections it proceeds to execute the transactions. For rejected transaction, the transactions are immediately removed from the MultisigAccount via remove_executed_transaction. VM validates the transaction via validate_multisig_transaction and cleans up the transaction via successful_transaction_execution_cleanup and failed_transaction_execution_cleanup.Audited that it aborts if the caller is not in the owner's list (execute_rejected_transaction, validate_multisig_transaction). Audited that it aborts if the transaction with the given sequence number doesn't exist in the account (execute_rejected_transaction, validate_multisig_transaction). Audited that it aborts if the votes (approvals or rejections) are less than num_signatures_required (execute_rejected_transaction, validate_multisig_transaction). Audited that the transaction is removed from the MultisigAccount (execute_rejected_transaction, remove_executed_transaction, successful_transaction_execution_cleanup, failed_transaction_execution_cleanup).
16Removing an executed transaction from the transactions list should increase the last sequence number monotonically.HighWhen transactions are removed via remove_executed_transaction (maybe called by VM cleanup or execute_rejected_transaction), the last_executed_sequence_number increases by 1.Audited that last_executed_sequence_number is incremented.
17The voting and transaction creation operations should only be available if a multi-signature account exists.LowThe function assert_multisig_account_exists validates the existence of MultisigAccount under the account.Audited that it aborts if the MultisigAccount doesn't exist on the account.
+ + + + + +### Module-level Specification + + + + +### Function `metadata` + + +
#[view]
+public fun metadata(multisig_account: address): simple_map::SimpleMap<string::String, vector<u8>>
+
+ + + + +
aborts_if !exists<MultisigAccount>(multisig_account);
+ensures result == global<MultisigAccount>(multisig_account).metadata;
+
+ + + + + +### Function `num_signatures_required` + + +
#[view]
+public fun num_signatures_required(multisig_account: address): u64
+
+ + + + +
aborts_if !exists<MultisigAccount>(multisig_account);
+ensures result == global<MultisigAccount>(multisig_account).num_signatures_required;
+
+ + + + + +### Function `owners` + + +
#[view]
+public fun owners(multisig_account: address): vector<address>
+
+ + + + +
aborts_if !exists<MultisigAccount>(multisig_account);
+ensures result == global<MultisigAccount>(multisig_account).owners;
+
+ + + + + +### Function `get_transaction` + + +
#[view]
+public fun get_transaction(multisig_account: address, sequence_number: u64): multisig_account::MultisigTransaction
+
+ + + + +
let multisig_account_resource = global<MultisigAccount>(multisig_account);
+aborts_if !exists<MultisigAccount>(multisig_account);
+aborts_if sequence_number == 0 || sequence_number >= multisig_account_resource.next_sequence_number;
+aborts_if !table::spec_contains(multisig_account_resource.transactions, sequence_number);
+ensures result == table::spec_get(multisig_account_resource.transactions, sequence_number);
+
+ + + + + +### Function `get_next_transaction_payload` + + +
#[view]
+public fun get_next_transaction_payload(multisig_account: address, provided_payload: vector<u8>): vector<u8>
+
+ + + + +
let multisig_account_resource = global<MultisigAccount>(multisig_account);
+let sequence_number = multisig_account_resource.last_executed_sequence_number + 1;
+let transaction = table::spec_get(multisig_account_resource.transactions, sequence_number);
+aborts_if !exists<MultisigAccount>(multisig_account);
+aborts_if multisig_account_resource.last_executed_sequence_number + 1 > MAX_U64;
+aborts_if !table::spec_contains(multisig_account_resource.transactions, sequence_number);
+ensures option::spec_is_none(transaction.payload) ==> result == provided_payload;
+
+ + + + + +### Function `get_next_multisig_account_address` + + +
#[view]
+public fun get_next_multisig_account_address(creator: address): address
+
+ + + + +
aborts_if !exists<account::Account>(creator);
+let owner_nonce = global<account::Account>(creator).sequence_number;
+
+ + + + + +### Function `last_resolved_sequence_number` + + +
#[view]
+public fun last_resolved_sequence_number(multisig_account: address): u64
+
+ + + + +
let multisig_account_resource = global<MultisigAccount>(multisig_account);
+aborts_if !exists<MultisigAccount>(multisig_account);
+ensures result == multisig_account_resource.last_executed_sequence_number;
+
+ + + + + +### Function `next_sequence_number` + + +
#[view]
+public fun next_sequence_number(multisig_account: address): u64
+
+ + + + +
let multisig_account_resource = global<MultisigAccount>(multisig_account);
+aborts_if !exists<MultisigAccount>(multisig_account);
+ensures result == multisig_account_resource.next_sequence_number;
+
+ + + + + +### Function `vote` + + +
#[view]
+public fun vote(multisig_account: address, sequence_number: u64, owner: address): (bool, bool)
+
+ + + + +
let multisig_account_resource = global<MultisigAccount>(multisig_account);
+aborts_if !exists<MultisigAccount>(multisig_account);
+aborts_if sequence_number == 0 || sequence_number >= multisig_account_resource.next_sequence_number;
+aborts_if !table::spec_contains(multisig_account_resource.transactions, sequence_number);
+let transaction = table::spec_get(multisig_account_resource.transactions, sequence_number);
+let votes = transaction.votes;
+let voted = simple_map::spec_contains_key(votes, owner);
+let vote = voted && simple_map::spec_get(votes, owner);
+ensures result_1 == voted;
+ensures result_2 == vote;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/object.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/object.md new file mode 100644 index 0000000000000..7a5f1c49ba86e --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/object.md @@ -0,0 +1,3398 @@ + + + +# Module `0x1::object` + +This defines the Move object model with the following properties: +- Simplified storage interface that supports a heterogeneous collection of resources to be +stored together. This enables data types to share a common core data layer (e.g., tokens), +while having richer extensions (e.g., concert ticket, sword). +- Globally accessible data and ownership model that enables creators and developers to dictate +the application and lifetime of data. +- Extensible programming model that supports individualization of user applications that +leverage the core framework including tokens. +- Support emitting events directly, thus improving discoverability of events associated with +objects. +- Considerate of the underlying system by leveraging resource groups for gas efficiency, +avoiding costly deserialization and serialization costs, and supporting deletability. + +TODO: +* There is no means to borrow an object or a reference to an object. We are exploring how to +make it so that a reference to a global object can be returned from a function. 
+ + +- [Resource `ObjectCore`](#0x1_object_ObjectCore) +- [Resource `TombStone`](#0x1_object_TombStone) +- [Resource `Untransferable`](#0x1_object_Untransferable) +- [Struct `ObjectGroup`](#0x1_object_ObjectGroup) +- [Struct `Object`](#0x1_object_Object) +- [Struct `ConstructorRef`](#0x1_object_ConstructorRef) +- [Struct `DeleteRef`](#0x1_object_DeleteRef) +- [Struct `ExtendRef`](#0x1_object_ExtendRef) +- [Struct `TransferRef`](#0x1_object_TransferRef) +- [Struct `LinearTransferRef`](#0x1_object_LinearTransferRef) +- [Struct `DeriveRef`](#0x1_object_DeriveRef) +- [Struct `TransferEvent`](#0x1_object_TransferEvent) +- [Struct `Transfer`](#0x1_object_Transfer) +- [Constants](#@Constants_0) +- [Function `is_untransferable`](#0x1_object_is_untransferable) +- [Function `is_burnt`](#0x1_object_is_burnt) +- [Function `address_to_object`](#0x1_object_address_to_object) +- [Function `is_object`](#0x1_object_is_object) +- [Function `object_exists`](#0x1_object_object_exists) +- [Function `create_object_address`](#0x1_object_create_object_address) +- [Function `create_user_derived_object_address_impl`](#0x1_object_create_user_derived_object_address_impl) +- [Function `create_user_derived_object_address`](#0x1_object_create_user_derived_object_address) +- [Function `create_guid_object_address`](#0x1_object_create_guid_object_address) +- [Function `exists_at`](#0x1_object_exists_at) +- [Function `object_address`](#0x1_object_object_address) +- [Function `convert`](#0x1_object_convert) +- [Function `create_named_object`](#0x1_object_create_named_object) +- [Function `create_user_derived_object`](#0x1_object_create_user_derived_object) +- [Function `create_object`](#0x1_object_create_object) +- [Function `create_sticky_object`](#0x1_object_create_sticky_object) +- [Function `create_sticky_object_at_address`](#0x1_object_create_sticky_object_at_address) +- [Function `create_object_from_account`](#0x1_object_create_object_from_account) +- [Function 
`create_object_from_object`](#0x1_object_create_object_from_object) +- [Function `create_object_from_guid`](#0x1_object_create_object_from_guid) +- [Function `create_object_internal`](#0x1_object_create_object_internal) +- [Function `generate_delete_ref`](#0x1_object_generate_delete_ref) +- [Function `generate_extend_ref`](#0x1_object_generate_extend_ref) +- [Function `generate_transfer_ref`](#0x1_object_generate_transfer_ref) +- [Function `generate_derive_ref`](#0x1_object_generate_derive_ref) +- [Function `generate_signer`](#0x1_object_generate_signer) +- [Function `address_from_constructor_ref`](#0x1_object_address_from_constructor_ref) +- [Function `object_from_constructor_ref`](#0x1_object_object_from_constructor_ref) +- [Function `can_generate_delete_ref`](#0x1_object_can_generate_delete_ref) +- [Function `create_guid`](#0x1_object_create_guid) +- [Function `new_event_handle`](#0x1_object_new_event_handle) +- [Function `address_from_delete_ref`](#0x1_object_address_from_delete_ref) +- [Function `object_from_delete_ref`](#0x1_object_object_from_delete_ref) +- [Function `delete`](#0x1_object_delete) +- [Function `generate_signer_for_extending`](#0x1_object_generate_signer_for_extending) +- [Function `address_from_extend_ref`](#0x1_object_address_from_extend_ref) +- [Function `disable_ungated_transfer`](#0x1_object_disable_ungated_transfer) +- [Function `set_untransferable`](#0x1_object_set_untransferable) +- [Function `enable_ungated_transfer`](#0x1_object_enable_ungated_transfer) +- [Function `generate_linear_transfer_ref`](#0x1_object_generate_linear_transfer_ref) +- [Function `transfer_with_ref`](#0x1_object_transfer_with_ref) +- [Function `transfer_call`](#0x1_object_transfer_call) +- [Function `transfer`](#0x1_object_transfer) +- [Function `transfer_raw`](#0x1_object_transfer_raw) +- [Function `transfer_raw_inner`](#0x1_object_transfer_raw_inner) +- [Function `transfer_to_object`](#0x1_object_transfer_to_object) +- [Function 
`verify_ungated_and_descendant`](#0x1_object_verify_ungated_and_descendant) +- [Function `burn`](#0x1_object_burn) +- [Function `unburn`](#0x1_object_unburn) +- [Function `ungated_transfer_allowed`](#0x1_object_ungated_transfer_allowed) +- [Function `owner`](#0x1_object_owner) +- [Function `is_owner`](#0x1_object_is_owner) +- [Function `owns`](#0x1_object_owns) +- [Function `root_owner`](#0x1_object_root_owner) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `address_to_object`](#@Specification_1_address_to_object) + - [Function `create_object_address`](#@Specification_1_create_object_address) + - [Function `create_user_derived_object_address_impl`](#@Specification_1_create_user_derived_object_address_impl) + - [Function `create_user_derived_object_address`](#@Specification_1_create_user_derived_object_address) + - [Function `create_guid_object_address`](#@Specification_1_create_guid_object_address) + - [Function `exists_at`](#@Specification_1_exists_at) + - [Function `object_address`](#@Specification_1_object_address) + - [Function `convert`](#@Specification_1_convert) + - [Function `create_named_object`](#@Specification_1_create_named_object) + - [Function `create_user_derived_object`](#@Specification_1_create_user_derived_object) + - [Function `create_object`](#@Specification_1_create_object) + - [Function `create_sticky_object`](#@Specification_1_create_sticky_object) + - [Function `create_sticky_object_at_address`](#@Specification_1_create_sticky_object_at_address) + - [Function `create_object_from_account`](#@Specification_1_create_object_from_account) + - [Function `create_object_from_object`](#@Specification_1_create_object_from_object) + - [Function `create_object_from_guid`](#@Specification_1_create_object_from_guid) + - [Function `create_object_internal`](#@Specification_1_create_object_internal) + - [Function 
`generate_delete_ref`](#@Specification_1_generate_delete_ref) + - [Function `generate_transfer_ref`](#@Specification_1_generate_transfer_ref) + - [Function `object_from_constructor_ref`](#@Specification_1_object_from_constructor_ref) + - [Function `create_guid`](#@Specification_1_create_guid) + - [Function `new_event_handle`](#@Specification_1_new_event_handle) + - [Function `object_from_delete_ref`](#@Specification_1_object_from_delete_ref) + - [Function `delete`](#@Specification_1_delete) + - [Function `disable_ungated_transfer`](#@Specification_1_disable_ungated_transfer) + - [Function `set_untransferable`](#@Specification_1_set_untransferable) + - [Function `enable_ungated_transfer`](#@Specification_1_enable_ungated_transfer) + - [Function `generate_linear_transfer_ref`](#@Specification_1_generate_linear_transfer_ref) + - [Function `transfer_with_ref`](#@Specification_1_transfer_with_ref) + - [Function `transfer_call`](#@Specification_1_transfer_call) + - [Function `transfer`](#@Specification_1_transfer) + - [Function `transfer_raw`](#@Specification_1_transfer_raw) + - [Function `transfer_to_object`](#@Specification_1_transfer_to_object) + - [Function `verify_ungated_and_descendant`](#@Specification_1_verify_ungated_and_descendant) + - [Function `burn`](#@Specification_1_burn) + - [Function `unburn`](#@Specification_1_unburn) + - [Function `ungated_transfer_allowed`](#@Specification_1_ungated_transfer_allowed) + - [Function `owner`](#@Specification_1_owner) + - [Function `is_owner`](#@Specification_1_is_owner) + - [Function `owns`](#@Specification_1_owns) + - [Function `root_owner`](#@Specification_1_root_owner) + + +
use 0x1::account;
+use 0x1::bcs;
+use 0x1::create_signer;
+use 0x1::error;
+use 0x1::event;
+use 0x1::features;
+use 0x1::from_bcs;
+use 0x1::guid;
+use 0x1::hash;
+use 0x1::signer;
+use 0x1::transaction_context;
+use 0x1::vector;
+
+ + + + + +## Resource `ObjectCore` + +The core of the object model that defines ownership, transferability, and events. + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct ObjectCore has key
+
+ + + +
+Fields + + +
+
+guid_creation_num: u64 +
+
+ Used by guid to guarantee globally unique objects and create event streams +
+
+owner: address +
+
+ The address (object or account) that owns this object +
+
+allow_ungated_transfer: bool +
+
+ Object transferring is a common operation, this allows for disabling and enabling + transfers bypassing the use of a TransferRef. +
+
+transfer_events: event::EventHandle<object::TransferEvent> +
+
+ Emitted events upon transferring of ownership. +
+
+ + +
+ + + +## Resource `TombStone` + +This is added to objects that are burnt (ownership transferred to BURN_ADDRESS). + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct TombStone has key
+
+ + + +
+Fields + + +
+
+original_owner: address +
+
+ Track the previous owner before the object is burnt so they can reclaim later if so desired. +
+
+ + +
+ + + +## Resource `Untransferable` + +The existence of this renders all TransferRefs irrelevant. The object cannot be moved. + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct Untransferable has key
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `ObjectGroup` + +A shared resource group for storing object resources together in storage. + + +
#[resource_group(#[scope = global])]
+struct ObjectGroup
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `Object` + +A pointer to an object -- these can only provide guarantees based upon the underlying data +type, that is the validity of T existing at an address is something that cannot be verified +by any other module than the module that defined T. Similarly, the module that defines T +can remove it from storage at any point in time. + + +
struct Object<T> has copy, drop, store
+
+ + + +
+Fields + + +
+
+inner: address +
+
+ +
+
+ + +
+ + + +## Struct `ConstructorRef` + +This is a one time ability given to the creator to configure the object as necessary + + +
struct ConstructorRef has drop
+
+ + + +
+Fields + + +
+
+self: address +
+
+ +
+
+can_delete: bool +
+
+ True if the object can be deleted. Named objects are not deletable. +
+
+ + +
+ + + +## Struct `DeleteRef` + +Used to remove an object from storage. + + +
struct DeleteRef has drop, store
+
+ + + +
+Fields + + +
+
+self: address +
+
+ +
+
+ + +
+ + + +## Struct `ExtendRef` + +Used to create events or move additional resources into object storage. + + +
struct ExtendRef has drop, store
+
+ + + +
+Fields + + +
+
+self: address +
+
+ +
+
+ + +
+ + + +## Struct `TransferRef` + +Used to create LinearTransferRef, hence ownership transfer. + + +
struct TransferRef has drop, store
+
+ + + +
+Fields + + +
+
+self: address +
+
+ +
+
+ + +
+ + + +## Struct `LinearTransferRef` + +Used to perform transfers. This locks transferring ability to a single time use bound to +the current owner. + + +
struct LinearTransferRef has drop
+
+ + + +
+Fields + + +
+
+self: address +
+
+ +
+
+owner: address +
+
+ +
+
+ + +
+ + + +## Struct `DeriveRef` + +Used to create derived objects from a given object. + + +
struct DeriveRef has drop, store
+
+ + + +
+Fields + + +
+
+self: address +
+
+ +
+
+ + +
+ + + +## Struct `TransferEvent` + +Emitted whenever the object's owner field is changed. + + +
struct TransferEvent has drop, store
+
+ + + +
+Fields + + +
+
+object: address +
+
+ +
+
+from: address +
+
+ +
+
+to: address +
+
+ +
+
+ + +
+ + + +## Struct `Transfer` + +Emitted whenever the object's owner field is changed. + + +
#[event]
+struct Transfer has drop, store
+
+ + + +
+Fields + + +
+
+object: address +
+
+ +
+
+from: address +
+
+ +
+
+to: address +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Address where unwanted objects can be forcefully transferred to. + + +
const BURN_ADDRESS: address = 0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff;
+
+ + + + + +generate_unique_address uses this for domain separation within its native implementation + + +
const DERIVE_AUID_ADDRESS_SCHEME: u8 = 251;
+
+ + + + + +The object does not allow for deletion + + +
const ECANNOT_DELETE: u64 = 5;
+
+ + + + + +Exceeds maximum nesting for an object transfer. + + +
const EMAXIMUM_NESTING: u64 = 6;
+
+ + + + + +The caller does not have ownership permissions + + +
const ENOT_OBJECT_OWNER: u64 = 4;
+
+ + + + + +The object does not have ungated transfers enabled + + +
const ENO_UNGATED_TRANSFERS: u64 = 3;
+
+ + + + + +An object does not exist at this address + + +
const EOBJECT_DOES_NOT_EXIST: u64 = 2;
+
+ + + + + +An object already exists at this address + + +
const EOBJECT_EXISTS: u64 = 1;
+
+ + + + + +Cannot reclaim objects that weren't burnt. + + +
const EOBJECT_NOT_BURNT: u64 = 8;
+
+ + + + + +Object is untransferable; any operations that might result in a transfer are disallowed. + + +
const EOBJECT_NOT_TRANSFERRABLE: u64 = 9;
+
+ + + + + +The resource is not stored at the specified address. + + +
const ERESOURCE_DOES_NOT_EXIST: u64 = 7;
+
+ + + + + +Explicitly separate the GUID space between Object and Account to prevent accidental overlap. + + +
const INIT_GUID_CREATION_NUM: u64 = 1125899906842624;
+
+ + + + + +Maximum nesting from one object to another. That is objects can technically have infinite +nesting, but any checks such as transfer will only be evaluated this deep. + + +
const MAXIMUM_OBJECT_NESTING: u8 = 8;
+
+ + + + + +Scheme identifier used to generate an object's address obj_addr as derived from another object. +The object's address is generated as: +``` +obj_addr = sha3_256(account addr | derived from object's address | 0xFC) +``` + +This 0xFC constant serves as a domain separation tag to prevent existing authentication key and resource account +derivation to produce an object address. + + +
const OBJECT_DERIVED_SCHEME: u8 = 252;
+
+ + + + + +Scheme identifier used to generate an object's address obj_addr via a fresh GUID generated by the creator at +source_addr. The object's address is generated as: +``` +obj_addr = sha3_256(guid | 0xFD) +``` +where guid = account::create_guid(create_signer(source_addr)) + +This 0xFD constant serves as a domain separation tag to prevent existing authentication key and resource account +derivation to produce an object address. + + +
const OBJECT_FROM_GUID_ADDRESS_SCHEME: u8 = 253;
+
+ + + + + +Scheme identifier used to generate an object's address obj_addr from the creator's source_addr and a seed as: +obj_addr = sha3_256(source_addr | seed | 0xFE). + +This 0xFE constant serves as a domain separation tag to prevent existing authentication key and resource account +derivation to produce an object address. + + +
const OBJECT_FROM_SEED_ADDRESS_SCHEME: u8 = 254;
+
+ + + + + +## Function `is_untransferable` + + + +
#[view]
+public fun is_untransferable<T: key>(object: object::Object<T>): bool
+
+ + + +
+Implementation + + +
public fun is_untransferable<T: key>(object: Object<T>): bool {
+    exists<Untransferable>(object.inner)
+}
+
+ + + +
+ + + +## Function `is_burnt` + + + +
#[view]
+public fun is_burnt<T: key>(object: object::Object<T>): bool
+
+ + + +
+Implementation + + +
public fun is_burnt<T: key>(object: Object<T>): bool {
+    exists<TombStone>(object.inner)
+}
+
+ + + +
+ + + +## Function `address_to_object` + +Produces an ObjectId from the given address. This is not verified. + + +
public fun address_to_object<T: key>(object: address): object::Object<T>
+
+ + + +
+Implementation + + +
public fun address_to_object<T: key>(object: address): Object<T> {
+    assert!(exists<ObjectCore>(object), error::not_found(EOBJECT_DOES_NOT_EXIST));
+    assert!(exists_at<T>(object), error::not_found(ERESOURCE_DOES_NOT_EXIST));
+    Object<T> { inner: object }
+}
+
+ + + +
+ + + +## Function `is_object` + +Returns true if there exists an object or the remnants of an object. + + +
public fun is_object(object: address): bool
+
+ + + +
+Implementation + + +
public fun is_object(object: address): bool {
+    exists<ObjectCore>(object)
+}
+
+ + + +
+ + + +## Function `object_exists` + +Returns true if there exists an object with resource T. + + +
public fun object_exists<T: key>(object: address): bool
+
+ + + +
+Implementation + + +
public fun object_exists<T: key>(object: address): bool {
+    exists<ObjectCore>(object) && exists_at<T>(object)
+}
+
+ + + +
+ + + +## Function `create_object_address` + +Derives an object address from source material: sha3_256([creator address | seed | 0xFE]). + + +
public fun create_object_address(source: &address, seed: vector<u8>): address
+
+ + + +
+Implementation + + +
public fun create_object_address(source: &address, seed: vector<u8>): address {
+    let bytes = bcs::to_bytes(source);
+    vector::append(&mut bytes, seed);
+    vector::push_back(&mut bytes, OBJECT_FROM_SEED_ADDRESS_SCHEME);
+    from_bcs::to_address(hash::sha3_256(bytes))
+}
+
+ + + +
+ + + +## Function `create_user_derived_object_address_impl` + + + +
fun create_user_derived_object_address_impl(source: address, derive_from: address): address
+
+ + + +
+Implementation + + +
native fun create_user_derived_object_address_impl(source: address, derive_from: address): address;
+
+ + + +
+ + + +## Function `create_user_derived_object_address` + +Derives an object address from the source address and an object: sha3_256([source | object addr | 0xFC]). + + +
public fun create_user_derived_object_address(source: address, derive_from: address): address
+
+ + + +
+Implementation + + +
public fun create_user_derived_object_address(source: address, derive_from: address): address {
+    if (std::features::object_native_derived_address_enabled()) {
+        create_user_derived_object_address_impl(source, derive_from)
+    } else {
+        let bytes = bcs::to_bytes(&source);
+        vector::append(&mut bytes, bcs::to_bytes(&derive_from));
+        vector::push_back(&mut bytes, OBJECT_DERIVED_SCHEME);
+        from_bcs::to_address(hash::sha3_256(bytes))
+    }
+}
+
+ + + +
+ + + +## Function `create_guid_object_address` + +Derives an object from an Account GUID. + + +
public fun create_guid_object_address(source: address, creation_num: u64): address
+
+ + + +
+Implementation + + +
public fun create_guid_object_address(source: address, creation_num: u64): address {
+    let id = guid::create_id(source, creation_num);
+    let bytes = bcs::to_bytes(&id);
+    vector::push_back(&mut bytes, OBJECT_FROM_GUID_ADDRESS_SCHEME);
+    from_bcs::to_address(hash::sha3_256(bytes))
+}
+
+ + + +
+ + + +## Function `exists_at` + + + +
fun exists_at<T: key>(object: address): bool
+
+ + + +
+Implementation + + +
native fun exists_at<T: key>(object: address): bool;
+
+ + + +
+ + + +## Function `object_address` + +Returns the address within an ObjectId. + + +
public fun object_address<T: key>(object: &object::Object<T>): address
+
+ + + +
+Implementation + + +
public fun object_address<T: key>(object: &Object<T>): address {
+    object.inner
+}
+
+ + + +
+ + + +## Function `convert` + +Convert Object to Object. + + +
public fun convert<X: key, Y: key>(object: object::Object<X>): object::Object<Y>
+
+ + + +
+Implementation + + +
public fun convert<X: key, Y: key>(object: Object<X>): Object<Y> {
+    address_to_object<Y>(object.inner)
+}
+
+ + + +
+ + + +## Function `create_named_object` + +Create a new named object and return the ConstructorRef. Named objects can be queried globally +by knowing the user generated seed used to create them. Named objects cannot be deleted. + + +
public fun create_named_object(creator: &signer, seed: vector<u8>): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_named_object(creator: &signer, seed: vector<u8>): ConstructorRef {
+    let creator_address = signer::address_of(creator);
+    let obj_addr = create_object_address(&creator_address, seed);
+    create_object_internal(creator_address, obj_addr, false)
+}
+
+ + + +
+ + + +## Function `create_user_derived_object` + +Create a new object whose address is derived based on the creator account address and another object. +Derived objects, similar to named objects, cannot be deleted. + + +
public(friend) fun create_user_derived_object(creator_address: address, derive_ref: &object::DeriveRef): object::ConstructorRef
+
+ + + +
+Implementation + + +
public(friend) fun create_user_derived_object(creator_address: address, derive_ref: &DeriveRef): ConstructorRef {
+    let obj_addr = create_user_derived_object_address(creator_address, derive_ref.self);
+    create_object_internal(creator_address, obj_addr, false)
+}
+
+ + + +
+ + + +## Function `create_object` + +Create a new object by generating a random unique address based on transaction hash. +The unique address is computed sha3_256([transaction hash | auid counter | 0xFB]). +The created object is deletable as we can guarantee the same unique address can +never be regenerated with future txs. + + +
public fun create_object(owner_address: address): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_object(owner_address: address): ConstructorRef {
+    let unique_address = transaction_context::generate_auid_address();
+    create_object_internal(owner_address, unique_address, true)
+}
+
+ + + +
+ + + +## Function `create_sticky_object` + +Same as create_object except the object to be created will be undeletable. + + +
public fun create_sticky_object(owner_address: address): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_sticky_object(owner_address: address): ConstructorRef {
+    let unique_address = transaction_context::generate_auid_address();
+    create_object_internal(owner_address, unique_address, false)
+}
+
+ + + +
+ + + +## Function `create_sticky_object_at_address` + +Create a sticky object at a specific address. Only used by aptos_framework::coin. + + +
public(friend) fun create_sticky_object_at_address(owner_address: address, object_address: address): object::ConstructorRef
+
+ + + +
+Implementation + + +
public(friend) fun create_sticky_object_at_address(
+    owner_address: address,
+    object_address: address,
+): ConstructorRef {
+    create_object_internal(owner_address, object_address, false)
+}
+
+ + + +
+ + + +## Function `create_object_from_account` + +Use create_object instead. +Create a new object from a GUID generated by an account. +As the GUID creation internally increments a counter, two transactions that execute +create_object_from_account function for the same creator run sequentially. +Therefore, using create_object method for creating objects is preferable as it +doesn't have the same bottlenecks. + + +
#[deprecated]
+public fun create_object_from_account(creator: &signer): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_object_from_account(creator: &signer): ConstructorRef {
+    let guid = account::create_guid(creator);
+    create_object_from_guid(signer::address_of(creator), guid)
+}
+
+ + + +
+ + + +## Function `create_object_from_object` + +Use create_object instead. +Create a new object from a GUID generated by an object. +As the GUID creation internally increments a counter, two transactions that execute +create_object_from_object function for the same creator run sequentially. +Therefore, using create_object method for creating objects is preferable as it +doesn't have the same bottlenecks. + + +
#[deprecated]
+public fun create_object_from_object(creator: &signer): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_object_from_object(creator: &signer): ConstructorRef acquires ObjectCore {
+    let guid = create_guid(creator);
+    create_object_from_guid(signer::address_of(creator), guid)
+}
+
+ + + +
+ + + +## Function `create_object_from_guid` + + + +
fun create_object_from_guid(creator_address: address, guid: guid::GUID): object::ConstructorRef
+
+ + + +
+Implementation + + +
fun create_object_from_guid(creator_address: address, guid: guid::GUID): ConstructorRef {
+    let bytes = bcs::to_bytes(&guid);
+    vector::push_back(&mut bytes, OBJECT_FROM_GUID_ADDRESS_SCHEME);
+    let obj_addr = from_bcs::to_address(hash::sha3_256(bytes));
+    create_object_internal(creator_address, obj_addr, true)
+}
+
+ + + +
+ + + +## Function `create_object_internal` + + + +
fun create_object_internal(creator_address: address, object: address, can_delete: bool): object::ConstructorRef
+
+ + + +
+Implementation + + +
fun create_object_internal(
+    creator_address: address,
+    object: address,
+    can_delete: bool,
+): ConstructorRef {
+    assert!(!exists<ObjectCore>(object), error::already_exists(EOBJECT_EXISTS));
+
+    let object_signer = create_signer(object);
+    let guid_creation_num = INIT_GUID_CREATION_NUM;
+    let transfer_events_guid = guid::create(object, &mut guid_creation_num);
+
+    move_to(
+        &object_signer,
+        ObjectCore {
+            guid_creation_num,
+            owner: creator_address,
+            allow_ungated_transfer: true,
+            transfer_events: event::new_event_handle(transfer_events_guid),
+        },
+    );
+    ConstructorRef { self: object, can_delete }
+}
+
+ + + +
+ + + +## Function `generate_delete_ref` + +Generates the DeleteRef, which can be used to remove ObjectCore from global storage. + + +
public fun generate_delete_ref(ref: &object::ConstructorRef): object::DeleteRef
+
+ + + +
+Implementation + + +
public fun generate_delete_ref(ref: &ConstructorRef): DeleteRef {
+    assert!(ref.can_delete, error::permission_denied(ECANNOT_DELETE));
+    DeleteRef { self: ref.self }
+}
+
+ + + +
+ + + +## Function `generate_extend_ref` + +Generates the ExtendRef, which can be used to add new events and resources to the object. + + +
public fun generate_extend_ref(ref: &object::ConstructorRef): object::ExtendRef
+
+ + + +
+Implementation + + +
public fun generate_extend_ref(ref: &ConstructorRef): ExtendRef {
+    ExtendRef { self: ref.self }
+}
+
+ + + +
+ + + +## Function `generate_transfer_ref` + +Generates the TransferRef, which can be used to manage object transfers. + + +
public fun generate_transfer_ref(ref: &object::ConstructorRef): object::TransferRef
+
+ + + +
+Implementation + + +
public fun generate_transfer_ref(ref: &ConstructorRef): TransferRef {
+    assert!(!exists<Untransferable>(ref.self), error::permission_denied(EOBJECT_NOT_TRANSFERRABLE));
+    TransferRef { self: ref.self }
+}
+
+ + + +
+ + + +## Function `generate_derive_ref` + +Generates the DeriveRef, which can be used to create deterministic derived objects from the current object. + + +
public fun generate_derive_ref(ref: &object::ConstructorRef): object::DeriveRef
+
+ + + +
+Implementation + + +
public fun generate_derive_ref(ref: &ConstructorRef): DeriveRef {
+    DeriveRef { self: ref.self }
+}
+
+ + + +
+ + + +## Function `generate_signer` + +Create a signer for the ConstructorRef + + +
public fun generate_signer(ref: &object::ConstructorRef): signer
+
+ + + +
+Implementation + + +
public fun generate_signer(ref: &ConstructorRef): signer {
+    create_signer(ref.self)
+}
+
+ + + +
+ + + +## Function `address_from_constructor_ref` + +Returns the address associated with the constructor + + +
public fun address_from_constructor_ref(ref: &object::ConstructorRef): address
+
+ + + +
+Implementation + + +
public fun address_from_constructor_ref(ref: &ConstructorRef): address {
+    ref.self
+}
+
+ + + +
+ + + +## Function `object_from_constructor_ref` + +Returns an Object from within a ConstructorRef + + +
public fun object_from_constructor_ref<T: key>(ref: &object::ConstructorRef): object::Object<T>
+
+ + + +
+Implementation + + +
public fun object_from_constructor_ref<T: key>(ref: &ConstructorRef): Object<T> {
+    address_to_object<T>(ref.self)
+}
+
+ + + +
+ + + +## Function `can_generate_delete_ref` + +Returns whether or not the ConstructorRef can be used to create DeleteRef + + +
public fun can_generate_delete_ref(ref: &object::ConstructorRef): bool
+
+ + + +
+Implementation + + +
public fun can_generate_delete_ref(ref: &ConstructorRef): bool {
+    ref.can_delete
+}
+
+ + + +
+ + + +## Function `create_guid` + +Create a guid for the object, typically used for events + + +
public fun create_guid(object: &signer): guid::GUID
+
+ + + +
+Implementation + + +
public fun create_guid(object: &signer): guid::GUID acquires ObjectCore {
+    let addr = signer::address_of(object);
+    let object_data = borrow_global_mut<ObjectCore>(addr);
+    guid::create(addr, &mut object_data.guid_creation_num)
+}
+
+ + + +
+ + + +## Function `new_event_handle` + +Generate a new event handle. + + +
public fun new_event_handle<T: drop, store>(object: &signer): event::EventHandle<T>
+
+ + + +
+Implementation + + +
public fun new_event_handle<T: drop + store>(
+    object: &signer,
+): event::EventHandle<T> acquires ObjectCore {
+    event::new_event_handle(create_guid(object))
+}
+
+ + + +
+ + + +## Function `address_from_delete_ref` + +Returns the address associated with the delete ref + + +
public fun address_from_delete_ref(ref: &object::DeleteRef): address
+
+ + + +
+Implementation + + +
public fun address_from_delete_ref(ref: &DeleteRef): address {
+    ref.self
+}
+
+ + + +
+ + + +## Function `object_from_delete_ref` + +Returns an Object from within a DeleteRef. + + +
public fun object_from_delete_ref<T: key>(ref: &object::DeleteRef): object::Object<T>
+
+ + + +
+Implementation + + +
public fun object_from_delete_ref<T: key>(ref: &DeleteRef): Object<T> {
+    address_to_object<T>(ref.self)
+}
+
+ + + +
+ + + +## Function `delete` + +Removes the specified Object from global storage. + + +
public fun delete(ref: object::DeleteRef)
+
+ + + +
+Implementation + + +
public fun delete(ref: DeleteRef) acquires Untransferable, ObjectCore {
+    let object_core = move_from<ObjectCore>(ref.self);
+    let ObjectCore {
+        guid_creation_num: _,
+        owner: _,
+        allow_ungated_transfer: _,
+        transfer_events,
+    } = object_core;
+
+    if (exists<Untransferable>(ref.self)) {
+      let Untransferable {} = move_from<Untransferable>(ref.self);
+    };
+
+    event::destroy_handle(transfer_events);
+}
+
+ + + +
+ + + +## Function `generate_signer_for_extending` + +Create a signer for the ExtendRef + + +
public fun generate_signer_for_extending(ref: &object::ExtendRef): signer
+
+ + + +
+Implementation + + +
public fun generate_signer_for_extending(ref: &ExtendRef): signer {
+    create_signer(ref.self)
+}
+
+ + + +
+ + + +## Function `address_from_extend_ref` + +Returns the address from within an ExtendRef. + + +
public fun address_from_extend_ref(ref: &object::ExtendRef): address
+
+ + + +
+Implementation + + +
public fun address_from_extend_ref(ref: &ExtendRef): address {
+    ref.self
+}
+
+ + + +
+ + + +## Function `disable_ungated_transfer` + +Disable direct transfer, transfers can only be triggered via a TransferRef + + +
public fun disable_ungated_transfer(ref: &object::TransferRef)
+
+ + + +
+Implementation + + +
public fun disable_ungated_transfer(ref: &TransferRef) acquires ObjectCore {
+    let object = borrow_global_mut<ObjectCore>(ref.self);
+    object.allow_ungated_transfer = false;
+}
+
+ + + +
+ + + +## Function `set_untransferable` + +Prevent moving of the object + + +
public fun set_untransferable(ref: &object::ConstructorRef)
+
+ + + +
+Implementation + + +
public fun set_untransferable(ref: &ConstructorRef) acquires ObjectCore {
+    let object = borrow_global_mut<ObjectCore>(ref.self);
+    object.allow_ungated_transfer = false;
+    let object_signer = generate_signer(ref);
+    move_to(&object_signer, Untransferable {});
+}
+
+ + + +
+ + + +## Function `enable_ungated_transfer` + +Enable direct transfer. + + +
public fun enable_ungated_transfer(ref: &object::TransferRef)
+
+ + + +
+Implementation + + +
public fun enable_ungated_transfer(ref: &TransferRef) acquires ObjectCore {
+    assert!(!exists<Untransferable>(ref.self), error::permission_denied(EOBJECT_NOT_TRANSFERRABLE));
+    let object = borrow_global_mut<ObjectCore>(ref.self);
+    object.allow_ungated_transfer = true;
+}
+
+ + + +
+ + + +## Function `generate_linear_transfer_ref` + +Create a LinearTransferRef for a one-time transfer. This requires that the owner at the +time of generation is the owner at the time of transferring. + + +
public fun generate_linear_transfer_ref(ref: &object::TransferRef): object::LinearTransferRef
+
+ + + +
+Implementation + + +
public fun generate_linear_transfer_ref(ref: &TransferRef): LinearTransferRef acquires ObjectCore {
+    assert!(!exists<Untransferable>(ref.self), error::permission_denied(EOBJECT_NOT_TRANSFERRABLE));
+    let owner = owner(Object<ObjectCore> { inner: ref.self });
+    LinearTransferRef {
+        self: ref.self,
+        owner,
+    }
+}
+
+ + + +
+ + + +## Function `transfer_with_ref` + +Transfer to the destination address using a LinearTransferRef. + + +
public fun transfer_with_ref(ref: object::LinearTransferRef, to: address)
+
+ + + +
+Implementation + + +
public fun transfer_with_ref(ref: LinearTransferRef, to: address) acquires ObjectCore, TombStone {
+    assert!(!exists<Untransferable>(ref.self), error::permission_denied(EOBJECT_NOT_TRANSFERRABLE));
+
+    // Undo soft burn if present as we don't want the original owner to be able to reclaim by calling unburn later.
+    if (exists<TombStone>(ref.self)) {
+        let TombStone { original_owner: _ } = move_from<TombStone>(ref.self);
+    };
+
+    let object = borrow_global_mut<ObjectCore>(ref.self);
+    assert!(
+        object.owner == ref.owner,
+        error::permission_denied(ENOT_OBJECT_OWNER),
+    );
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            Transfer {
+                object: ref.self,
+                from: object.owner,
+                to,
+            },
+        );
+    };
+    event::emit_event(
+        &mut object.transfer_events,
+        TransferEvent {
+            object: ref.self,
+            from: object.owner,
+            to,
+        },
+    );
+    object.owner = to;
+}
+
+ + + +
+ + + +## Function `transfer_call` + +Entry function that can be used to transfer, if allow_ungated_transfer is set true. + + +
public entry fun transfer_call(owner: &signer, object: address, to: address)
+
+ + + +
+Implementation + + +
public entry fun transfer_call(
+    owner: &signer,
+    object: address,
+    to: address,
+) acquires ObjectCore {
+    transfer_raw(owner, object, to)
+}
+
+ + + +
+ + + +## Function `transfer` + +Transfers ownership of the object (and all associated resources) at the specified address +for Object to the "to" address. + + +
public entry fun transfer<T: key>(owner: &signer, object: object::Object<T>, to: address)
+
+ + + +
+Implementation + + +
public entry fun transfer<T: key>(
+    owner: &signer,
+    object: Object<T>,
+    to: address,
+) acquires ObjectCore {
+    transfer_raw(owner, object.inner, to)
+}
+
+ + + +
+ + + +## Function `transfer_raw` + +Attempts to transfer using addresses only. Transfers the given object if +allow_ungated_transfer is set true. Note, that this allows the owner of a nested object to +transfer that object, so long as allow_ungated_transfer is enabled at each stage in the +hierarchy. + + +
public fun transfer_raw(owner: &signer, object: address, to: address)
+
+ + + +
+Implementation + + +
public fun transfer_raw(
+    owner: &signer,
+    object: address,
+    to: address,
+) acquires ObjectCore {
+    let owner_address = signer::address_of(owner);
+    verify_ungated_and_descendant(owner_address, object);
+    transfer_raw_inner(object, to);
+}
+
+ + + +
+ + + +## Function `transfer_raw_inner` + + + +
fun transfer_raw_inner(object: address, to: address)
+
+ + + +
+Implementation + + +
inline fun transfer_raw_inner(object: address, to: address) acquires ObjectCore {
+    let object_core = borrow_global_mut<ObjectCore>(object);
+    if (object_core.owner != to) {
+        if (std::features::module_event_migration_enabled()) {
+            event::emit(
+                Transfer {
+                    object,
+                    from: object_core.owner,
+                    to,
+                },
+            );
+        };
+        event::emit_event(
+            &mut object_core.transfer_events,
+            TransferEvent {
+                object,
+                from: object_core.owner,
+                to,
+            },
+        );
+        object_core.owner = to;
+    };
+}
+
+ + + +
+ + + +## Function `transfer_to_object` + +Transfer the given object to another object. See transfer for more information. + + +
public entry fun transfer_to_object<O: key, T: key>(owner: &signer, object: object::Object<O>, to: object::Object<T>)
+
+ + + +
+Implementation + + +
public entry fun transfer_to_object<O: key, T: key>(
+    owner: &signer,
+    object: Object<O>,
+    to: Object<T>,
+) acquires ObjectCore {
+    transfer(owner, object, to.inner)
+}
+
+ + + +
+ + + +## Function `verify_ungated_and_descendant` + +This checks that the destination address is eventually owned by the owner and that each +object between the two allows for ungated transfers. Note, this is limited to a depth of 8, as +objects may have cyclic dependencies. + + +
fun verify_ungated_and_descendant(owner: address, destination: address)
+
+ + + +
+Implementation + + +
fun verify_ungated_and_descendant(owner: address, destination: address) acquires ObjectCore {
+    let current_address = destination;
+    assert!(
+        exists<ObjectCore>(current_address),
+        error::not_found(EOBJECT_DOES_NOT_EXIST),
+    );
+
+    let object = borrow_global<ObjectCore>(current_address);
+    assert!(
+        object.allow_ungated_transfer,
+        error::permission_denied(ENO_UNGATED_TRANSFERS),
+    );
+
+    let current_address = object.owner;
+    let count = 0;
+    while (owner != current_address) {
+        count = count + 1;
+        assert!(count < MAXIMUM_OBJECT_NESTING, error::out_of_range(EMAXIMUM_NESTING));
+        // At this point, the first object exists and so the more likely case is that the
+        // object's owner is not an object. So we return a more sensible error.
+        assert!(
+            exists<ObjectCore>(current_address),
+            error::permission_denied(ENOT_OBJECT_OWNER),
+        );
+        let object = borrow_global<ObjectCore>(current_address);
+        assert!(
+            object.allow_ungated_transfer,
+            error::permission_denied(ENO_UNGATED_TRANSFERS),
+        );
+        current_address = object.owner;
+    };
+}
+
+ + + +
+ + + +## Function `burn` + +Forcefully transfer an unwanted object to BURN_ADDRESS, ignoring whether ungated_transfer is allowed. +This only works for objects directly owned and for simplicity does not apply to indirectly owned objects. +Original owners can reclaim burnt objects any time in the future by calling unburn. + + +
public entry fun burn<T: key>(owner: &signer, object: object::Object<T>)
+
+ + + +
+Implementation + + +
public entry fun burn<T: key>(owner: &signer, object: Object<T>) acquires ObjectCore {
+    let original_owner = signer::address_of(owner);
+    assert!(is_owner(object, original_owner), error::permission_denied(ENOT_OBJECT_OWNER));
+    let object_addr = object.inner;
+    move_to(&create_signer(object_addr), TombStone { original_owner });
+    transfer_raw_inner(object_addr, BURN_ADDRESS);
+}
+
+ + + +
+ + + +## Function `unburn` + +Allow original owners to reclaim any objects they previously burnt. + + +
public entry fun unburn<T: key>(original_owner: &signer, object: object::Object<T>)
+
+ + + +
+Implementation + + +
public entry fun unburn<T: key>(
+    original_owner: &signer,
+    object: Object<T>,
+) acquires TombStone, ObjectCore {
+    let object_addr = object.inner;
+    assert!(exists<TombStone>(object_addr), error::invalid_argument(EOBJECT_NOT_BURNT));
+
+    let TombStone { original_owner: original_owner_addr } = move_from<TombStone>(object_addr);
+    assert!(original_owner_addr == signer::address_of(original_owner), error::permission_denied(ENOT_OBJECT_OWNER));
+    transfer_raw_inner(object_addr, original_owner_addr);
+}
+
+ + + +
+ + + +## Function `ungated_transfer_allowed` + +Accessors +Return true if ungated transfer is allowed. + + +
public fun ungated_transfer_allowed<T: key>(object: object::Object<T>): bool
+
+ + + +
+Implementation + + +
public fun ungated_transfer_allowed<T: key>(object: Object<T>): bool acquires ObjectCore {
+    assert!(
+        exists<ObjectCore>(object.inner),
+        error::not_found(EOBJECT_DOES_NOT_EXIST),
+    );
+    borrow_global<ObjectCore>(object.inner).allow_ungated_transfer
+}
+
+ + + +
+ + + +## Function `owner` + +Return the current owner. + + +
public fun owner<T: key>(object: object::Object<T>): address
+
+ + + +
+Implementation + + +
public fun owner<T: key>(object: Object<T>): address acquires ObjectCore {
+    assert!(
+        exists<ObjectCore>(object.inner),
+        error::not_found(EOBJECT_DOES_NOT_EXIST),
+    );
+    borrow_global<ObjectCore>(object.inner).owner
+}
+
+ + + +
+ + + +## Function `is_owner` + +Return true if the provided address is the current owner. + + +
public fun is_owner<T: key>(object: object::Object<T>, owner: address): bool
+
+ + + +
+Implementation + + +
public fun is_owner<T: key>(object: Object<T>, owner: address): bool acquires ObjectCore {
+    owner(object) == owner
+}
+
+ + + +
+ + + +## Function `owns` + +Return true if the provided address has indirect or direct ownership of the provided object. + + +
public fun owns<T: key>(object: object::Object<T>, owner: address): bool
+
+ + + +
+Implementation + + +
public fun owns<T: key>(object: Object<T>, owner: address): bool acquires ObjectCore {
+    let current_address = object_address(&object);
+    if (current_address == owner) {
+        return true
+    };
+
+    assert!(
+        exists<ObjectCore>(current_address),
+        error::not_found(EOBJECT_DOES_NOT_EXIST),
+    );
+
+    let object = borrow_global<ObjectCore>(current_address);
+    let current_address = object.owner;
+
+    let count = 0;
+    while (owner != current_address) {
+        count = count + 1;
+        assert!(count < MAXIMUM_OBJECT_NESTING, error::out_of_range(EMAXIMUM_NESTING));
+        if (!exists<ObjectCore>(current_address)) {
+            return false
+        };
+
+        let object = borrow_global<ObjectCore>(current_address);
+        current_address = object.owner;
+    };
+    true
+}
+
+ + + +
+ + + +## Function `root_owner` + +Returns the root owner of an object. As objects support nested ownership, it can be useful +to determine the identity of the starting point of ownership. + + +
public fun root_owner<T: key>(object: object::Object<T>): address
+
+ + + +
+Implementation + + +
public fun root_owner<T: key>(object: Object<T>): address acquires ObjectCore {
+    let obj_owner = owner(object);
+    while (is_object(obj_owner)) {
+        obj_owner = owner(address_to_object<ObjectCore>(obj_owner));
+    };
+    obj_owner
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1It's not possible to create an object twice on the same address.CriticalThe create_object_internal function includes an assertion to ensure that the object being created does not already exist at the specified address.Formally verified via create_object_internal.
2Only its owner may transfer an object.CriticalThe transfer function mandates that the transaction be signed by the owner's address, ensuring that only the rightful owner may initiate the object transfer.Audited that it aborts if anyone other than the owner attempts to transfer.
3The indirect owner of an object may transfer the object.MediumThe owns function evaluates to true when the given address possesses either direct or indirect ownership of the specified object.Audited that it aborts if address transferring is not indirect owner.
4Objects may never change the address which houses them.LowAfter creating an object, transfers to another owner may occur. However, the address which stores the object may not be changed.This is implied by high-level requirement 1.
5If an ungated transfer is disabled on an object in an indirect ownership chain, a transfer should not occur.MediumCalling disable_ungated_transfer disables direct transfer, and only TransferRef may trigger transfers. The transfer_with_ref function is called.Formally verified via transfer_with_ref.
6Object addresses must not overlap with other addresses in different domains.CriticalThe current addressing scheme with suffixes does not conflict with any existing addresses, such as resource accounts. The GUID space is explicitly separated to ensure this doesn't happen.This is true by construction if one correctly ensures the usage of INIT_GUID_CREATION_NUM during the creation of GUID.
+ + + + + + +### Module-level Specification + + +
pragma aborts_if_is_strict;
+
+ + + + + + + +
fun spec_exists_at<T: key>(object: address): bool;
+
+ + + + + +### Function `address_to_object` + + +
public fun address_to_object<T: key>(object: address): object::Object<T>
+
+ + + + +
aborts_if !exists<ObjectCore>(object);
+aborts_if !spec_exists_at<T>(object);
+ensures result == Object<T> { inner: object };
+
+ + + + + +### Function `create_object_address` + + +
public fun create_object_address(source: &address, seed: vector<u8>): address
+
+ + + + +
pragma opaque;
+pragma aborts_if_is_strict = false;
+aborts_if [abstract] false;
+ensures [abstract] result == spec_create_object_address(source, seed);
+
+ + + + + + + +
fun spec_create_user_derived_object_address_impl(source: address, derive_from: address): address;
+
+ + + + + +### Function `create_user_derived_object_address_impl` + + +
fun create_user_derived_object_address_impl(source: address, derive_from: address): address
+
+ + + + +
pragma opaque;
+ensures [abstract] result == spec_create_user_derived_object_address_impl(source, derive_from);
+
+ + + + + +### Function `create_user_derived_object_address` + + +
public fun create_user_derived_object_address(source: address, derive_from: address): address
+
+ + + + +
pragma opaque;
+pragma aborts_if_is_strict = false;
+aborts_if [abstract] false;
+ensures [abstract] result == spec_create_user_derived_object_address(source, derive_from);
+
+ + + + + +### Function `create_guid_object_address` + + +
public fun create_guid_object_address(source: address, creation_num: u64): address
+
+ + + + +
pragma opaque;
+pragma aborts_if_is_strict = false;
+aborts_if [abstract] false;
+ensures [abstract] result == spec_create_guid_object_address(source, creation_num);
+
+ + + + + +### Function `exists_at` + + +
fun exists_at<T: key>(object: address): bool
+
+ + + + +
pragma opaque;
+ensures [abstract] result == spec_exists_at<T>(object);
+
+ + + + + +### Function `object_address` + + +
public fun object_address<T: key>(object: &object::Object<T>): address
+
+ + + + +
aborts_if false;
+ensures result == object.inner;
+
+ + + + + +### Function `convert` + + +
public fun convert<X: key, Y: key>(object: object::Object<X>): object::Object<Y>
+
+ + + + +
aborts_if !exists<ObjectCore>(object.inner);
+aborts_if !spec_exists_at<Y>(object.inner);
+ensures result == Object<Y> { inner: object.inner };
+
+ + + + + +### Function `create_named_object` + + +
public fun create_named_object(creator: &signer, seed: vector<u8>): object::ConstructorRef
+
+ + + + +
let creator_address = signer::address_of(creator);
+let obj_addr = spec_create_object_address(creator_address, seed);
+aborts_if exists<ObjectCore>(obj_addr);
+ensures exists<ObjectCore>(obj_addr);
+ensures global<ObjectCore>(obj_addr) == ObjectCore {
+    guid_creation_num: INIT_GUID_CREATION_NUM + 1,
+    owner: creator_address,
+    allow_ungated_transfer: true,
+    transfer_events: event::EventHandle {
+        counter: 0,
+        guid: guid::GUID {
+            id: guid::ID {
+                creation_num: INIT_GUID_CREATION_NUM,
+                addr: obj_addr,
+            }
+        }
+    }
+};
+ensures result == ConstructorRef { self: obj_addr, can_delete: false };
+
+ + + + + +### Function `create_user_derived_object` + + +
public(friend) fun create_user_derived_object(creator_address: address, derive_ref: &object::DeriveRef): object::ConstructorRef
+
+ + + + +
let obj_addr = spec_create_user_derived_object_address(creator_address, derive_ref.self);
+aborts_if exists<ObjectCore>(obj_addr);
+ensures exists<ObjectCore>(obj_addr);
+ensures global<ObjectCore>(obj_addr) == ObjectCore {
+    guid_creation_num: INIT_GUID_CREATION_NUM + 1,
+    owner: creator_address,
+    allow_ungated_transfer: true,
+    transfer_events: event::EventHandle {
+        counter: 0,
+        guid: guid::GUID {
+            id: guid::ID {
+                creation_num: INIT_GUID_CREATION_NUM,
+                addr: obj_addr,
+            }
+        }
+    }
+};
+ensures result == ConstructorRef { self: obj_addr, can_delete: false };
+
+ + + + + +### Function `create_object` + + +
public fun create_object(owner_address: address): object::ConstructorRef
+
+ + + + +
pragma aborts_if_is_partial;
+let unique_address = transaction_context::spec_generate_unique_address();
+aborts_if exists<ObjectCore>(unique_address);
+ensures exists<ObjectCore>(unique_address);
+ensures global<ObjectCore>(unique_address) == ObjectCore {
+    guid_creation_num: INIT_GUID_CREATION_NUM + 1,
+    owner: owner_address,
+    allow_ungated_transfer: true,
+    transfer_events: event::EventHandle {
+        counter: 0,
+        guid: guid::GUID {
+            id: guid::ID {
+                creation_num: INIT_GUID_CREATION_NUM,
+                addr: unique_address,
+            }
+        }
+    }
+};
+ensures result == ConstructorRef { self: unique_address, can_delete: true };
+
+ + + + + +### Function `create_sticky_object` + + +
public fun create_sticky_object(owner_address: address): object::ConstructorRef
+
+ + + + +
pragma aborts_if_is_partial;
+let unique_address = transaction_context::spec_generate_unique_address();
+aborts_if exists<ObjectCore>(unique_address);
+ensures exists<ObjectCore>(unique_address);
+ensures global<ObjectCore>(unique_address) == ObjectCore {
+    guid_creation_num: INIT_GUID_CREATION_NUM + 1,
+    owner: owner_address,
+    allow_ungated_transfer: true,
+    transfer_events: event::EventHandle {
+        counter: 0,
+        guid: guid::GUID {
+            id: guid::ID {
+                creation_num: INIT_GUID_CREATION_NUM,
+                addr: unique_address,
+            }
+        }
+    }
+};
+ensures result == ConstructorRef { self: unique_address, can_delete: false };
+
+ + + + + +### Function `create_sticky_object_at_address` + + +
public(friend) fun create_sticky_object_at_address(owner_address: address, object_address: address): object::ConstructorRef
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `create_object_from_account` + + +
#[deprecated]
+public fun create_object_from_account(creator: &signer): object::ConstructorRef
+
+ + + + +
aborts_if !exists<account::Account>(signer::address_of(creator));
+let object_data = global<account::Account>(signer::address_of(creator));
+aborts_if object_data.guid_creation_num + 1 > MAX_U64;
+aborts_if object_data.guid_creation_num + 1 >= account::MAX_GUID_CREATION_NUM;
+let creation_num = object_data.guid_creation_num;
+let addr = signer::address_of(creator);
+let guid = guid::GUID {
+    id: guid::ID {
+        creation_num,
+        addr,
+    }
+};
+let bytes_spec = bcs::to_bytes(guid);
+let bytes = concat(bytes_spec, vec<u8>(OBJECT_FROM_GUID_ADDRESS_SCHEME));
+let hash_bytes = hash::sha3_256(bytes);
+let obj_addr = from_bcs::deserialize<address>(hash_bytes);
+aborts_if exists<ObjectCore>(obj_addr);
+aborts_if !from_bcs::deserializable<address>(hash_bytes);
+ensures global<account::Account>(addr).guid_creation_num == old(
+    global<account::Account>(addr)
+).guid_creation_num + 1;
+ensures exists<ObjectCore>(obj_addr);
+ensures global<ObjectCore>(obj_addr) == ObjectCore {
+    guid_creation_num: INIT_GUID_CREATION_NUM + 1,
+    owner: addr,
+    allow_ungated_transfer: true,
+    transfer_events: event::EventHandle {
+        counter: 0,
+        guid: guid::GUID {
+            id: guid::ID {
+                creation_num: INIT_GUID_CREATION_NUM,
+                addr: obj_addr,
+            }
+        }
+    }
+};
+ensures result == ConstructorRef { self: obj_addr, can_delete: true };
+
+ + + + + +### Function `create_object_from_object` + + +
#[deprecated]
+public fun create_object_from_object(creator: &signer): object::ConstructorRef
+
+ + + + +
aborts_if !exists<ObjectCore>(signer::address_of(creator));
+let object_data = global<ObjectCore>(signer::address_of(creator));
+aborts_if object_data.guid_creation_num + 1 > MAX_U64;
+let creation_num = object_data.guid_creation_num;
+let addr = signer::address_of(creator);
+let guid = guid::GUID {
+    id: guid::ID {
+        creation_num,
+        addr,
+    }
+};
+let bytes_spec = bcs::to_bytes(guid);
+let bytes = concat(bytes_spec, vec<u8>(OBJECT_FROM_GUID_ADDRESS_SCHEME));
+let hash_bytes = hash::sha3_256(bytes);
+let obj_addr = from_bcs::deserialize<address>(hash_bytes);
+aborts_if exists<ObjectCore>(obj_addr);
+aborts_if !from_bcs::deserializable<address>(hash_bytes);
+ensures global<ObjectCore>(addr).guid_creation_num == old(global<ObjectCore>(addr)).guid_creation_num + 1;
+ensures exists<ObjectCore>(obj_addr);
+ensures global<ObjectCore>(obj_addr) == ObjectCore {
+    guid_creation_num: INIT_GUID_CREATION_NUM + 1,
+    owner: addr,
+    allow_ungated_transfer: true,
+    transfer_events: event::EventHandle {
+        counter: 0,
+        guid: guid::GUID {
+            id: guid::ID {
+                creation_num: INIT_GUID_CREATION_NUM,
+                addr: obj_addr,
+            }
+        }
+    }
+};
+ensures result == ConstructorRef { self: obj_addr, can_delete: true };
+
+ + + + + +### Function `create_object_from_guid` + + +
fun create_object_from_guid(creator_address: address, guid: guid::GUID): object::ConstructorRef
+
+ + + + +
let bytes_spec = bcs::to_bytes(guid);
+let bytes = concat(bytes_spec, vec<u8>(OBJECT_FROM_GUID_ADDRESS_SCHEME));
+let hash_bytes = hash::sha3_256(bytes);
+let obj_addr = from_bcs::deserialize<address>(hash_bytes);
+aborts_if exists<ObjectCore>(obj_addr);
+aborts_if !from_bcs::deserializable<address>(hash_bytes);
+ensures exists<ObjectCore>(obj_addr);
+ensures global<ObjectCore>(obj_addr) == ObjectCore {
+    guid_creation_num: INIT_GUID_CREATION_NUM + 1,
+    owner: creator_address,
+    allow_ungated_transfer: true,
+    transfer_events: event::EventHandle {
+        counter: 0,
+        guid: guid::GUID {
+            id: guid::ID {
+                creation_num: INIT_GUID_CREATION_NUM,
+                addr: obj_addr,
+            }
+        }
+    }
+};
+ensures result == ConstructorRef { self: obj_addr, can_delete: true };
+
+ + + + + +### Function `create_object_internal` + + +
fun create_object_internal(creator_address: address, object: address, can_delete: bool): object::ConstructorRef
+
+ + + + +
// This enforces high-level requirement 1:
+aborts_if exists<ObjectCore>(object);
+ensures exists<ObjectCore>(object);
+ensures global<ObjectCore>(object).guid_creation_num == INIT_GUID_CREATION_NUM + 1;
+ensures result == ConstructorRef { self: object, can_delete };
+
+ + + + + +### Function `generate_delete_ref` + + +
public fun generate_delete_ref(ref: &object::ConstructorRef): object::DeleteRef
+
+ + + + +
aborts_if !ref.can_delete;
+ensures result == DeleteRef { self: ref.self };
+
+ + + + + +### Function `generate_transfer_ref` + + +
public fun generate_transfer_ref(ref: &object::ConstructorRef): object::TransferRef
+
+ + + + +
aborts_if exists<Untransferable>(ref.self);
+ensures result == TransferRef {
+    self: ref.self,
+};
+
+ + + + + +### Function `object_from_constructor_ref` + + +
public fun object_from_constructor_ref<T: key>(ref: &object::ConstructorRef): object::Object<T>
+
+ + + + +
aborts_if !exists<ObjectCore>(ref.self);
+aborts_if !spec_exists_at<T>(ref.self);
+ensures result == Object<T> { inner: ref.self };
+
+ + + + + +### Function `create_guid` + + +
public fun create_guid(object: &signer): guid::GUID
+
+ + + + +
aborts_if !exists<ObjectCore>(signer::address_of(object));
+let object_data = global<ObjectCore>(signer::address_of(object));
+aborts_if object_data.guid_creation_num + 1 > MAX_U64;
+ensures result == guid::GUID {
+    id: guid::ID {
+        creation_num: object_data.guid_creation_num,
+        addr: signer::address_of(object)
+    }
+};
+
+ + + + + +### Function `new_event_handle` + + +
public fun new_event_handle<T: drop, store>(object: &signer): event::EventHandle<T>
+
+ + + + +
aborts_if !exists<ObjectCore>(signer::address_of(object));
+let object_data = global<ObjectCore>(signer::address_of(object));
+aborts_if object_data.guid_creation_num + 1 > MAX_U64;
+let guid = guid::GUID {
+    id: guid::ID {
+        creation_num: object_data.guid_creation_num,
+        addr: signer::address_of(object)
+    }
+};
+ensures result == event::EventHandle<T> {
+    counter: 0,
+    guid,
+};
+
+ + + + + +### Function `object_from_delete_ref` + + +
public fun object_from_delete_ref<T: key>(ref: &object::DeleteRef): object::Object<T>
+
+ + + + +
aborts_if !exists<ObjectCore>(ref.self);
+aborts_if !spec_exists_at<T>(ref.self);
+ensures result == Object<T> { inner: ref.self };
+
+ + + + + +### Function `delete` + + +
public fun delete(ref: object::DeleteRef)
+
+ + + + +
aborts_if !exists<ObjectCore>(ref.self);
+ensures !exists<ObjectCore>(ref.self);
+
+ + + + + +### Function `disable_ungated_transfer` + + +
public fun disable_ungated_transfer(ref: &object::TransferRef)
+
+ + + + +
aborts_if !exists<ObjectCore>(ref.self);
+ensures global<ObjectCore>(ref.self).allow_ungated_transfer == false;
+
+ + + + + +### Function `set_untransferable` + + +
public fun set_untransferable(ref: &object::ConstructorRef)
+
+ + + + +
aborts_if !exists<ObjectCore>(ref.self);
+aborts_if exists<Untransferable>(ref.self);
+ensures exists<Untransferable>(ref.self);
+ensures global<ObjectCore>(ref.self).allow_ungated_transfer == false;
+
+ + + + + +### Function `enable_ungated_transfer` + + +
public fun enable_ungated_transfer(ref: &object::TransferRef)
+
+ + + + +
aborts_if exists<Untransferable>(ref.self);
+aborts_if !exists<ObjectCore>(ref.self);
+ensures global<ObjectCore>(ref.self).allow_ungated_transfer == true;
+
+ + + + + +### Function `generate_linear_transfer_ref` + + +
public fun generate_linear_transfer_ref(ref: &object::TransferRef): object::LinearTransferRef
+
+ + + + +
aborts_if exists<Untransferable>(ref.self);
+aborts_if !exists<ObjectCore>(ref.self);
+let owner = global<ObjectCore>(ref.self).owner;
+ensures result == LinearTransferRef {
+    self: ref.self,
+    owner,
+};
+
+ + + + + +### Function `transfer_with_ref` + + +
public fun transfer_with_ref(ref: object::LinearTransferRef, to: address)
+
+ + + + +
aborts_if exists<Untransferable>(ref.self);
+let object = global<ObjectCore>(ref.self);
+aborts_if !exists<ObjectCore>(ref.self);
+// This enforces high-level requirement 5:
+aborts_if object.owner != ref.owner;
+ensures global<ObjectCore>(ref.self).owner == to;
+
+ + + + + +### Function `transfer_call` + + +
public entry fun transfer_call(owner: &signer, object: address, to: address)
+
+ + + + +
pragma aborts_if_is_partial;
+let owner_address = signer::address_of(owner);
+aborts_if !exists<ObjectCore>(object);
+aborts_if !global<ObjectCore>(object).allow_ungated_transfer;
+
+ + + + + +### Function `transfer` + + +
public entry fun transfer<T: key>(owner: &signer, object: object::Object<T>, to: address)
+
+ + + + +
pragma aborts_if_is_partial;
+let owner_address = signer::address_of(owner);
+let object_address = object.inner;
+aborts_if !exists<ObjectCore>(object_address);
+aborts_if !global<ObjectCore>(object_address).allow_ungated_transfer;
+
+ + + + + +### Function `transfer_raw` + + +
public fun transfer_raw(owner: &signer, object: address, to: address)
+
+ + + + +
pragma aborts_if_is_partial;
+let owner_address = signer::address_of(owner);
+aborts_if !exists<ObjectCore>(object);
+aborts_if !global<ObjectCore>(object).allow_ungated_transfer;
+
+ + + + + +### Function `transfer_to_object` + + +
public entry fun transfer_to_object<O: key, T: key>(owner: &signer, object: object::Object<O>, to: object::Object<T>)
+
+ + + + +
pragma aborts_if_is_partial;
+let owner_address = signer::address_of(owner);
+let object_address = object.inner;
+aborts_if !exists<ObjectCore>(object_address);
+aborts_if !global<ObjectCore>(object_address).allow_ungated_transfer;
+
+ + + + + +### Function `verify_ungated_and_descendant` + + +
fun verify_ungated_and_descendant(owner: address, destination: address)
+
+ + + + +
pragma aborts_if_is_partial;
+pragma unroll = MAXIMUM_OBJECT_NESTING;
+aborts_if !exists<ObjectCore>(destination);
+aborts_if !global<ObjectCore>(destination).allow_ungated_transfer;
+
+ + + + + +### Function `burn` + + +
public entry fun burn<T: key>(owner: &signer, object: object::Object<T>)
+
+ + + + +
pragma aborts_if_is_partial;
+let object_address = object.inner;
+aborts_if !exists<ObjectCore>(object_address);
+aborts_if owner(object) != signer::address_of(owner);
+aborts_if is_burnt(object);
+
+ + + + + +### Function `unburn` + + +
public entry fun unburn<T: key>(original_owner: &signer, object: object::Object<T>)
+
+ + + + +
pragma aborts_if_is_partial;
+let object_address = object.inner;
+aborts_if !exists<ObjectCore>(object_address);
+aborts_if !is_burnt(object);
+let tomb_stone = borrow_global<TombStone>(object_address);
+aborts_if tomb_stone.original_owner != signer::address_of(original_owner);
+
+ + + + + +### Function `ungated_transfer_allowed` + + +
public fun ungated_transfer_allowed<T: key>(object: object::Object<T>): bool
+
+ + + + +
aborts_if !exists<ObjectCore>(object.inner);
+ensures result == global<ObjectCore>(object.inner).allow_ungated_transfer;
+
+ + + + + +### Function `owner` + + +
public fun owner<T: key>(object: object::Object<T>): address
+
+ + + + +
aborts_if !exists<ObjectCore>(object.inner);
+ensures result == global<ObjectCore>(object.inner).owner;
+
+ + + + + +### Function `is_owner` + + +
public fun is_owner<T: key>(object: object::Object<T>, owner: address): bool
+
+ + + + +
aborts_if !exists<ObjectCore>(object.inner);
+ensures result == (global<ObjectCore>(object.inner).owner == owner);
+
+ + + + + +### Function `owns` + + +
public fun owns<T: key>(object: object::Object<T>, owner: address): bool
+
+ + + + +
pragma aborts_if_is_partial;
+let current_address_0 = object.inner;
+let object_0 = global<ObjectCore>(current_address_0);
+let current_address = object_0.owner;
+aborts_if object.inner != owner && !exists<ObjectCore>(object.inner);
+ensures current_address_0 == owner ==> result == true;
+
+ + + + + +### Function `root_owner` + + +
public fun root_owner<T: key>(object: object::Object<T>): address
+
+ + + + +
pragma aborts_if_is_partial;
+
+ + + + + + + +
fun spec_create_object_address(source: address, seed: vector<u8>): address;
+
+ + + + + + + +
fun spec_create_user_derived_object_address(source: address, derive_from: address): address;
+
+ + + + + + + +
fun spec_create_guid_object_address(source: address, creation_num: u64): address;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/object_code_deployment.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/object_code_deployment.md new file mode 100644 index 0000000000000..210d0a1e6b892 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/object_code_deployment.md @@ -0,0 +1,373 @@ + + + +# Module `0x1::object_code_deployment` + +This module allows users to deploy, upgrade and freeze modules deployed to objects on-chain. +This enables users to deploy modules to an object with a unique address each time they are published. +This modules provides an alternative method to publish code on-chain, where code is deployed to objects rather than accounts. +This is encouraged as it abstracts the necessary resources needed for deploying modules, +along with the required authorization to upgrade and freeze modules. + +The functionalities of this module are as follows. + +Publishing modules flow: +1. Create a new object with the address derived from the publisher address and the object seed. +2. Publish the module passed in the function via metadata_serialized and code to the newly created object. +3. Emits 'Publish' event with the address of the newly created object. +4. Create a ManagingRefs which stores the extend ref of the newly created object. +Note: This is needed to upgrade the code as the signer must be generated to upgrade the existing code in an object. + +Upgrading modules flow: +1. Assert the code_object passed in the function is owned by the publisher. +2. Assert the code_object passed in the function exists in global storage. +2. Retrieve the ExtendRef from the code_object and generate the signer from this. +3. Upgrade the module with the metadata_serialized and code passed in the function. +4. Emits 'Upgrade' event with the address of the object with the upgraded code. 
+Note: If the modules were deployed as immutable when calling publish, the upgrade will fail. + +Freezing modules flow: +1. Assert the code_object passed in the function exists in global storage. +2. Assert the code_object passed in the function is owned by the publisher. +3. Mark all the modules in the code_object as immutable. +4. Emits 'Freeze' event with the address of the object with the frozen code. +Note: There is no unfreeze function as this gives no benefit if the user can freeze/unfreeze modules at will. +Once modules are marked as immutable, they cannot be made mutable again. + + +- [Resource `ManagingRefs`](#0x1_object_code_deployment_ManagingRefs) +- [Struct `Publish`](#0x1_object_code_deployment_Publish) +- [Struct `Upgrade`](#0x1_object_code_deployment_Upgrade) +- [Struct `Freeze`](#0x1_object_code_deployment_Freeze) +- [Constants](#@Constants_0) +- [Function `publish`](#0x1_object_code_deployment_publish) +- [Function `object_seed`](#0x1_object_code_deployment_object_seed) +- [Function `upgrade`](#0x1_object_code_deployment_upgrade) +- [Function `freeze_code_object`](#0x1_object_code_deployment_freeze_code_object) + + +
use 0x1::account;
+use 0x1::bcs;
+use 0x1::code;
+use 0x1::error;
+use 0x1::event;
+use 0x1::features;
+use 0x1::object;
+use 0x1::signer;
+use 0x1::vector;
+
+ + + + + +## Resource `ManagingRefs` + +Internal struct, attached to the object, that holds Refs we need to manage the code deployment (i.e. upgrades). + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct ManagingRefs has key
+
+ + + +
+Fields + + +
+
+extend_ref: object::ExtendRef +
+
+ We need to keep the extend ref to be able to generate the signer to upgrade existing code. +
+
+ + +
+ + + +## Struct `Publish` + +Event emitted when code is published to an object. + + +
#[event]
+struct Publish has drop, store
+
+ + + +
+Fields + + +
+
+object_address: address +
+
+ +
+
+ + +
+ + + +## Struct `Upgrade` + +Event emitted when code in an existing object is upgraded. + + +
#[event]
+struct Upgrade has drop, store
+
+ + + +
+Fields + + +
+
+object_address: address +
+
+ +
+
+ + +
+ + + +## Struct `Freeze` + +Event emitted when code in an existing object is made immutable. + + +
#[event]
+struct Freeze has drop, store
+
+ + + +
+Fields + + +
+
+object_address: address +
+
+ +
+
+ + +
+ + + +## Constants + + + + +code_object does not exist. + + +
const ECODE_OBJECT_DOES_NOT_EXIST: u64 = 3;
+
+ + + + + +Not the owner of the code_object + + +
const ENOT_CODE_OBJECT_OWNER: u64 = 2;
+
+ + + + + +Object code deployment feature not supported. + + +
const EOBJECT_CODE_DEPLOYMENT_NOT_SUPPORTED: u64 = 1;
+
+ + + + + + + +
const OBJECT_CODE_DEPLOYMENT_DOMAIN_SEPARATOR: vector<u8> = [97, 112, 116, 111, 115, 95, 102, 114, 97, 109, 101, 119, 111, 114, 107, 58, 58, 111, 98, 106, 101, 99, 116, 95, 99, 111, 100, 101, 95, 100, 101, 112, 108, 111, 121, 109, 101, 110, 116];
+
+ + + + + +## Function `publish` + +Creates a new object with a unique address derived from the publisher address and the object seed. +Publishes the code passed in the function to the newly created object. +The caller must provide package metadata describing the package via metadata_serialized and +the code to be published via code. This contains a vector of modules to be deployed on-chain. + + +
public entry fun publish(publisher: &signer, metadata_serialized: vector<u8>, code: vector<vector<u8>>)
+
+ + + +
+Implementation + + +
public entry fun publish(
+    publisher: &signer,
+    metadata_serialized: vector<u8>,
+    code: vector<vector<u8>>,
+) {
+    assert!(
+        features::is_object_code_deployment_enabled(),
+        error::unavailable(EOBJECT_CODE_DEPLOYMENT_NOT_SUPPORTED),
+    );
+
+    let publisher_address = signer::address_of(publisher);
+    let object_seed = object_seed(publisher_address);
+    let constructor_ref = &object::create_named_object(publisher, object_seed);
+    let code_signer = &object::generate_signer(constructor_ref);
+    code::publish_package_txn(code_signer, metadata_serialized, code);
+
+    event::emit(Publish { object_address: signer::address_of(code_signer), });
+
+    move_to(code_signer, ManagingRefs {
+        extend_ref: object::generate_extend_ref(constructor_ref),
+    });
+}
+
+ + + +
+ + + +## Function `object_seed` + + + +
fun object_seed(publisher: address): vector<u8>
+
+ + + +
+Implementation + + +
inline fun object_seed(publisher: address): vector<u8> {
+    let sequence_number = account::get_sequence_number(publisher) + 1;
+    let seeds = vector[];
+    vector::append(&mut seeds, bcs::to_bytes(&OBJECT_CODE_DEPLOYMENT_DOMAIN_SEPARATOR));
+    vector::append(&mut seeds, bcs::to_bytes(&sequence_number));
+    seeds
+}
+
+ + + +
+ + + +## Function `upgrade` + +Upgrades the existing modules at the code_object address with the new modules passed in code, +along with the metadata metadata_serialized. +Note: If the modules were deployed as immutable when calling publish, the upgrade will fail. +Requires the publisher to be the owner of the code_object. + + +
public entry fun upgrade(publisher: &signer, metadata_serialized: vector<u8>, code: vector<vector<u8>>, code_object: object::Object<code::PackageRegistry>)
+
+ + + +
+Implementation + + +
public entry fun upgrade(
+    publisher: &signer,
+    metadata_serialized: vector<u8>,
+    code: vector<vector<u8>>,
+    code_object: Object<PackageRegistry>,
+) acquires ManagingRefs {
+    let publisher_address = signer::address_of(publisher);
+    assert!(
+        object::is_owner(code_object, publisher_address),
+        error::permission_denied(ENOT_CODE_OBJECT_OWNER),
+    );
+
+    let code_object_address = object::object_address(&code_object);
+    assert!(exists<ManagingRefs>(code_object_address), error::not_found(ECODE_OBJECT_DOES_NOT_EXIST));
+
+    let extend_ref = &borrow_global<ManagingRefs>(code_object_address).extend_ref;
+    let code_signer = &object::generate_signer_for_extending(extend_ref);
+    code::publish_package_txn(code_signer, metadata_serialized, code);
+
+    event::emit(Upgrade { object_address: signer::address_of(code_signer), });
+}
+
+ + + +
+ + + +## Function `freeze_code_object` + +Make an existing upgradable package immutable. Once this is called, the package cannot be made upgradable again. +Each code_object should only have one package, as one package is deployed per object in this module. +Requires the publisher to be the owner of the code_object. + + +
public entry fun freeze_code_object(publisher: &signer, code_object: object::Object<code::PackageRegistry>)
+
+ + + +
+Implementation + + +
public entry fun freeze_code_object(publisher: &signer, code_object: Object<PackageRegistry>) {
+    code::freeze_code_object(publisher, code_object);
+
+    event::emit(Freeze { object_address: object::object_address(&code_object), });
+}
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/optional_aggregator.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/optional_aggregator.md new file mode 100644 index 0000000000000..cd2d4ad5bfb7a --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/optional_aggregator.md @@ -0,0 +1,1158 @@ + + + +# Module `0x1::optional_aggregator` + +This module provides an interface to aggregate integers either via +aggregator (parallelizable) or via normal integers. + + +- [Struct `Integer`](#0x1_optional_aggregator_Integer) +- [Struct `OptionalAggregator`](#0x1_optional_aggregator_OptionalAggregator) +- [Constants](#@Constants_0) +- [Function `new_integer`](#0x1_optional_aggregator_new_integer) +- [Function `add_integer`](#0x1_optional_aggregator_add_integer) +- [Function `sub_integer`](#0x1_optional_aggregator_sub_integer) +- [Function `limit`](#0x1_optional_aggregator_limit) +- [Function `read_integer`](#0x1_optional_aggregator_read_integer) +- [Function `destroy_integer`](#0x1_optional_aggregator_destroy_integer) +- [Function `new`](#0x1_optional_aggregator_new) +- [Function `switch`](#0x1_optional_aggregator_switch) +- [Function `switch_and_zero_out`](#0x1_optional_aggregator_switch_and_zero_out) +- [Function `switch_to_integer_and_zero_out`](#0x1_optional_aggregator_switch_to_integer_and_zero_out) +- [Function `switch_to_aggregator_and_zero_out`](#0x1_optional_aggregator_switch_to_aggregator_and_zero_out) +- [Function `destroy`](#0x1_optional_aggregator_destroy) +- [Function `destroy_optional_aggregator`](#0x1_optional_aggregator_destroy_optional_aggregator) +- [Function `destroy_optional_integer`](#0x1_optional_aggregator_destroy_optional_integer) +- [Function `add`](#0x1_optional_aggregator_add) +- [Function `sub`](#0x1_optional_aggregator_sub) +- [Function `read`](#0x1_optional_aggregator_read) +- [Function 
`is_parallelizable`](#0x1_optional_aggregator_is_parallelizable) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Struct `OptionalAggregator`](#@Specification_1_OptionalAggregator) + - [Function `new_integer`](#@Specification_1_new_integer) + - [Function `add_integer`](#@Specification_1_add_integer) + - [Function `sub_integer`](#@Specification_1_sub_integer) + - [Function `limit`](#@Specification_1_limit) + - [Function `read_integer`](#@Specification_1_read_integer) + - [Function `destroy_integer`](#@Specification_1_destroy_integer) + - [Function `new`](#@Specification_1_new) + - [Function `switch`](#@Specification_1_switch) + - [Function `switch_and_zero_out`](#@Specification_1_switch_and_zero_out) + - [Function `switch_to_integer_and_zero_out`](#@Specification_1_switch_to_integer_and_zero_out) + - [Function `switch_to_aggregator_and_zero_out`](#@Specification_1_switch_to_aggregator_and_zero_out) + - [Function `destroy`](#@Specification_1_destroy) + - [Function `destroy_optional_aggregator`](#@Specification_1_destroy_optional_aggregator) + - [Function `destroy_optional_integer`](#@Specification_1_destroy_optional_integer) + - [Function `add`](#@Specification_1_add) + - [Function `sub`](#@Specification_1_sub) + - [Function `read`](#@Specification_1_read) + + +
use 0x1::aggregator;
+use 0x1::aggregator_factory;
+use 0x1::error;
+use 0x1::option;
+
+ + + + + +## Struct `Integer` + +Wrapper around integer with a custom overflow limit. Supports add, subtract and read just like Aggregator. + + +
struct Integer has store
+
+ + + +
+Fields + + +
+
+value: u128 +
+
+ +
+
+limit: u128 +
+
+ +
+
+ + +
+ + + +## Struct `OptionalAggregator` + +Contains either an aggregator or a normal integer, both overflowing on limit. + + +
struct OptionalAggregator has store
+
+ + + +
+Fields + + +
+
+aggregator: option::Option<aggregator::Aggregator> +
+
+ +
+
+integer: option::Option<optional_aggregator::Integer> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +The value of aggregator overflows (exceeds the limit). Raised by native code. + + +
const EAGGREGATOR_OVERFLOW: u64 = 1;
+
+ + + + + +The value of aggregator underflows (goes below zero). Raised by native code. + + +
const EAGGREGATOR_UNDERFLOW: u64 = 2;
+
+ + + + + +## Function `new_integer` + +Creates a new integer which overflows on exceeding a limit. + + +
fun new_integer(limit: u128): optional_aggregator::Integer
+
+ + + +
+Implementation + + +
fun new_integer(limit: u128): Integer {
+    Integer {
+        value: 0,
+        limit,
+    }
+}
+
+ + + +
+ + + +## Function `add_integer` + +Adds value to integer. Aborts on overflowing the limit. + + +
fun add_integer(integer: &mut optional_aggregator::Integer, value: u128)
+
+ + + +
+Implementation + + +
fun add_integer(integer: &mut Integer, value: u128) {
+    assert!(
+        value <= (integer.limit - integer.value),
+        error::out_of_range(EAGGREGATOR_OVERFLOW)
+    );
+    integer.value = integer.value + value;
+}
+
+ + + +
+ + + +## Function `sub_integer` + +Subtracts value from integer. Aborts on going below zero. + + +
fun sub_integer(integer: &mut optional_aggregator::Integer, value: u128)
+
+ + + +
+Implementation + + +
fun sub_integer(integer: &mut Integer, value: u128) {
+    assert!(value <= integer.value, error::out_of_range(EAGGREGATOR_UNDERFLOW));
+    integer.value = integer.value - value;
+}
+
+ + + +
+ + + +## Function `limit` + +Returns an overflow limit of integer. + + +
fun limit(integer: &optional_aggregator::Integer): u128
+
+ + + +
+Implementation + + +
fun limit(integer: &Integer): u128 {
+    integer.limit
+}
+
+ + + +
+ + + +## Function `read_integer` + +Returns a value stored in this integer. + + +
fun read_integer(integer: &optional_aggregator::Integer): u128
+
+ + + +
+Implementation + + +
fun read_integer(integer: &Integer): u128 {
+    integer.value
+}
+
+ + + +
+ + + +## Function `destroy_integer` + +Destroys an integer. + + +
fun destroy_integer(integer: optional_aggregator::Integer)
+
+ + + +
+Implementation + + +
fun destroy_integer(integer: Integer) {
+    let Integer { value: _, limit: _ } = integer;
+}
+
+ + + +
+ + + +## Function `new` + +Creates a new optional aggregator. + + +
public(friend) fun new(limit: u128, parallelizable: bool): optional_aggregator::OptionalAggregator
+
+ + + +
+Implementation + + +
public(friend) fun new(limit: u128, parallelizable: bool): OptionalAggregator {
+    if (parallelizable) {
+        OptionalAggregator {
+            aggregator: option::some(aggregator_factory::create_aggregator_internal(limit)),
+            integer: option::none(),
+        }
+    } else {
+        OptionalAggregator {
+            aggregator: option::none(),
+            integer: option::some(new_integer(limit)),
+        }
+    }
+}
+
+ + + +
+ + + +## Function `switch` + +Switches between parallelizable and non-parallelizable implementations. + + +
public fun switch(optional_aggregator: &mut optional_aggregator::OptionalAggregator)
+
+ + + +
+Implementation + + +
public fun switch(optional_aggregator: &mut OptionalAggregator) {
+    let value = read(optional_aggregator);
+    switch_and_zero_out(optional_aggregator);
+    add(optional_aggregator, value);
+}
+
+ + + +
+ + + +## Function `switch_and_zero_out` + +Switches between parallelizable and non-parallelizable implementations, setting +the value of the new optional aggregator to zero. + + +
fun switch_and_zero_out(optional_aggregator: &mut optional_aggregator::OptionalAggregator)
+
+ + + +
+Implementation + + +
fun switch_and_zero_out(optional_aggregator: &mut OptionalAggregator) {
+    if (is_parallelizable(optional_aggregator)) {
+        switch_to_integer_and_zero_out(optional_aggregator);
+    } else {
+        switch_to_aggregator_and_zero_out(optional_aggregator);
+    }
+}
+
+ + + +
+ + + +## Function `switch_to_integer_and_zero_out` + +Switches from parallelizable to non-parallelizable implementation, zero-initializing +the value. + + +
fun switch_to_integer_and_zero_out(optional_aggregator: &mut optional_aggregator::OptionalAggregator): u128
+
+ + + +
+Implementation + + +
fun switch_to_integer_and_zero_out(
+    optional_aggregator: &mut OptionalAggregator
+): u128 {
+    let aggregator = option::extract(&mut optional_aggregator.aggregator);
+    let limit = aggregator::limit(&aggregator);
+    aggregator::destroy(aggregator);
+    let integer = new_integer(limit);
+    option::fill(&mut optional_aggregator.integer, integer);
+    limit
+}
+
+ + + +
+ + + +## Function `switch_to_aggregator_and_zero_out` + +Switches from non-parallelizable to parallelizable implementation, zero-initializing +the value. + + +
fun switch_to_aggregator_and_zero_out(optional_aggregator: &mut optional_aggregator::OptionalAggregator): u128
+
+ + + +
+Implementation + + +
fun switch_to_aggregator_and_zero_out(
+    optional_aggregator: &mut OptionalAggregator
+): u128 {
+    let integer = option::extract(&mut optional_aggregator.integer);
+    let limit = limit(&integer);
+    destroy_integer(integer);
+    let aggregator = aggregator_factory::create_aggregator_internal(limit);
+    option::fill(&mut optional_aggregator.aggregator, aggregator);
+    limit
+}
+
+ + + +
+ + + +## Function `destroy` + +Destroys optional aggregator. + + +
public fun destroy(optional_aggregator: optional_aggregator::OptionalAggregator)
+
+ + + +
+Implementation + + +
public fun destroy(optional_aggregator: OptionalAggregator) {
+    if (is_parallelizable(&optional_aggregator)) {
+        destroy_optional_aggregator(optional_aggregator);
+    } else {
+        destroy_optional_integer(optional_aggregator);
+    }
+}
+
+ + + +
+ + + +## Function `destroy_optional_aggregator` + +Destroys parallelizable optional aggregator and returns its limit. + + +
fun destroy_optional_aggregator(optional_aggregator: optional_aggregator::OptionalAggregator): u128
+
+ + + +
+Implementation + + +
fun destroy_optional_aggregator(optional_aggregator: OptionalAggregator): u128 {
+    let OptionalAggregator { aggregator, integer } = optional_aggregator;
+    let limit = aggregator::limit(option::borrow(&aggregator));
+    aggregator::destroy(option::destroy_some(aggregator));
+    option::destroy_none(integer);
+    limit
+}
+
+ + + +
+ + + +## Function `destroy_optional_integer` + +Destroys non-parallelizable optional aggregator and returns its limit. + + +
fun destroy_optional_integer(optional_aggregator: optional_aggregator::OptionalAggregator): u128
+
+ + + +
+Implementation + + +
fun destroy_optional_integer(optional_aggregator: OptionalAggregator): u128 {
+    let OptionalAggregator { aggregator, integer } = optional_aggregator;
+    let limit = limit(option::borrow(&integer));
+    destroy_integer(option::destroy_some(integer));
+    option::destroy_none(aggregator);
+    limit
+}
+
+ + + +
+ + + +## Function `add` + +Adds value to optional aggregator, aborting on exceeding the limit. + + +
public fun add(optional_aggregator: &mut optional_aggregator::OptionalAggregator, value: u128)
+
+ + + +
+Implementation + + +
public fun add(optional_aggregator: &mut OptionalAggregator, value: u128) {
+    if (option::is_some(&optional_aggregator.aggregator)) {
+        let aggregator = option::borrow_mut(&mut optional_aggregator.aggregator);
+        aggregator::add(aggregator, value);
+    } else {
+        let integer = option::borrow_mut(&mut optional_aggregator.integer);
+        add_integer(integer, value);
+    }
+}
+
+ + + +
+ + + +## Function `sub` + +Subtracts value from optional aggregator, aborting on going below zero. + + +
public fun sub(optional_aggregator: &mut optional_aggregator::OptionalAggregator, value: u128)
+
+ + + +
+Implementation + + +
public fun sub(optional_aggregator: &mut OptionalAggregator, value: u128) {
+    if (option::is_some(&optional_aggregator.aggregator)) {
+        let aggregator = option::borrow_mut(&mut optional_aggregator.aggregator);
+        aggregator::sub(aggregator, value);
+    } else {
+        let integer = option::borrow_mut(&mut optional_aggregator.integer);
+        sub_integer(integer, value);
+    }
+}
+
+ + + +
+ + + +## Function `read` + +Returns the value stored in optional aggregator. + + +
public fun read(optional_aggregator: &optional_aggregator::OptionalAggregator): u128
+
+ + + +
+Implementation + + +
public fun read(optional_aggregator: &OptionalAggregator): u128 {
+    if (option::is_some(&optional_aggregator.aggregator)) {
+        let aggregator = option::borrow(&optional_aggregator.aggregator);
+        aggregator::read(aggregator)
+    } else {
+        let integer = option::borrow(&optional_aggregator.integer);
+        read_integer(integer)
+    }
+}
+
+ + + +
+ + + +## Function `is_parallelizable` + +Returns true if optional aggregator uses parallelizable implementation. + + +
public fun is_parallelizable(optional_aggregator: &optional_aggregator::OptionalAggregator): bool
+
+ + + +
+Implementation + + +
public fun is_parallelizable(optional_aggregator: &OptionalAggregator): bool {
+    option::is_some(&optional_aggregator.aggregator)
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1When creating a new integer instance, it guarantees that the limit assigned is a value passed into the function as an argument, and the value field becomes zero.HighThe new_integer function sets the limit field to the argument passed in, and the value field is set to zero.Formally verified via new_integer.
2For a given integer instance it should always be possible to: (1) return the limit value of the integer resource, (2) return the current value stored in that particular instance, and (3) destroy the integer instance.LowThe following functions should not abort if the Integer instance exists: limit(), read_integer(), destroy_integer().Formally verified via: read_integer, limit, and destroy_integer.
3Every successful switch must end with the aggregator type changed from non-parallelizable to parallelizable or vice versa.HighThe switch function run, if successful, should always change the aggregator type.Formally verified via switch_and_zero_out.
+ + + + + +### Module-level Specification + + +
pragma verify = true;
+pragma aborts_if_is_strict;
+
+ + + + + +### Struct `OptionalAggregator` + + +
struct OptionalAggregator has store
+
+ + + +
+
+aggregator: option::Option<aggregator::Aggregator> +
+
+ +
+
+integer: option::Option<optional_aggregator::Integer> +
+
+ +
+
+ + + +
invariant option::is_some(aggregator) <==> option::is_none(integer);
+invariant option::is_some(integer) <==> option::is_none(aggregator);
+invariant option::is_some(integer) ==> option::borrow(integer).value <= option::borrow(integer).limit;
+invariant option::is_some(aggregator) ==> aggregator::spec_aggregator_get_val(option::borrow(aggregator)) <=
+    aggregator::spec_get_limit(option::borrow(aggregator));
+
+ + + + + +### Function `new_integer` + + +
fun new_integer(limit: u128): optional_aggregator::Integer
+
+ + + + +
aborts_if false;
+ensures result.limit == limit;
+// This enforces high-level requirement 1:
+ensures result.value == 0;
+
+ + + + + +### Function `add_integer` + + +
fun add_integer(integer: &mut optional_aggregator::Integer, value: u128)
+
+ + +Check for overflow. + + +
aborts_if value > (integer.limit - integer.value);
+aborts_if integer.value + value > MAX_U128;
+ensures integer.value <= integer.limit;
+ensures integer.value == old(integer.value) + value;
+
+ + + + + +### Function `sub_integer` + + +
fun sub_integer(integer: &mut optional_aggregator::Integer, value: u128)
+
+ + + + +
aborts_if value > integer.value;
+ensures integer.value == old(integer.value) - value;
+
+ + + + + +### Function `limit` + + +
fun limit(integer: &optional_aggregator::Integer): u128
+
+ + + + +
// This enforces high-level requirement 2:
+aborts_if false;
+
+ + + + + +### Function `read_integer` + + +
fun read_integer(integer: &optional_aggregator::Integer): u128
+
+ + + + +
// This enforces high-level requirement 2:
+aborts_if false;
+
+ + + + + +### Function `destroy_integer` + + +
fun destroy_integer(integer: optional_aggregator::Integer)
+
+ + + + +
// This enforces high-level requirement 2:
+aborts_if false;
+
+ + + + + +### Function `new` + + +
public(friend) fun new(limit: u128, parallelizable: bool): optional_aggregator::OptionalAggregator
+
+ + + + +
aborts_if parallelizable && !exists<aggregator_factory::AggregatorFactory>(@aptos_framework);
+ensures parallelizable ==> is_parallelizable(result);
+ensures !parallelizable ==> !is_parallelizable(result);
+ensures optional_aggregator_value(result) == 0;
+ensures optional_aggregator_value(result) <= optional_aggregator_limit(result);
+
+ + + + + +### Function `switch` + + +
public fun switch(optional_aggregator: &mut optional_aggregator::OptionalAggregator)
+
+ + + + +
let vec_ref = optional_aggregator.integer.vec;
+aborts_if is_parallelizable(optional_aggregator) && len(vec_ref) != 0;
+aborts_if !is_parallelizable(optional_aggregator) && len(vec_ref) == 0;
+aborts_if !is_parallelizable(optional_aggregator) && !exists<aggregator_factory::AggregatorFactory>(@aptos_framework);
+ensures optional_aggregator_value(optional_aggregator) == optional_aggregator_value(old(optional_aggregator));
+
+ + + + + +### Function `switch_and_zero_out` + + +
fun switch_and_zero_out(optional_aggregator: &mut optional_aggregator::OptionalAggregator)
+
+ + +The integer Option does not exist when the aggregator Option exists. +The integer Option exists when the aggregator Option does not exist. +The AggregatorFactory is under the @aptos_framework when the aggregator Option does not exist. + + +
let vec_ref = optional_aggregator.integer.vec;
+aborts_if is_parallelizable(optional_aggregator) && len(vec_ref) != 0;
+aborts_if !is_parallelizable(optional_aggregator) && len(vec_ref) == 0;
+aborts_if !is_parallelizable(optional_aggregator) && !exists<aggregator_factory::AggregatorFactory>(@aptos_framework);
+// This enforces high-level requirement 3:
+ensures is_parallelizable(old(optional_aggregator)) ==> !is_parallelizable(optional_aggregator);
+ensures !is_parallelizable(old(optional_aggregator)) ==> is_parallelizable(optional_aggregator);
+ensures optional_aggregator_value(optional_aggregator) == 0;
+
+ + + + + +### Function `switch_to_integer_and_zero_out` + + +
fun switch_to_integer_and_zero_out(optional_aggregator: &mut optional_aggregator::OptionalAggregator): u128
+
+ + +The aggregator exists and the integer does not exist when switching from parallelizable to non-parallelizable implementation. + + +
let limit = aggregator::spec_get_limit(option::borrow(optional_aggregator.aggregator));
+aborts_if len(optional_aggregator.aggregator.vec) == 0;
+aborts_if len(optional_aggregator.integer.vec) != 0;
+ensures !is_parallelizable(optional_aggregator);
+ensures option::borrow(optional_aggregator.integer).limit == limit;
+ensures option::borrow(optional_aggregator.integer).value == 0;
+
+ + + + + +### Function `switch_to_aggregator_and_zero_out` + + +
fun switch_to_aggregator_and_zero_out(optional_aggregator: &mut optional_aggregator::OptionalAggregator): u128
+
+ + +The integer exists and the aggregator does not exist when switching from non-parallelizable to parallelizable implementation. +The AggregatorFactory is under the @aptos_framework. + + +
let limit = option::borrow(optional_aggregator.integer).limit;
+aborts_if len(optional_aggregator.integer.vec) == 0;
+aborts_if !exists<aggregator_factory::AggregatorFactory>(@aptos_framework);
+aborts_if len(optional_aggregator.aggregator.vec) != 0;
+ensures is_parallelizable(optional_aggregator);
+ensures aggregator::spec_get_limit(option::borrow(optional_aggregator.aggregator)) == limit;
+ensures aggregator::spec_aggregator_get_val(option::borrow(optional_aggregator.aggregator)) == 0;
+
+ + + + + +### Function `destroy` + + +
public fun destroy(optional_aggregator: optional_aggregator::OptionalAggregator)
+
+ + + + +
aborts_if is_parallelizable(optional_aggregator) && len(optional_aggregator.integer.vec) != 0;
+aborts_if !is_parallelizable(optional_aggregator) && len(optional_aggregator.integer.vec) == 0;
+
+ + + + + +### Function `destroy_optional_aggregator` + + +
fun destroy_optional_aggregator(optional_aggregator: optional_aggregator::OptionalAggregator): u128
+
+ + +The aggregator exists and the integer does not exist when destroying the aggregator. + + +
aborts_if len(optional_aggregator.aggregator.vec) == 0;
+aborts_if len(optional_aggregator.integer.vec) != 0;
+ensures result == aggregator::spec_get_limit(option::borrow(optional_aggregator.aggregator));
+
+ + + + + +### Function `destroy_optional_integer` + + +
fun destroy_optional_integer(optional_aggregator: optional_aggregator::OptionalAggregator): u128
+
+ + +The integer exists and the aggregator does not exist when destroying the integer. + + +
aborts_if len(optional_aggregator.integer.vec) == 0;
+aborts_if len(optional_aggregator.aggregator.vec) != 0;
+ensures result == option::borrow(optional_aggregator.integer).limit;
+
+ + + + + + + +
fun optional_aggregator_value(optional_aggregator: OptionalAggregator): u128 {
+   if (is_parallelizable(optional_aggregator)) {
+       aggregator::spec_aggregator_get_val(option::borrow(optional_aggregator.aggregator))
+   } else {
+       option::borrow(optional_aggregator.integer).value
+   }
+}
+
+ + + + + + + +
fun optional_aggregator_limit(optional_aggregator: OptionalAggregator): u128 {
+   if (is_parallelizable(optional_aggregator)) {
+       aggregator::spec_get_limit(option::borrow(optional_aggregator.aggregator))
+   } else {
+       option::borrow(optional_aggregator.integer).limit
+   }
+}
+
+ + + + + +### Function `add` + + +
public fun add(optional_aggregator: &mut optional_aggregator::OptionalAggregator, value: u128)
+
+ + + + +
include AddAbortsIf;
+ensures ((optional_aggregator_value(optional_aggregator) == optional_aggregator_value(old(optional_aggregator)) + value));
+
+ + + + + + + +
schema AddAbortsIf {
+    optional_aggregator: OptionalAggregator;
+    value: u128;
+    aborts_if is_parallelizable(optional_aggregator) && (aggregator::spec_aggregator_get_val(option::borrow(optional_aggregator.aggregator))
+        + value > aggregator::spec_get_limit(option::borrow(optional_aggregator.aggregator)));
+    aborts_if is_parallelizable(optional_aggregator) && (aggregator::spec_aggregator_get_val(option::borrow(optional_aggregator.aggregator))
+        + value > MAX_U128);
+    aborts_if !is_parallelizable(optional_aggregator) &&
+        (option::borrow(optional_aggregator.integer).value + value > MAX_U128);
+    aborts_if !is_parallelizable(optional_aggregator) &&
+        (value > (option::borrow(optional_aggregator.integer).limit - option::borrow(optional_aggregator.integer).value));
+}
+
+ + + + + +### Function `sub` + + +
public fun sub(optional_aggregator: &mut optional_aggregator::OptionalAggregator, value: u128)
+
+ + + + +
include SubAbortsIf;
+ensures ((optional_aggregator_value(optional_aggregator) == optional_aggregator_value(old(optional_aggregator)) - value));
+
+ + + + + + + +
schema SubAbortsIf {
+    optional_aggregator: OptionalAggregator;
+    value: u128;
+    aborts_if is_parallelizable(optional_aggregator) && (aggregator::spec_aggregator_get_val(option::borrow(optional_aggregator.aggregator))
+        < value);
+    aborts_if !is_parallelizable(optional_aggregator) &&
+        (option::borrow(optional_aggregator.integer).value < value);
+}
+
+ + + + + +### Function `read` + + +
public fun read(optional_aggregator: &optional_aggregator::OptionalAggregator): u128
+
+ + + + +
ensures !is_parallelizable(optional_aggregator) ==> result == option::borrow(optional_aggregator.integer).value;
+ensures is_parallelizable(optional_aggregator) ==>
+    result == aggregator::spec_read(option::borrow(optional_aggregator.aggregator));
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/overview.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/overview.md new file mode 100644 index 0000000000000..314baa3612ba9 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/overview.md @@ -0,0 +1,76 @@ + + + +# Aptos Framework + + +This is the reference documentation of the Aptos framework. + + + + +## Index + + +- [`0x1::account`](account.md#0x1_account) +- [`0x1::aggregator`](aggregator.md#0x1_aggregator) +- [`0x1::aggregator_factory`](aggregator_factory.md#0x1_aggregator_factory) +- [`0x1::aggregator_v2`](aggregator_v2.md#0x1_aggregator_v2) +- [`0x1::aptos_account`](aptos_account.md#0x1_aptos_account) +- [`0x1::aptos_coin`](aptos_coin.md#0x1_aptos_coin) +- [`0x1::aptos_governance`](aptos_governance.md#0x1_aptos_governance) +- [`0x1::block`](block.md#0x1_block) +- [`0x1::chain_id`](chain_id.md#0x1_chain_id) +- [`0x1::chain_status`](chain_status.md#0x1_chain_status) +- [`0x1::code`](code.md#0x1_code) +- [`0x1::coin`](coin.md#0x1_coin) +- [`0x1::config_buffer`](config_buffer.md#0x1_config_buffer) +- [`0x1::consensus_config`](consensus_config.md#0x1_consensus_config) +- [`0x1::create_signer`](create_signer.md#0x1_create_signer) +- [`0x1::delegation_pool`](delegation_pool.md#0x1_delegation_pool) +- [`0x1::dispatchable_fungible_asset`](dispatchable_fungible_asset.md#0x1_dispatchable_fungible_asset) +- [`0x1::dkg`](dkg.md#0x1_dkg) +- [`0x1::event`](event.md#0x1_event) +- [`0x1::execution_config`](execution_config.md#0x1_execution_config) +- [`0x1::function_info`](function_info.md#0x1_function_info) +- [`0x1::fungible_asset`](fungible_asset.md#0x1_fungible_asset) +- [`0x1::gas_schedule`](gas_schedule.md#0x1_gas_schedule) +- [`0x1::genesis`](genesis.md#0x1_genesis) +- [`0x1::governance_proposal`](governance_proposal.md#0x1_governance_proposal) +- [`0x1::guid`](guid.md#0x1_guid) +- 
[`0x1::jwk_consensus_config`](jwk_consensus_config.md#0x1_jwk_consensus_config) +- [`0x1::jwks`](jwks.md#0x1_jwks) +- [`0x1::keyless_account`](keyless_account.md#0x1_keyless_account) +- [`0x1::managed_coin`](managed_coin.md#0x1_managed_coin) +- [`0x1::multisig_account`](multisig_account.md#0x1_multisig_account) +- [`0x1::object`](object.md#0x1_object) +- [`0x1::object_code_deployment`](object_code_deployment.md#0x1_object_code_deployment) +- [`0x1::optional_aggregator`](optional_aggregator.md#0x1_optional_aggregator) +- [`0x1::primary_fungible_store`](primary_fungible_store.md#0x1_primary_fungible_store) +- [`0x1::randomness`](randomness.md#0x1_randomness) +- [`0x1::randomness_api_v0_config`](randomness_api_v0_config.md#0x1_randomness_api_v0_config) +- [`0x1::randomness_config`](randomness_config.md#0x1_randomness_config) +- [`0x1::randomness_config_seqnum`](randomness_config_seqnum.md#0x1_randomness_config_seqnum) +- [`0x1::reconfiguration`](reconfiguration.md#0x1_reconfiguration) +- [`0x1::reconfiguration_state`](reconfiguration_state.md#0x1_reconfiguration_state) +- [`0x1::reconfiguration_with_dkg`](reconfiguration_with_dkg.md#0x1_reconfiguration_with_dkg) +- [`0x1::resource_account`](resource_account.md#0x1_resource_account) +- [`0x1::stake`](stake.md#0x1_stake) +- [`0x1::staking_config`](staking_config.md#0x1_staking_config) +- [`0x1::staking_contract`](staking_contract.md#0x1_staking_contract) +- [`0x1::staking_proxy`](staking_proxy.md#0x1_staking_proxy) +- [`0x1::state_storage`](state_storage.md#0x1_state_storage) +- [`0x1::storage_gas`](storage_gas.md#0x1_storage_gas) +- [`0x1::system_addresses`](system_addresses.md#0x1_system_addresses) +- [`0x1::timestamp`](timestamp.md#0x1_timestamp) +- [`0x1::transaction_context`](transaction_context.md#0x1_transaction_context) +- [`0x1::transaction_fee`](transaction_fee.md#0x1_transaction_fee) +- [`0x1::transaction_validation`](transaction_validation.md#0x1_transaction_validation) +- [`0x1::util`](util.md#0x1_util) +- 
[`0x1::validator_consensus_info`](validator_consensus_info.md#0x1_validator_consensus_info) +- [`0x1::version`](version.md#0x1_version) +- [`0x1::vesting`](vesting.md#0x1_vesting) +- [`0x1::voting`](voting.md#0x1_voting) + + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/primary_fungible_store.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/primary_fungible_store.md new file mode 100644 index 0000000000000..0ebfb73e5d659 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/primary_fungible_store.md @@ -0,0 +1,955 @@ + + + +# Module `0x1::primary_fungible_store` + +This module provides a way for creators of fungible assets to enable support for creating primary (deterministic) +stores for their users. This is useful for assets that are meant to be used as a currency, as it allows users to +easily create a store for their account and deposit/withdraw/transfer fungible assets to/from it. + +The transfer flow works as below: +1. The sender calls transfer on the fungible asset metadata object to transfer amount of fungible asset to +recipient. +2. The fungible asset metadata object calls ensure_primary_store_exists to ensure that both the sender's and the +recipient's primary stores exist. If either doesn't, it will be created. +3. The fungible asset metadata object calls withdraw on the sender's primary store to withdraw amount of +fungible asset from it. This emits a withdraw event. +4. The fungible asset metadata object calls deposit on the recipient's primary store to deposit amount of +fungible asset to it. This emits a deposit event.
+ + +- [Resource `DeriveRefPod`](#0x1_primary_fungible_store_DeriveRefPod) +- [Function `create_primary_store_enabled_fungible_asset`](#0x1_primary_fungible_store_create_primary_store_enabled_fungible_asset) +- [Function `ensure_primary_store_exists`](#0x1_primary_fungible_store_ensure_primary_store_exists) +- [Function `create_primary_store`](#0x1_primary_fungible_store_create_primary_store) +- [Function `primary_store_address`](#0x1_primary_fungible_store_primary_store_address) +- [Function `primary_store`](#0x1_primary_fungible_store_primary_store) +- [Function `primary_store_exists`](#0x1_primary_fungible_store_primary_store_exists) +- [Function `primary_store_address_inlined`](#0x1_primary_fungible_store_primary_store_address_inlined) +- [Function `primary_store_inlined`](#0x1_primary_fungible_store_primary_store_inlined) +- [Function `primary_store_exists_inlined`](#0x1_primary_fungible_store_primary_store_exists_inlined) +- [Function `balance`](#0x1_primary_fungible_store_balance) +- [Function `is_balance_at_least`](#0x1_primary_fungible_store_is_balance_at_least) +- [Function `is_frozen`](#0x1_primary_fungible_store_is_frozen) +- [Function `withdraw`](#0x1_primary_fungible_store_withdraw) +- [Function `deposit`](#0x1_primary_fungible_store_deposit) +- [Function `force_deposit`](#0x1_primary_fungible_store_force_deposit) +- [Function `transfer`](#0x1_primary_fungible_store_transfer) +- [Function `transfer_assert_minimum_deposit`](#0x1_primary_fungible_store_transfer_assert_minimum_deposit) +- [Function `mint`](#0x1_primary_fungible_store_mint) +- [Function `burn`](#0x1_primary_fungible_store_burn) +- [Function `set_frozen_flag`](#0x1_primary_fungible_store_set_frozen_flag) +- [Function `withdraw_with_ref`](#0x1_primary_fungible_store_withdraw_with_ref) +- [Function `deposit_with_ref`](#0x1_primary_fungible_store_deposit_with_ref) +- [Function `transfer_with_ref`](#0x1_primary_fungible_store_transfer_with_ref) +- [Function 
`may_be_unburn`](#0x1_primary_fungible_store_may_be_unburn) +- [Specification](#@Specification_0) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + + +
use 0x1::dispatchable_fungible_asset;
+use 0x1::fungible_asset;
+use 0x1::object;
+use 0x1::option;
+use 0x1::signer;
+use 0x1::string;
+
+ + + + + +## Resource `DeriveRefPod` + +A resource that holds the derive ref for the fungible asset metadata object. This is used to create primary +stores for users with deterministic addresses so that users can easily deposit/withdraw/transfer fungible +assets. + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct DeriveRefPod has key
+
+ + + +
+Fields + + +
+
+metadata_derive_ref: object::DeriveRef +
+
+ +
+
+ + +
+ + + +## Function `create_primary_store_enabled_fungible_asset` + +Create a fungible asset with primary store support. When users transfer fungible assets to each other, their +primary stores will be created automatically if they don't exist. Primary stores have deterministic addresses +so that users can easily deposit/withdraw/transfer fungible assets. + + +
public fun create_primary_store_enabled_fungible_asset(constructor_ref: &object::ConstructorRef, maximum_supply: option::Option<u128>, name: string::String, symbol: string::String, decimals: u8, icon_uri: string::String, project_uri: string::String)
+
+ + + +
+Implementation + + +
public fun create_primary_store_enabled_fungible_asset(
+    constructor_ref: &ConstructorRef,
+    maximum_supply: Option<u128>,
+    name: String,
+    symbol: String,
+    decimals: u8,
+    icon_uri: String,
+    project_uri: String,
+) {
+    fungible_asset::add_fungibility(
+        constructor_ref,
+        maximum_supply,
+        name,
+        symbol,
+        decimals,
+        icon_uri,
+        project_uri,
+    );
+    let metadata_obj = &object::generate_signer(constructor_ref);
+    move_to(metadata_obj, DeriveRefPod {
+        metadata_derive_ref: object::generate_derive_ref(constructor_ref),
+    });
+}
+
+ + + +
+ + + +## Function `ensure_primary_store_exists` + +Ensure that the primary store object for the given address exists. If it doesn't, create it. + + +
public fun ensure_primary_store_exists<T: key>(owner: address, metadata: object::Object<T>): object::Object<fungible_asset::FungibleStore>
+
+ + + +
+Implementation + + +
public fun ensure_primary_store_exists<T: key>(
+    owner: address,
+    metadata: Object<T>,
+): Object<FungibleStore> acquires DeriveRefPod {
+    let store_addr = primary_store_address(owner, metadata);
+    if (fungible_asset::store_exists(store_addr)) {
+        object::address_to_object(store_addr)
+    } else {
+        create_primary_store(owner, metadata)
+    }
+}
+
+ + + +
+ + + +## Function `create_primary_store` + +Create a primary store object to hold fungible asset for the given address. + + +
public fun create_primary_store<T: key>(owner_addr: address, metadata: object::Object<T>): object::Object<fungible_asset::FungibleStore>
+
+ + + +
+Implementation + + +
public fun create_primary_store<T: key>(
+    owner_addr: address,
+    metadata: Object<T>,
+): Object<FungibleStore> acquires DeriveRefPod {
+    let metadata_addr = object::object_address(&metadata);
+    object::address_to_object<Metadata>(metadata_addr);
+    let derive_ref = &borrow_global<DeriveRefPod>(metadata_addr).metadata_derive_ref;
+    let constructor_ref = &object::create_user_derived_object(owner_addr, derive_ref);
+    // Disable ungated transfer as deterministic stores shouldn't be transferrable.
+    let transfer_ref = &object::generate_transfer_ref(constructor_ref);
+    object::disable_ungated_transfer(transfer_ref);
+
+    fungible_asset::create_store(constructor_ref, metadata)
+}
+
+ + + +
+ + + +## Function `primary_store_address` + +Get the address of the primary store for the given account. + + +
#[view]
+public fun primary_store_address<T: key>(owner: address, metadata: object::Object<T>): address
+
+ + + +
+Implementation + + +
public fun primary_store_address<T: key>(owner: address, metadata: Object<T>): address {
+    let metadata_addr = object::object_address(&metadata);
+    object::create_user_derived_object_address(owner, metadata_addr)
+}
+
+ + + +
+ + + +## Function `primary_store` + +Get the primary store object for the given account. + + +
#[view]
+public fun primary_store<T: key>(owner: address, metadata: object::Object<T>): object::Object<fungible_asset::FungibleStore>
+
+ + + +
+Implementation + + +
public fun primary_store<T: key>(owner: address, metadata: Object<T>): Object<FungibleStore> {
+    let store = primary_store_address(owner, metadata);
+    object::address_to_object<FungibleStore>(store)
+}
+
+ + + +
+ + + +## Function `primary_store_exists` + +Return whether the given account's primary store exists. + + +
#[view]
+public fun primary_store_exists<T: key>(account: address, metadata: object::Object<T>): bool
+
+ + + +
+Implementation + + +
public fun primary_store_exists<T: key>(account: address, metadata: Object<T>): bool {
+    fungible_asset::store_exists(primary_store_address(account, metadata))
+}
+
+ + + +
+ + + +## Function `primary_store_address_inlined` + +Get the address of the primary store for the given account. +Use instead of the corresponding view functions for dispatchable hooks to avoid circular dependencies of modules. + + +
public fun primary_store_address_inlined<T: key>(owner: address, metadata: object::Object<T>): address
+
+ + + +
+Implementation + + +
public inline fun primary_store_address_inlined<T: key>(owner: address, metadata: Object<T>): address {
+    let metadata_addr = object::object_address(&metadata);
+    object::create_user_derived_object_address(owner, metadata_addr)
+}
+
+ + + +
+ + + +## Function `primary_store_inlined` + +Get the primary store object for the given account. +Use instead of the corresponding view functions for dispatchable hooks to avoid circular dependencies of modules. + + +
public fun primary_store_inlined<T: key>(owner: address, metadata: object::Object<T>): object::Object<fungible_asset::FungibleStore>
+
+ + + +
+Implementation + + +
public inline fun primary_store_inlined<T: key>(owner: address, metadata: Object<T>): Object<FungibleStore> {
+    let store = primary_store_address_inlined(owner, metadata);
+    object::address_to_object(store)
+}
+
+ + + +
+ + + +## Function `primary_store_exists_inlined` + +Return whether the given account's primary store exists. +Use instead of the corresponding view functions for dispatchable hooks to avoid circular dependencies of modules. + + +
public fun primary_store_exists_inlined<T: key>(account: address, metadata: object::Object<T>): bool
+
+ + + +
+Implementation + + +
public inline fun primary_store_exists_inlined<T: key>(account: address, metadata: Object<T>): bool {
+    fungible_asset::store_exists(primary_store_address_inlined(account, metadata))
+}
+
+ + + +
+ + + +## Function `balance` + +Get the balance of account's primary store. + + +
#[view]
+public fun balance<T: key>(account: address, metadata: object::Object<T>): u64
+
+ + + +
+Implementation + + +
public fun balance<T: key>(account: address, metadata: Object<T>): u64 {
+    if (primary_store_exists(account, metadata)) {
+        fungible_asset::balance(primary_store(account, metadata))
+    } else {
+        0
+    }
+}
+
+ + + +
+ + + +## Function `is_balance_at_least` + + + +
#[view]
+public fun is_balance_at_least<T: key>(account: address, metadata: object::Object<T>, amount: u64): bool
+
+ + + +
+Implementation + + +
public fun is_balance_at_least<T: key>(account: address, metadata: Object<T>, amount: u64): bool {
+    if (primary_store_exists(account, metadata)) {
+        fungible_asset::is_balance_at_least(primary_store(account, metadata), amount)
+    } else {
+        amount == 0
+    }
+}
+
+ + + +
+ + + +## Function `is_frozen` + +Return whether the given account's primary store is frozen. + + +
#[view]
+public fun is_frozen<T: key>(account: address, metadata: object::Object<T>): bool
+
+ + + +
+Implementation + + +
public fun is_frozen<T: key>(account: address, metadata: Object<T>): bool {
+    if (primary_store_exists(account, metadata)) {
+        fungible_asset::is_frozen(primary_store(account, metadata))
+    } else {
+        false
+    }
+}
+
+ + + +
+ + + +## Function `withdraw` + +Withdraw amount of fungible asset from the given account's primary store. + + +
public fun withdraw<T: key>(owner: &signer, metadata: object::Object<T>, amount: u64): fungible_asset::FungibleAsset
+
+ + + +
+Implementation + + +
public fun withdraw<T: key>(owner: &signer, metadata: Object<T>, amount: u64): FungibleAsset acquires DeriveRefPod {
+    let store = ensure_primary_store_exists(signer::address_of(owner), metadata);
+    // Check if the store object has been burnt or not. If so, unburn it first.
+    may_be_unburn(owner, store);
+    dispatchable_fungible_asset::withdraw(owner, store, amount)
+}
+
+ + + +
+ + + +## Function `deposit` + +Deposit fungible asset fa to the given account's primary store. + + +
public fun deposit(owner: address, fa: fungible_asset::FungibleAsset)
+
+ + + +
+Implementation + + +
public fun deposit(owner: address, fa: FungibleAsset) acquires DeriveRefPod {
+    let metadata = fungible_asset::asset_metadata(&fa);
+    let store = ensure_primary_store_exists(owner, metadata);
+    dispatchable_fungible_asset::deposit(store, fa);
+}
+
+ + + +
+ + + +## Function `force_deposit` + +Deposit fungible asset fa to the given account's primary store. + + +
public(friend) fun force_deposit(owner: address, fa: fungible_asset::FungibleAsset)
+
+ + + +
+Implementation + + +
public(friend) fun force_deposit(owner: address, fa: FungibleAsset) acquires DeriveRefPod {
+    let metadata = fungible_asset::asset_metadata(&fa);
+    let store = ensure_primary_store_exists(owner, metadata);
+    fungible_asset::deposit_internal(object::object_address(&store), fa);
+}
+
+ + + +
+ + + +## Function `transfer` + +Transfer amount of fungible asset from sender's primary store to receiver's primary store. + + +
public entry fun transfer<T: key>(sender: &signer, metadata: object::Object<T>, recipient: address, amount: u64)
+
+ + + +
+Implementation + + +
public entry fun transfer<T: key>(
+    sender: &signer,
+    metadata: Object<T>,
+    recipient: address,
+    amount: u64,
+) acquires DeriveRefPod {
+    let sender_store = ensure_primary_store_exists(signer::address_of(sender), metadata);
+    // Check if the sender store object has been burnt or not. If so, unburn it first.
+    may_be_unburn(sender, sender_store);
+    let recipient_store = ensure_primary_store_exists(recipient, metadata);
+    dispatchable_fungible_asset::transfer(sender, sender_store, recipient_store, amount);
+}
+
+ + + +
+ + + +## Function `transfer_assert_minimum_deposit` + +Transfer amount of fungible asset from sender's primary store to receiver's primary store. +Use the minimum deposit assertion api to make sure recipient will receive a minimum amount of funds. + + +
public entry fun transfer_assert_minimum_deposit<T: key>(sender: &signer, metadata: object::Object<T>, recipient: address, amount: u64, expected: u64)
+
+ + + +
+Implementation + + +
public entry fun transfer_assert_minimum_deposit<T: key>(
+    sender: &signer,
+    metadata: Object<T>,
+    recipient: address,
+    amount: u64,
+    expected: u64,
+) acquires DeriveRefPod {
+    let sender_store = ensure_primary_store_exists(signer::address_of(sender), metadata);
+    // Check if the sender store object has been burnt or not. If so, unburn it first.
+    may_be_unburn(sender, sender_store);
+    let recipient_store = ensure_primary_store_exists(recipient, metadata);
+    dispatchable_fungible_asset::transfer_assert_minimum_deposit(
+        sender,
+        sender_store,
+        recipient_store,
+        amount,
+        expected
+    );
+}
+
+ + + +
+ + + +## Function `mint` + +Mint to the primary store of owner. + + +
public fun mint(mint_ref: &fungible_asset::MintRef, owner: address, amount: u64)
+
+ + + +
+Implementation + + +
public fun mint(mint_ref: &MintRef, owner: address, amount: u64) acquires DeriveRefPod {
+    let primary_store = ensure_primary_store_exists(owner, fungible_asset::mint_ref_metadata(mint_ref));
+    fungible_asset::mint_to(mint_ref, primary_store, amount);
+}
+
+ + + +
+ + + +## Function `burn` + +Burn from the primary store of owner. + + +
public fun burn(burn_ref: &fungible_asset::BurnRef, owner: address, amount: u64)
+
+ + + +
+Implementation + + +
public fun burn(burn_ref: &BurnRef, owner: address, amount: u64) {
+    let primary_store = primary_store(owner, fungible_asset::burn_ref_metadata(burn_ref));
+    fungible_asset::burn_from(burn_ref, primary_store, amount);
+}
+
+ + + +
+ + + +## Function `set_frozen_flag` + +Freeze/Unfreeze the primary store of owner. + + +
public fun set_frozen_flag(transfer_ref: &fungible_asset::TransferRef, owner: address, frozen: bool)
+
+ + + +
+Implementation + + +
public fun set_frozen_flag(transfer_ref: &TransferRef, owner: address, frozen: bool) acquires DeriveRefPod {
+    let primary_store = ensure_primary_store_exists(owner, fungible_asset::transfer_ref_metadata(transfer_ref));
+    fungible_asset::set_frozen_flag(transfer_ref, primary_store, frozen);
+}
+
+ + + +
+ + + +## Function `withdraw_with_ref` + +Withdraw from the primary store of owner ignoring frozen flag. + + +
public fun withdraw_with_ref(transfer_ref: &fungible_asset::TransferRef, owner: address, amount: u64): fungible_asset::FungibleAsset
+
+ + + +
+Implementation + + +
public fun withdraw_with_ref(transfer_ref: &TransferRef, owner: address, amount: u64): FungibleAsset {
+    let from_primary_store = primary_store(owner, fungible_asset::transfer_ref_metadata(transfer_ref));
+    fungible_asset::withdraw_with_ref(transfer_ref, from_primary_store, amount)
+}
+
+ + + +
+ + + +## Function `deposit_with_ref` + +Deposit from the primary store of owner ignoring frozen flag. + + +
public fun deposit_with_ref(transfer_ref: &fungible_asset::TransferRef, owner: address, fa: fungible_asset::FungibleAsset)
+
+ + + +
+Implementation + + +
public fun deposit_with_ref(transfer_ref: &TransferRef, owner: address, fa: FungibleAsset) acquires DeriveRefPod {
+    let from_primary_store = ensure_primary_store_exists(
+        owner,
+        fungible_asset::transfer_ref_metadata(transfer_ref)
+    );
+    fungible_asset::deposit_with_ref(transfer_ref, from_primary_store, fa);
+}
+
+ + + +
+ + + +## Function `transfer_with_ref` + +Transfer amount of FA from the primary store of from to that of to ignoring frozen flag. + + +
public fun transfer_with_ref(transfer_ref: &fungible_asset::TransferRef, from: address, to: address, amount: u64)
+
+ + + +
+Implementation + + +
public fun transfer_with_ref(
+    transfer_ref: &TransferRef,
+    from: address,
+    to: address,
+    amount: u64
+) acquires DeriveRefPod {
+    let from_primary_store = primary_store(from, fungible_asset::transfer_ref_metadata(transfer_ref));
+    let to_primary_store = ensure_primary_store_exists(to, fungible_asset::transfer_ref_metadata(transfer_ref));
+    fungible_asset::transfer_with_ref(transfer_ref, from_primary_store, to_primary_store, amount);
+}
+
+ + + +
+ + + +## Function `may_be_unburn` + + + +
fun may_be_unburn(owner: &signer, store: object::Object<fungible_asset::FungibleStore>)
+
+ + + +
+Implementation + + +
fun may_be_unburn(owner: &signer, store: Object<FungibleStore>) {
+    if (object::is_burnt(store)) {
+        object::unburn(owner, store);
+    };
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1Creating a fungible asset with primary store support should initiate a derived reference and store it under the metadata object.MediumThe function create_primary_store_enabled_fungible_asset makes an existing object, fungible, via the fungible_asset::add_fungibility function and initializes the DeriveRefPod resource by generating a DeriveRef for the object and then stores it under the object address.Audited that the DeriveRefPod has been properly initialized and stored under the metadata object.
2Fetching and creating a primary fungible store of an asset should only succeed if the object supports primary store.LowThe function create_primary_store is used to create a primary store by borrowing the DeriveRef resource from the object. In case the resource does not exist, creation will fail. The function ensure_primary_store_exists is used to fetch the primary store if it exists, otherwise it will create one via the create_primary_store function.Audited that it aborts if the DeriveRefPod doesn't exist. Audited that it aborts if the FungibleStore resource exists already under the object address.
3It should be possible to create a primary store to hold a fungible asset.MediumThe function create_primary_store borrows the DeriveRef resource from DeriveRefPod and then creates the store which is returned.Audited that it returns the newly created FungibleStore.
4Fetching the balance or the frozen status of a primary store should never abort.LowThe function balance returns the balance of the store, if the store exists, otherwise it returns 0. The function is_frozen returns the frozen flag of the fungible store, if the store exists, otherwise it returns false.Audited that the balance function returns the balance of the FungibleStore. Audited that the is_frozen function returns the frozen status of the FungibleStore resource. Audited that it never aborts.
5The ability to withdraw, deposit, transfer, mint and burn should only be available for assets with primary store support.MediumThe primary store is fetched before performing either of withdraw, deposit, transfer, mint, burn operation. If the FungibleStore resource doesn't exist the operation will fail.Audited that it aborts if the primary store FungibleStore doesn't exist.
6The action of depositing a fungible asset of the same type as the store should never fail if the store is not frozen.MediumThe function deposit fetches the owner's store, if it doesn't exist it will be created, and then deposits the fungible asset to it. The function deposit_with_ref fetches the owner's store, if it doesn't exist it will be created, and then deposit the fungible asset via the fungible_asset::deposit_with_ref function. Depositing fails if the metadata of the FungibleStore and FungibleAsset differs.Audited that it aborts if the store is frozen (deposit). Audited that the balance of the store is increased by the deposit amount (deposit, deposit_with_ref). Audited that it aborts if the metadata of the store and the asset differs (deposit, deposit_with_ref).
7Withdrawing should only be allowed to the owner of an existing store with sufficient balance.CriticalThe withdraw function fetches the owner's store via the primary_store function and then calls fungible_asset::withdraw which validates the owner of the store, checks the frozen status and the balance of the store. The withdraw_with_ref function fetches the store of the owner via primary_store function and calls the fungible_asset::withdraw_with_ref which validates transfer_ref's metadata with the withdrawing stores metadata, and the balance of the store.Audited that it aborts if the owner doesn't own the store (withdraw). Audited that it aborts if the store is frozen (withdraw). Audited that it aborts if the transfer ref's metadata doesn't match the withdrawing store's metadata (withdraw_with_ref). Audited that it aborts if the store doesn't have sufficient balance. Audited that the store is not burned. Audited that the balance of the store is decreased by the amount withdrawn.
8Only the fungible store owner is allowed to unburn a burned store.HighThe function may_be_unburn checks if the store is burned and then proceeds to call object::unburn which ensures that the owner of the object matches the address of the signer.Audited that the store is unburned successfully.
9Only the owner of a primary store can transfer its balance to any recipient's primary store.HighThe function transfer fetches sender and recipient's primary stores, if the sender's store is burned it unburns the store and calls the fungible_asset::transfer to proceed with the transfer, which first withdraws the assets from the sender's store and then deposits to the recipient's store. The function transfer_with_ref fetches the sender's and recipient's stores and calls the fungible_asset::transfer_with_ref function which withdraws the asset with the ref from the sender and deposits the asset to the recipient with the ref.Audited the deposit and withdraw (transfer). Audited the deposit_with_ref and withdraw_with_ref (transfer_with_ref). Audited that the store balance of the sender is decreased by the specified amount and it's added to the recipient's store. (transfer, transfer_with_ref) Audited that the sender's store is not burned (transfer).
10Minting an amount of assets to an unfrozen store is only allowed with a valid mint reference.HighThe mint function fetches the primary store and calls the fungible_asset::mint_to, which mints with MintRef's metadata which internally validates the amount and increases the total supply of the asset. And the minted asset is deposited to the provided store by validating that the store is unfrozen and the store's metadata is the same as the depositing asset's metadata.Audited that it aborts if the amount is equal to 0. Audited that it aborts if the store is frozen. Audited that it aborts if the mint_ref's metadata is not the same as the store's metadata. Audited that the asset's total supply is increased by the amount minted. Audited that the balance of the store is increased by the minted amount.
11Burning an amount of assets from an existing unfrozen store is only allowed with a valid burn reference.HighThe burn function fetches the primary store and calls the fungible_asset::burn_from function which withdraws the amount from the store while enforcing that the store has enough balance and burns the withdrawn asset after validating the asset's metadata and the BurnRef's metadata followed by decreasing the supply of the asset.Audited that it aborts if the metadata of the store is not the same as the BurnRef's metadata. Audited that it aborts if the burning amount is 0. Audited that it aborts if the store doesn't have enough balance. Audited that it aborts if the asset's metadata is not the same as the BurnRef's metadata. Audited that the total supply of the asset is decreased. Audited that the store's balance is reduced by the amount burned.
12Setting the frozen flag of a store is only allowed with a valid reference.HighThe function set_frozen_flag fetches the primary store and calls fungible_asset::set_frozen_flag which validates the TransferRef's metadata with the store's metadata and then updates the frozen flag.Audited that it aborts if the store's metadata is not the same as the TransferRef's metadata. Audited that the status of the frozen flag is updated correctly.
+ + + + + + +### Module-level Specification + + +
pragma verify = false;
+
+ + + + + + + +
fun spec_primary_store_exists<T: key>(account: address, metadata: Object<T>): bool {
+   fungible_asset::store_exists(spec_primary_store_address(account, metadata))
+}
+
+ + + + + + + +
fun spec_primary_store_address<T: key>(owner: address, metadata: Object<T>): address {
+   let metadata_addr = object::object_address(metadata);
+   object::spec_create_user_derived_object_address(owner, metadata_addr)
+}
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/randomness.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/randomness.md new file mode 100644 index 0000000000000..80f27e49459c6 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/randomness.md @@ -0,0 +1,1336 @@ + + + +# Module `0x1::randomness` + +This module provides access to *instant* secure randomness generated by the Aptos validators, as documented in +[AIP-41](https://github.com/aptos-foundation/AIPs/blob/main/aips/aip-41.md). + +Secure randomness means (1) the randomness cannot be predicted ahead of time by validators, developers or users +and (2) the randomness cannot be biased in any way by validators, developers or users. + +Security holds under the same proof-of-stake assumption that secures the Aptos network. + + +- [Resource `PerBlockRandomness`](#0x1_randomness_PerBlockRandomness) +- [Struct `RandomnessGeneratedEvent`](#0x1_randomness_RandomnessGeneratedEvent) +- [Resource `Ghost$var`](#0x1_randomness_Ghost$var) +- [Constants](#@Constants_0) +- [Function `initialize`](#0x1_randomness_initialize) +- [Function `on_new_block`](#0x1_randomness_on_new_block) +- [Function `next_32_bytes`](#0x1_randomness_next_32_bytes) +- [Function `bytes`](#0x1_randomness_bytes) +- [Function `u8_integer`](#0x1_randomness_u8_integer) +- [Function `u16_integer`](#0x1_randomness_u16_integer) +- [Function `u32_integer`](#0x1_randomness_u32_integer) +- [Function `u64_integer`](#0x1_randomness_u64_integer) +- [Function `u128_integer`](#0x1_randomness_u128_integer) +- [Function `u256_integer`](#0x1_randomness_u256_integer) +- [Function `u256_integer_internal`](#0x1_randomness_u256_integer_internal) +- [Function `u8_range`](#0x1_randomness_u8_range) +- [Function `u16_range`](#0x1_randomness_u16_range) +- [Function `u32_range`](#0x1_randomness_u32_range) +- [Function `u64_range`](#0x1_randomness_u64_range) +- 
[Function `u64_range_internal`](#0x1_randomness_u64_range_internal) +- [Function `u128_range`](#0x1_randomness_u128_range) +- [Function `u256_range`](#0x1_randomness_u256_range) +- [Function `permutation`](#0x1_randomness_permutation) +- [Function `safe_add_mod`](#0x1_randomness_safe_add_mod) +- [Function `take_first`](#0x1_randomness_take_first) +- [Function `fetch_and_increment_txn_counter`](#0x1_randomness_fetch_and_increment_txn_counter) +- [Function `is_unbiasable`](#0x1_randomness_is_unbiasable) +- [Specification](#@Specification_1) + - [Function `initialize`](#@Specification_1_initialize) + - [Function `on_new_block`](#@Specification_1_on_new_block) + - [Function `next_32_bytes`](#@Specification_1_next_32_bytes) + - [Function `u8_integer`](#@Specification_1_u8_integer) + - [Function `u16_integer`](#@Specification_1_u16_integer) + - [Function `u32_integer`](#@Specification_1_u32_integer) + - [Function `u64_integer`](#@Specification_1_u64_integer) + - [Function `u128_integer`](#@Specification_1_u128_integer) + - [Function `u256_integer`](#@Specification_1_u256_integer) + - [Function `u256_integer_internal`](#@Specification_1_u256_integer_internal) + - [Function `u8_range`](#@Specification_1_u8_range) + - [Function `u64_range`](#@Specification_1_u64_range) + - [Function `u256_range`](#@Specification_1_u256_range) + - [Function `permutation`](#@Specification_1_permutation) + - [Function `fetch_and_increment_txn_counter`](#@Specification_1_fetch_and_increment_txn_counter) + - [Function `is_unbiasable`](#@Specification_1_is_unbiasable) + + +
use 0x1::event;
+use 0x1::hash;
+use 0x1::option;
+use 0x1::system_addresses;
+use 0x1::transaction_context;
+use 0x1::vector;
+
+ + + + + +## Resource `PerBlockRandomness` + +32-byte randomness seed unique to every block. +This resource is updated in every block prologue. + + +
struct PerBlockRandomness has drop, key
+
+ + + +
+Fields + + +
+
+epoch: u64 +
+
+ +
+
+round: u64 +
+
+ +
+
+seed: option::Option<vector<u8>> +
+
+ +
+
+ + +
+ + + +## Struct `RandomnessGeneratedEvent` + +Event emitted every time a public randomness API in this module is called. + + +
#[event]
+struct RandomnessGeneratedEvent has drop, store
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Resource `Ghost$var` + + + +
struct Ghost$var has copy, drop, store, key
+
+ + + +
+Fields + + +
+
+v: vector<u8> +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const MAX_U256: u256 = 115792089237316195423570985008687907853269984665640564039457584007913129639935;
+
+ + + + + + + +
const DST: vector<u8> = [65, 80, 84, 79, 83, 95, 82, 65, 78, 68, 79, 77, 78, 69, 83, 83];
+
+ + + + + +Randomness APIs calls must originate from a private entry function with +#[randomness] annotation. Otherwise, malicious users can bias randomness result. + + +
const E_API_USE_IS_BIASIBLE: u64 = 1;
+
+ + + + + +## Function `initialize` + +Called in genesis.move. +Must be called in tests to initialize the PerBlockRandomness resource. + + +
public fun initialize(framework: &signer)
+
+ + + +
+Implementation + + +
public fun initialize(framework: &signer) {
+    system_addresses::assert_aptos_framework(framework);
+    if (!exists<PerBlockRandomness>(@aptos_framework)) {
+        move_to(framework, PerBlockRandomness {
+            epoch: 0,
+            round: 0,
+            seed: option::none(),
+        });
+    }
+}
+
+ + + +
+ + + +## Function `on_new_block` + +Invoked in block prologues to update the block-level randomness seed. + + +
public(friend) fun on_new_block(vm: &signer, epoch: u64, round: u64, seed_for_new_block: option::Option<vector<u8>>)
+
+ + + +
+Implementation + + +
public(friend) fun on_new_block(vm: &signer, epoch: u64, round: u64, seed_for_new_block: Option<vector<u8>>) acquires PerBlockRandomness {
+    system_addresses::assert_vm(vm);
+    if (exists<PerBlockRandomness>(@aptos_framework)) {
+        let randomness = borrow_global_mut<PerBlockRandomness>(@aptos_framework);
+        randomness.epoch = epoch;
+        randomness.round = round;
+        randomness.seed = seed_for_new_block;
+    }
+}
+
+ + + +
+ + + +## Function `next_32_bytes` + +Generate the next 32 random bytes. Repeated calls will yield different results (assuming the collision-resistance +of the hash function). + + +
fun next_32_bytes(): vector<u8>
+
+ + + +
+Implementation + + +
fun next_32_bytes(): vector<u8> acquires PerBlockRandomness {
+    assert!(is_unbiasable(), E_API_USE_IS_BIASIBLE);
+
+    let input = DST;
+    let randomness = borrow_global<PerBlockRandomness>(@aptos_framework);
+    let seed = *option::borrow(&randomness.seed);
+
+    vector::append(&mut input, seed);
+    vector::append(&mut input, transaction_context::get_transaction_hash());
+    vector::append(&mut input, fetch_and_increment_txn_counter());
+    hash::sha3_256(input)
+}
+
+ + + +
+ + + +## Function `bytes` + +Generates a sequence of bytes uniformly at random + + +
public fun bytes(n: u64): vector<u8>
+
+ + + +
+Implementation + + +
public fun bytes(n: u64): vector<u8> acquires PerBlockRandomness {
+    let v = vector[];
+    let c = 0;
+    while (c < n) {
+        let blob = next_32_bytes();
+        vector::reverse_append(&mut v, blob);
+
+        c = c + 32;
+    };
+
+    if (c > n) {
+        vector::trim(&mut v, n);
+    };
+
+    event::emit(RandomnessGeneratedEvent {});
+
+    v
+}
+
+ + + +
+ + + +## Function `u8_integer` + +Generates an u8 uniformly at random. + + +
public fun u8_integer(): u8
+
+ + + +
+Implementation + + +
public fun u8_integer(): u8 acquires PerBlockRandomness {
+    let raw = next_32_bytes();
+    let ret: u8 = vector::pop_back(&mut raw);
+
+    event::emit(RandomnessGeneratedEvent {});
+
+    ret
+}
+
+ + + +
+ + + +## Function `u16_integer` + +Generates an u16 uniformly at random. + + +
public fun u16_integer(): u16
+
+ + + +
+Implementation + + +
public fun u16_integer(): u16 acquires PerBlockRandomness {
+    let raw = next_32_bytes();
+    let i = 0;
+    let ret: u16 = 0;
+    while (i < 2) {
+        ret = ret * 256 + (vector::pop_back(&mut raw) as u16);
+        i = i + 1;
+    };
+
+    event::emit(RandomnessGeneratedEvent {});
+
+    ret
+}
+
+ + + +
+ + + +## Function `u32_integer` + +Generates an u32 uniformly at random. + + +
public fun u32_integer(): u32
+
+ + + +
+Implementation + + +
public fun u32_integer(): u32 acquires PerBlockRandomness {
+    let raw = next_32_bytes();
+    let i = 0;
+    let ret: u32 = 0;
+    while (i < 4) {
+        ret = ret * 256 + (vector::pop_back(&mut raw) as u32);
+        i = i + 1;
+    };
+
+    event::emit(RandomnessGeneratedEvent {});
+
+    ret
+}
+
+ + + +
+ + + +## Function `u64_integer` + +Generates an u64 uniformly at random. + + +
public fun u64_integer(): u64
+
+ + + +
+Implementation + + +
public fun u64_integer(): u64 acquires PerBlockRandomness {
+    let raw = next_32_bytes();
+    let i = 0;
+    let ret: u64 = 0;
+    while (i < 8) {
+        ret = ret * 256 + (vector::pop_back(&mut raw) as u64);
+        i = i + 1;
+    };
+
+    event::emit(RandomnessGeneratedEvent {});
+
+    ret
+}
+
+ + + +
+ + + +## Function `u128_integer` + +Generates an u128 uniformly at random. + + +
public fun u128_integer(): u128
+
+ + + +
+Implementation + + +
public fun u128_integer(): u128 acquires PerBlockRandomness {
+    let raw = next_32_bytes();
+    let i = 0;
+    let ret: u128 = 0;
+    while (i < 16) {
+        ret = ret * 256 + (vector::pop_back(&mut raw) as u128);
+        i = i + 1;
+    };
+
+    event::emit(RandomnessGeneratedEvent {});
+
+    ret
+}
+
+ + + +
+ + + +## Function `u256_integer` + +Generates a u256 uniformly at random. + + +
public fun u256_integer(): u256
+
+ + + +
+Implementation + + +
public fun u256_integer(): u256 acquires PerBlockRandomness {
+    event::emit(RandomnessGeneratedEvent {});
+    u256_integer_internal()
+}
+
+ + + +
+ + + +## Function `u256_integer_internal` + +Generates a u256 uniformly at random. + + +
fun u256_integer_internal(): u256
+
+ + + +
+Implementation + + +
fun u256_integer_internal(): u256 acquires PerBlockRandomness {
+    let raw = next_32_bytes();
+    let i = 0;
+    let ret: u256 = 0;
+    while (i < 32) {
+        ret = ret * 256 + (vector::pop_back(&mut raw) as u256);
+        i = i + 1;
+    };
+    ret
+}
+
+ + + +
+ + + +## Function `u8_range` + +Generates a number $n \in [min_incl, max_excl)$ uniformly at random. + +NOTE: The uniformity is not perfect, but it can be proved that the bias is negligible. +If you need perfect uniformity, consider implement your own via rejection sampling. + + +
public fun u8_range(min_incl: u8, max_excl: u8): u8
+
+ + + +
+Implementation + + +
public fun u8_range(min_incl: u8, max_excl: u8): u8 acquires PerBlockRandomness {
+    let range = ((max_excl - min_incl) as u256);
+    let sample = ((u256_integer_internal() % range) as u8);
+
+    event::emit(RandomnessGeneratedEvent {});
+
+    min_incl + sample
+}
+
+ + + +
+ + + +## Function `u16_range` + +Generates a number $n \in [min_incl, max_excl)$ uniformly at random. + +NOTE: The uniformity is not perfect, but it can be proved that the bias is negligible. +If you need perfect uniformity, consider implement your own via rejection sampling. + + +
public fun u16_range(min_incl: u16, max_excl: u16): u16
+
+ + + +
+Implementation + + +
public fun u16_range(min_incl: u16, max_excl: u16): u16 acquires PerBlockRandomness {
+    let range = ((max_excl - min_incl) as u256);
+    let sample = ((u256_integer_internal() % range) as u16);
+
+    event::emit(RandomnessGeneratedEvent {});
+
+    min_incl + sample
+}
+
+ + + +
+ + + +## Function `u32_range` + +Generates a number $n \in [min_incl, max_excl)$ uniformly at random. + +NOTE: The uniformity is not perfect, but it can be proved that the bias is negligible. +If you need perfect uniformity, consider implement your own via rejection sampling. + + +
public fun u32_range(min_incl: u32, max_excl: u32): u32
+
+ + + +
+Implementation + + +
public fun u32_range(min_incl: u32, max_excl: u32): u32 acquires PerBlockRandomness {
+    let range = ((max_excl - min_incl) as u256);
+    let sample = ((u256_integer_internal() % range) as u32);
+
+    event::emit(RandomnessGeneratedEvent {});
+
+    min_incl + sample
+}
+
+ + + +
+ + + +## Function `u64_range` + +Generates a number $n \in [min_incl, max_excl)$ uniformly at random. + +NOTE: The uniformity is not perfect, but it can be proved that the bias is negligible. +If you need perfect uniformity, consider implement your own via rejection sampling. + + +
public fun u64_range(min_incl: u64, max_excl: u64): u64
+
+ + + +
+Implementation + + +
public fun u64_range(min_incl: u64, max_excl: u64): u64 acquires PerBlockRandomness {
+    event::emit(RandomnessGeneratedEvent {});
+
+    u64_range_internal(min_incl, max_excl)
+}
+
+ + + +
+ + + +## Function `u64_range_internal` + + + +
public fun u64_range_internal(min_incl: u64, max_excl: u64): u64
+
+ + + +
+Implementation + + +
public fun u64_range_internal(min_incl: u64, max_excl: u64): u64 acquires PerBlockRandomness {
+    let range = ((max_excl - min_incl) as u256);
+    let sample = ((u256_integer_internal() % range) as u64);
+
+    min_incl + sample
+}
+
+ + + +
+ + + +## Function `u128_range` + +Generates a number $n \in [min_incl, max_excl)$ uniformly at random. + +NOTE: The uniformity is not perfect, but it can be proved that the bias is negligible. +If you need perfect uniformity, consider implement your own via rejection sampling. + + +
public fun u128_range(min_incl: u128, max_excl: u128): u128
+
+ + + +
+Implementation + + +
public fun u128_range(min_incl: u128, max_excl: u128): u128 acquires PerBlockRandomness {
+    let range = ((max_excl - min_incl) as u256);
+    let sample = ((u256_integer_internal() % range) as u128);
+
+    event::emit(RandomnessGeneratedEvent {});
+
+    min_incl + sample
+}
+
+ + + +
+ + + +## Function `u256_range` + +Generates a number $n \in [min_incl, max_excl)$ uniformly at random. + +NOTE: The uniformity is not perfect, but it can be proved that the bias is negligible. +If you need perfect uniformity, consider implement your own with u256_integer() + rejection sampling. + + +
public fun u256_range(min_incl: u256, max_excl: u256): u256
+
+ + + +
+Implementation + + +
public fun u256_range(min_incl: u256, max_excl: u256): u256 acquires PerBlockRandomness {
+    let range = max_excl - min_incl;
+    let r0 = u256_integer_internal();
+    let r1 = u256_integer_internal();
+
+    // Will compute sample := (r0 + r1*2^256) % range.
+
+    let sample = r1 % range;
+    let i = 0;
+    while ({
+        spec {
+            invariant sample >= 0 && sample < max_excl - min_incl;
+        };
+        i < 256
+    }) {
+        sample = safe_add_mod(sample, sample, range);
+        i = i + 1;
+    };
+
+    let sample = safe_add_mod(sample, r0 % range, range);
+    spec {
+        assert sample >= 0 && sample < max_excl - min_incl;
+    };
+
+    event::emit(RandomnessGeneratedEvent {});
+
+    min_incl + sample
+}
+
+ + + +
+ + + +## Function `permutation` + +Generate a permutation of [0, 1, ..., n-1] uniformly at random. +If n is 0, returns the empty vector. + + +
public fun permutation(n: u64): vector<u64>
+
+ + + +
+Implementation + + +
public fun permutation(n: u64): vector<u64> acquires PerBlockRandomness {
+    event::emit(RandomnessGeneratedEvent {});
+
+    let values = vector[];
+
+    if(n == 0) {
+        return vector[]
+    };
+
+    // Initialize into [0, 1, ..., n-1].
+    let i = 0;
+    while ({
+        spec {
+            invariant i <= n;
+            invariant len(values) == i;
+        };
+        i < n
+    }) {
+        std::vector::push_back(&mut values, i);
+        i = i + 1;
+    };
+    spec {
+        assert len(values) == n;
+    };
+
+    // Shuffle.
+    let tail = n - 1;
+    while ({
+        spec {
+            invariant tail >= 0 && tail < len(values);
+        };
+        tail > 0
+    }) {
+        let pop_position = u64_range_internal(0, tail + 1);
+        spec {
+            assert pop_position < len(values);
+        };
+        std::vector::swap(&mut values, pop_position, tail);
+        tail = tail - 1;
+    };
+
+    values
+}
+
+ + + +
+ + + +## Function `safe_add_mod` + +Compute (a + b) % m, assuming m >= 1, 0 <= a < m, 0<= b < m. + + +
fun safe_add_mod(a: u256, b: u256, m: u256): u256
+
+ + + +
+Implementation + + +
fun safe_add_mod(a: u256, b: u256, m: u256): u256 {
+    let a_clone = a;
+    let neg_b = m - b;
+    let a_less = a < neg_b;
+    take_first(if (a_less) { a + b } else { a_clone - neg_b }, if (!a_less) { a_clone - neg_b } else { a + b })
+}
+
+ + + +
+ + + +## Function `take_first` + + + +
fun take_first(x: u256, _y: u256): u256
+
+ + + +
+Implementation + + +
fun take_first(x: u256, _y: u256 ): u256 { x }
+
+ + + +
+ + + +## Function `fetch_and_increment_txn_counter` + +Fetches and increments a transaction-specific 32-byte randomness-related counter. +Aborts with E_API_USE_SUSCEPTIBLE_TO_TEST_AND_ABORT if randomness is not unbiasable. + + +
fun fetch_and_increment_txn_counter(): vector<u8>
+
+ + + +
+Implementation + + +
native fun fetch_and_increment_txn_counter(): vector<u8>;
+
+ + + +
+ + + +## Function `is_unbiasable` + +Called in each randomness generation function to ensure certain safety invariants, namely: +1. The transaction that led to the call of this function had a private (or friend) entry +function as its payload. +2. The entry function had #[randomness] annotation. + + +
fun is_unbiasable(): bool
+
+ + + +
+Implementation + + +
native fun is_unbiasable(): bool;
+
+ + + +
+ + + +## Specification + + + +
pragma verify = true;
+invariant [suspendable] chain_status::is_operating() ==> exists<PerBlockRandomness>(@aptos_framework);
+
+global var: vector<u8>;
+
+ + + + + +### Function `initialize` + + +
public fun initialize(framework: &signer)
+
+ + + + +
let framework_addr = signer::address_of(framework);
+aborts_if framework_addr != @aptos_framework;
+
+ + + + + +### Function `on_new_block` + + +
public(friend) fun on_new_block(vm: &signer, epoch: u64, round: u64, seed_for_new_block: option::Option<vector<u8>>)
+
+ + + + +
aborts_if signer::address_of(vm) != @vm;
+ensures exists<PerBlockRandomness>(@aptos_framework) ==> global<PerBlockRandomness>(@aptos_framework).seed == seed_for_new_block;
+ensures exists<PerBlockRandomness>(@aptos_framework) ==> global<PerBlockRandomness>(@aptos_framework).epoch == epoch;
+ensures exists<PerBlockRandomness>(@aptos_framework) ==> global<PerBlockRandomness>(@aptos_framework).round == round;
+
+ + + + + +### Function `next_32_bytes` + + +
fun next_32_bytes(): vector<u8>
+
+ + + + +
include NextBlobAbortsIf;
+let input = b"APTOS_RANDOMNESS";
+let randomness = global<PerBlockRandomness>(@aptos_framework);
+let seed = option::spec_borrow(randomness.seed);
+let txn_hash = transaction_context::spec_get_txn_hash();
+let txn_counter = spec_fetch_and_increment_txn_counter();
+ensures len(result) == 32;
+ensures result == hash::sha3_256(concat(concat(concat(input, seed), txn_hash), txn_counter));
+
+ + + + + + + +
schema NextBlobAbortsIf {
+    let randomness = global<PerBlockRandomness>(@aptos_framework);
+    aborts_if option::spec_is_none(randomness.seed);
+    aborts_if !spec_is_unbiasable();
+    aborts_if !exists<PerBlockRandomness>(@aptos_framework);
+}
+
+ + + + + +### Function `u8_integer` + + +
public fun u8_integer(): u8
+
+ + + + +
include NextBlobAbortsIf;
+
+ + + + + +### Function `u16_integer` + + +
public fun u16_integer(): u16
+
+ + + + +
pragma unroll = 2;
+include NextBlobAbortsIf;
+
+ + + + + +### Function `u32_integer` + + +
public fun u32_integer(): u32
+
+ + + + +
pragma unroll = 4;
+include NextBlobAbortsIf;
+
+ + + + + +### Function `u64_integer` + + +
public fun u64_integer(): u64
+
+ + + + +
pragma unroll = 8;
+include NextBlobAbortsIf;
+
+ + + + + +### Function `u128_integer` + + +
public fun u128_integer(): u128
+
+ + + + +
pragma unroll = 16;
+include NextBlobAbortsIf;
+
+ + + + + +### Function `u256_integer` + + +
public fun u256_integer(): u256
+
+ + + + +
pragma verify_duration_estimate = 300;
+pragma unroll = 32;
+include NextBlobAbortsIf;
+ensures [abstract] result == spec_u256_integer();
+
+ + + + + +### Function `u256_integer_internal` + + +
fun u256_integer_internal(): u256
+
+ + + + +
pragma verify_duration_estimate = 300;
+pragma unroll = 32;
+include NextBlobAbortsIf;
+
+ + + + + + + +
fun spec_u256_integer(): u256;
+
+ + + + + +### Function `u8_range` + + +
public fun u8_range(min_incl: u8, max_excl: u8): u8
+
+ + + + +
pragma verify_duration_estimate = 120;
+pragma opaque;
+include NextBlobAbortsIf;
+aborts_if min_incl >= max_excl;
+ensures result >= min_incl && result < max_excl;
+
+ + + + + +### Function `u64_range` + + +
public fun u64_range(min_incl: u64, max_excl: u64): u64
+
+ + + + +
pragma verify_duration_estimate = 120;
+include NextBlobAbortsIf;
+aborts_if min_incl >= max_excl;
+ensures result >= min_incl && result < max_excl;
+
+ + + + + +### Function `u256_range` + + +
public fun u256_range(min_incl: u256, max_excl: u256): u256
+
+ + + + +
pragma verify_duration_estimate = 120;
+include NextBlobAbortsIf;
+aborts_if min_incl >= max_excl;
+ensures result >= min_incl && result < max_excl;
+
+ + + + + +### Function `permutation` + + +
public fun permutation(n: u64): vector<u64>
+
+ + + + +
pragma aborts_if_is_partial;
+
+ + + + + + + +
fun spec_safe_add_mod(a: u256, b: u256, m: u256): u256 {
+   if (a < m - b) {
+       a + b
+   } else {
+       a - (m - b)
+   }
+}
+
+ + + + + +### Function `fetch_and_increment_txn_counter` + + +
fun fetch_and_increment_txn_counter(): vector<u8>
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] result == spec_fetch_and_increment_txn_counter();
+
+ + + + + + + +
fun spec_fetch_and_increment_txn_counter(): vector<u8>;
+
+ + + + + +### Function `is_unbiasable` + + +
fun is_unbiasable(): bool
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] result == spec_is_unbiasable();
+
+ + + + + + + +
fun spec_is_unbiasable(): bool;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/randomness_api_v0_config.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/randomness_api_v0_config.md new file mode 100644 index 0000000000000..71b3ada332a5e --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/randomness_api_v0_config.md @@ -0,0 +1,211 @@ + + + +# Module `0x1::randomness_api_v0_config` + + + +- [Resource `RequiredGasDeposit`](#0x1_randomness_api_v0_config_RequiredGasDeposit) +- [Resource `AllowCustomMaxGasFlag`](#0x1_randomness_api_v0_config_AllowCustomMaxGasFlag) +- [Function `initialize`](#0x1_randomness_api_v0_config_initialize) +- [Function `set_for_next_epoch`](#0x1_randomness_api_v0_config_set_for_next_epoch) +- [Function `set_allow_max_gas_flag_for_next_epoch`](#0x1_randomness_api_v0_config_set_allow_max_gas_flag_for_next_epoch) +- [Function `on_new_epoch`](#0x1_randomness_api_v0_config_on_new_epoch) +- [Specification](#@Specification_0) + + +
use 0x1::chain_status;
+use 0x1::config_buffer;
+use 0x1::option;
+use 0x1::system_addresses;
+
+ + + + + +## Resource `RequiredGasDeposit` + + + +
struct RequiredGasDeposit has drop, store, key
+
+ + + +
+Fields + + +
+
+gas_amount: option::Option<u64> +
+
+ +
+
+ + +
+ + + +## Resource `AllowCustomMaxGasFlag` + +If this flag is set, max_gas specified inside #[randomness()] will be used as the required deposit. + + +
struct AllowCustomMaxGasFlag has drop, store, key
+
+ + + +
+Fields + + +
+
+value: bool +
+
+ +
+
+ + +
+ + + +## Function `initialize` + +Only used in genesis. + + +
fun initialize(framework: &signer, required_amount: randomness_api_v0_config::RequiredGasDeposit, allow_custom_max_gas_flag: randomness_api_v0_config::AllowCustomMaxGasFlag)
+
+ + + +
+Implementation + + +
fun initialize(framework: &signer, required_amount: RequiredGasDeposit, allow_custom_max_gas_flag: AllowCustomMaxGasFlag) {
+    system_addresses::assert_aptos_framework(framework);
+    chain_status::assert_genesis();
+    move_to(framework, required_amount);
+    move_to(framework, allow_custom_max_gas_flag);
+}
+
+ + + +
+ + + +## Function `set_for_next_epoch` + +This can be called by on-chain governance to update RequiredGasDeposit for the next epoch. + + +
public fun set_for_next_epoch(framework: &signer, gas_amount: option::Option<u64>)
+
+ + + +
+Implementation + + +
public fun set_for_next_epoch(framework: &signer, gas_amount: Option<u64>) {
+    system_addresses::assert_aptos_framework(framework);
+    config_buffer::upsert(RequiredGasDeposit { gas_amount });
+}
+
+ + + +
+ + + +## Function `set_allow_max_gas_flag_for_next_epoch` + +This can be called by on-chain governance to update AllowCustomMaxGasFlag for the next epoch. + + +
public fun set_allow_max_gas_flag_for_next_epoch(framework: &signer, value: bool)
+
+ + + +
+Implementation + + +
public fun set_allow_max_gas_flag_for_next_epoch(framework: &signer, value: bool) {
+    system_addresses::assert_aptos_framework(framework);
+    config_buffer::upsert(AllowCustomMaxGasFlag { value } );
+}
+
+ + + +
+ + + +## Function `on_new_epoch` + +Only used in reconfigurations to apply the pending RequiredGasDeposit, if there is any. + + +
public fun on_new_epoch(framework: &signer)
+
+ + + +
+Implementation + + +
public fun on_new_epoch(framework: &signer) acquires RequiredGasDeposit, AllowCustomMaxGasFlag {
+    system_addresses::assert_aptos_framework(framework);
+    if (config_buffer::does_exist<RequiredGasDeposit>()) {
+        let new_config = config_buffer::extract<RequiredGasDeposit>();
+        if (exists<RequiredGasDeposit>(@aptos_framework)) {
+            *borrow_global_mut<RequiredGasDeposit>(@aptos_framework) = new_config;
+        } else {
+            move_to(framework, new_config);
+        }
+    };
+    if (config_buffer::does_exist<AllowCustomMaxGasFlag>()) {
+        let new_config = config_buffer::extract<AllowCustomMaxGasFlag>();
+        if (exists<AllowCustomMaxGasFlag>(@aptos_framework)) {
+            *borrow_global_mut<AllowCustomMaxGasFlag>(@aptos_framework) = new_config;
+        } else {
+            move_to(framework, new_config);
+        }
+    }
+}
+
+ + + +
+ + + +## Specification + + + +
pragma verify = false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/randomness_config.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/randomness_config.md new file mode 100644 index 0000000000000..37299fe41b95f --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/randomness_config.md @@ -0,0 +1,463 @@ + + + +# Module `0x1::randomness_config` + +Structs and functions for on-chain randomness configurations. + + +- [Resource `RandomnessConfig`](#0x1_randomness_config_RandomnessConfig) +- [Struct `ConfigOff`](#0x1_randomness_config_ConfigOff) +- [Struct `ConfigV1`](#0x1_randomness_config_ConfigV1) +- [Struct `ConfigV2`](#0x1_randomness_config_ConfigV2) +- [Constants](#@Constants_0) +- [Function `initialize`](#0x1_randomness_config_initialize) +- [Function `set_for_next_epoch`](#0x1_randomness_config_set_for_next_epoch) +- [Function `on_new_epoch`](#0x1_randomness_config_on_new_epoch) +- [Function `enabled`](#0x1_randomness_config_enabled) +- [Function `new_off`](#0x1_randomness_config_new_off) +- [Function `new_v1`](#0x1_randomness_config_new_v1) +- [Function `new_v2`](#0x1_randomness_config_new_v2) +- [Function `current`](#0x1_randomness_config_current) +- [Specification](#@Specification_1) + - [Function `on_new_epoch`](#@Specification_1_on_new_epoch) + - [Function `current`](#@Specification_1_current) + + +
use 0x1::config_buffer;
+use 0x1::copyable_any;
+use 0x1::fixed_point64;
+use 0x1::string;
+use 0x1::system_addresses;
+
+ + + + + +## Resource `RandomnessConfig` + +The configuration of the on-chain randomness feature. + + +
struct RandomnessConfig has copy, drop, store, key
+
+ + + +
+Fields + + +
+
+variant: copyable_any::Any +
+
+ A config variant packed as an Any. + Currently the variant type is one of the following. + - ConfigOff + - ConfigV1 +
+
+ + +
+ + + +## Struct `ConfigOff` + +A randomness config variant indicating the feature is disabled. + + +
struct ConfigOff has copy, drop, store
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `ConfigV1` + +A randomness config variant indicating the feature is enabled. + + +
struct ConfigV1 has copy, drop, store
+
+ + + +
+Fields + + +
+
+secrecy_threshold: fixed_point64::FixedPoint64 +
+
+ Any validator subset should not be able to reconstruct randomness if subset_power / total_power <= secrecy_threshold, +
+
+reconstruction_threshold: fixed_point64::FixedPoint64 +
+
+ Any validator subset should be able to reconstruct randomness if subset_power / total_power > reconstruction_threshold. +
+
+ + +
+ + + +## Struct `ConfigV2` + +A randomness config variant indicating the feature is enabled with fast path. + + +
struct ConfigV2 has copy, drop, store
+
+ + + +
+Fields + + +
+
+secrecy_threshold: fixed_point64::FixedPoint64 +
+
+ Any validator subset should not be able to reconstruct randomness if subset_power / total_power <= secrecy_threshold, +
+
+reconstruction_threshold: fixed_point64::FixedPoint64 +
+
+ Any validator subset should be able to reconstruct randomness if subset_power / total_power > reconstruction_threshold. +
+
+fast_path_secrecy_threshold: fixed_point64::FixedPoint64 +
+
+ Any validator subset should not be able to reconstruct randomness via the fast path if subset_power / total_power <= fast_path_secrecy_threshold, +
+
+ + +
+ + + +## Constants + + + + + + +
const EINVALID_CONFIG_VARIANT: u64 = 1;
+
+ + + + + +## Function `initialize` + +Initialize the configuration. Used in genesis or governance. + + +
public fun initialize(framework: &signer, config: randomness_config::RandomnessConfig)
+
+ + + +
+Implementation + + +
public fun initialize(framework: &signer, config: RandomnessConfig) {
+    system_addresses::assert_aptos_framework(framework);
+    if (!exists<RandomnessConfig>(@aptos_framework)) {
+        move_to(framework, config)
+    }
+}
+
+ + + +
+ + + +## Function `set_for_next_epoch` + +This can be called by on-chain governance to update on-chain consensus configs for the next epoch. + + +
public fun set_for_next_epoch(framework: &signer, new_config: randomness_config::RandomnessConfig)
+
+ + + +
+Implementation + + +
public fun set_for_next_epoch(framework: &signer, new_config: RandomnessConfig) {
+    system_addresses::assert_aptos_framework(framework);
+    config_buffer::upsert(new_config);
+}
+
+ + + +
+ + + +## Function `on_new_epoch` + +Only used in reconfigurations to apply the pending RandomnessConfig, if there is any. + + +
public(friend) fun on_new_epoch(framework: &signer)
+
+ + + +
+Implementation + + +
public(friend) fun on_new_epoch(framework: &signer) acquires RandomnessConfig {
+    system_addresses::assert_aptos_framework(framework);
+    if (config_buffer::does_exist<RandomnessConfig>()) {
+        let new_config = config_buffer::extract<RandomnessConfig>();
+        if (exists<RandomnessConfig>(@aptos_framework)) {
+            *borrow_global_mut<RandomnessConfig>(@aptos_framework) = new_config;
+        } else {
+            move_to(framework, new_config);
+        }
+    }
+}
+
+ + + +
+ + + +## Function `enabled` + +Check whether on-chain randomness main logic (e.g., DKGManager, RandManager, BlockMetadataExt) is enabled. + +NOTE: this returning true does not mean randomness will run. +The feature works if and only if consensus_config::validator_txn_enabled() && randomness_config::enabled(). + + +
public fun enabled(): bool
+
+ + + +
+Implementation + + +
public fun enabled(): bool acquires RandomnessConfig {
+    if (exists<RandomnessConfig>(@aptos_framework)) {
+        let config = borrow_global<RandomnessConfig>(@aptos_framework);
+        let variant_type_name = *string::bytes(copyable_any::type_name(&config.variant));
+        variant_type_name != b"0x1::randomness_config::ConfigOff"
+    } else {
+        false
+    }
+}
+
+ + + +
+ + + +## Function `new_off` + +Create a ConfigOff variant. + + +
public fun new_off(): randomness_config::RandomnessConfig
+
+ + + +
+Implementation + + +
public fun new_off(): RandomnessConfig {
+    RandomnessConfig {
+        variant: copyable_any::pack( ConfigOff {} )
+    }
+}
+
+ + + +
+ + + +## Function `new_v1` + +Create a ConfigV1 variant. + + +
public fun new_v1(secrecy_threshold: fixed_point64::FixedPoint64, reconstruction_threshold: fixed_point64::FixedPoint64): randomness_config::RandomnessConfig
+
+ + + +
+Implementation + + +
public fun new_v1(secrecy_threshold: FixedPoint64, reconstruction_threshold: FixedPoint64): RandomnessConfig {
+    RandomnessConfig {
+        variant: copyable_any::pack( ConfigV1 {
+            secrecy_threshold,
+            reconstruction_threshold
+        } )
+    }
+}
+
+ + + +
+ + + +## Function `new_v2` + +Create a ConfigV2 variant. + + +
public fun new_v2(secrecy_threshold: fixed_point64::FixedPoint64, reconstruction_threshold: fixed_point64::FixedPoint64, fast_path_secrecy_threshold: fixed_point64::FixedPoint64): randomness_config::RandomnessConfig
+
+ + + +
+Implementation + + +
public fun new_v2(
+    secrecy_threshold: FixedPoint64,
+    reconstruction_threshold: FixedPoint64,
+    fast_path_secrecy_threshold: FixedPoint64,
+): RandomnessConfig {
+    RandomnessConfig {
+        variant: copyable_any::pack( ConfigV2 {
+            secrecy_threshold,
+            reconstruction_threshold,
+            fast_path_secrecy_threshold,
+        } )
+    }
+}
+
+ + + +
+ + + +## Function `current` + +Get the currently effective randomness configuration object. + + +
public fun current(): randomness_config::RandomnessConfig
+
+ + + +
+Implementation + + +
public fun current(): RandomnessConfig acquires RandomnessConfig {
+    if (exists<RandomnessConfig>(@aptos_framework)) {
+        *borrow_global<RandomnessConfig>(@aptos_framework)
+    } else {
+        new_off()
+    }
+}
+
+ + + +
+ + + +## Specification + + + + +### Function `on_new_epoch` + + +
public(friend) fun on_new_epoch(framework: &signer)
+
+ + + + +
requires @aptos_framework == std::signer::address_of(framework);
+include config_buffer::OnNewEpochRequirement<RandomnessConfig>;
+aborts_if false;
+
+ + + + + +### Function `current` + + +
public fun current(): randomness_config::RandomnessConfig
+
+ + + + +
aborts_if false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/randomness_config_seqnum.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/randomness_config_seqnum.md new file mode 100644 index 0000000000000..204660ce96813 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/randomness_config_seqnum.md @@ -0,0 +1,170 @@ + + + +# Module `0x1::randomness_config_seqnum` + +Randomness stall recovery utils. + +When randomness generation is stuck due to a bug, the chain is also stuck. Below is the recovery procedure. +1. Ensure more than 2/3 stakes are stuck at the same version. +1. Every validator restarts with randomness_override_seq_num set to X+1 in the node config file, +where X is the current RandomnessConfigSeqNum on chain. +1. The chain should then be unblocked. +1. Once the bug is fixed and the binary + framework have been patched, +a governance proposal is needed to set RandomnessConfigSeqNum to be X+2. + + +- [Resource `RandomnessConfigSeqNum`](#0x1_randomness_config_seqnum_RandomnessConfigSeqNum) +- [Function `set_for_next_epoch`](#0x1_randomness_config_seqnum_set_for_next_epoch) +- [Function `initialize`](#0x1_randomness_config_seqnum_initialize) +- [Function `on_new_epoch`](#0x1_randomness_config_seqnum_on_new_epoch) +- [Specification](#@Specification_0) + - [Function `on_new_epoch`](#@Specification_0_on_new_epoch) + + +
use 0x1::config_buffer;
+use 0x1::system_addresses;
+
+ + + + + +## Resource `RandomnessConfigSeqNum` + +If this seqnum is smaller than a validator local override, the on-chain RandomnessConfig will be ignored. +Useful in a chain recovery from randomness stall. + + +
struct RandomnessConfigSeqNum has drop, store, key
+
+ + + +
+Fields + + +
+
+seq_num: u64 +
+
+ +
+
+ + +
+ + + +## Function `set_for_next_epoch` + +Update RandomnessConfigSeqNum. +Used when re-enable randomness after an emergency randomness disable via local override. + + +
public fun set_for_next_epoch(framework: &signer, seq_num: u64)
+
+ + + +
+Implementation + + +
public fun set_for_next_epoch(framework: &signer, seq_num: u64) {
+    system_addresses::assert_aptos_framework(framework);
+    config_buffer::upsert(RandomnessConfigSeqNum { seq_num });
+}
+
+ + + +
+ + + +## Function `initialize` + +Initialize the configuration. Used in genesis or governance. + + +
public fun initialize(framework: &signer)
+
+ + + +
+Implementation + + +
public fun initialize(framework: &signer) {
+    system_addresses::assert_aptos_framework(framework);
+    if (!exists<RandomnessConfigSeqNum>(@aptos_framework)) {
+        move_to(framework, RandomnessConfigSeqNum { seq_num: 0 })
+    }
+}
+
+ + + +
+ + + +## Function `on_new_epoch` + +Only used in reconfigurations to apply the pending RandomnessConfig, if there is any. + + +
public(friend) fun on_new_epoch(framework: &signer)
+
+ + + +
+Implementation + + +
public(friend) fun on_new_epoch(framework: &signer) acquires RandomnessConfigSeqNum {
+    system_addresses::assert_aptos_framework(framework);
+    if (config_buffer::does_exist<RandomnessConfigSeqNum>()) {
+        let new_config = config_buffer::extract<RandomnessConfigSeqNum>();
+        if (exists<RandomnessConfigSeqNum>(@aptos_framework)) {
+            *borrow_global_mut<RandomnessConfigSeqNum>(@aptos_framework) = new_config;
+        } else {
+            move_to(framework, new_config);
+        }
+    }
+}
+
+ + + +
+ + + +## Specification + + + + +### Function `on_new_epoch` + + +
public(friend) fun on_new_epoch(framework: &signer)
+
+ + + + +
requires @aptos_framework == std::signer::address_of(framework);
+include config_buffer::OnNewEpochRequirement<RandomnessConfigSeqNum>;
+aborts_if false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/reconfiguration.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/reconfiguration.md new file mode 100644 index 0000000000000..545dfef452233 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/reconfiguration.md @@ -0,0 +1,798 @@ + + + +# Module `0x1::reconfiguration` + +Publishes configuration information for validators, and issues reconfiguration events +to synchronize configuration changes for the validators. + + +- [Struct `NewEpochEvent`](#0x1_reconfiguration_NewEpochEvent) +- [Struct `NewEpoch`](#0x1_reconfiguration_NewEpoch) +- [Resource `Configuration`](#0x1_reconfiguration_Configuration) +- [Resource `DisableReconfiguration`](#0x1_reconfiguration_DisableReconfiguration) +- [Constants](#@Constants_0) +- [Function `initialize`](#0x1_reconfiguration_initialize) +- [Function `disable_reconfiguration`](#0x1_reconfiguration_disable_reconfiguration) +- [Function `enable_reconfiguration`](#0x1_reconfiguration_enable_reconfiguration) +- [Function `reconfiguration_enabled`](#0x1_reconfiguration_reconfiguration_enabled) +- [Function `reconfigure`](#0x1_reconfiguration_reconfigure) +- [Function `last_reconfiguration_time`](#0x1_reconfiguration_last_reconfiguration_time) +- [Function `current_epoch`](#0x1_reconfiguration_current_epoch) +- [Function `emit_genesis_reconfiguration_event`](#0x1_reconfiguration_emit_genesis_reconfiguration_event) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `initialize`](#@Specification_1_initialize) + - [Function `disable_reconfiguration`](#@Specification_1_disable_reconfiguration) + - [Function `enable_reconfiguration`](#@Specification_1_enable_reconfiguration) + - [Function `reconfiguration_enabled`](#@Specification_1_reconfiguration_enabled) + - [Function 
`reconfigure`](#@Specification_1_reconfigure) + - [Function `last_reconfiguration_time`](#@Specification_1_last_reconfiguration_time) + - [Function `current_epoch`](#@Specification_1_current_epoch) + - [Function `emit_genesis_reconfiguration_event`](#@Specification_1_emit_genesis_reconfiguration_event) + + +
use 0x1::account;
+use 0x1::chain_status;
+use 0x1::error;
+use 0x1::event;
+use 0x1::features;
+use 0x1::reconfiguration_state;
+use 0x1::signer;
+use 0x1::stake;
+use 0x1::storage_gas;
+use 0x1::system_addresses;
+use 0x1::timestamp;
+use 0x1::transaction_fee;
+
+ + + + + +## Struct `NewEpochEvent` + +Event that signals consensus to start a new epoch, +with new configuration information. This is also called a +"reconfiguration event" + + +
#[event]
+struct NewEpochEvent has drop, store
+
+ + + +
+Fields + + +
+
+epoch: u64 +
+
+ +
+
+ + +
+ + + +## Struct `NewEpoch` + +Event that signals consensus to start a new epoch, +with new configuration information. This is also called a +"reconfiguration event" + + +
#[event]
+struct NewEpoch has drop, store
+
+ + + +
+Fields + + +
+
+epoch: u64 +
+
+ +
+
+ + +
+ + + +## Resource `Configuration` + +Holds information about state of reconfiguration + + +
struct Configuration has key
+
+ + + +
+Fields + + +
+
+epoch: u64 +
+
+ Epoch number +
+
+last_reconfiguration_time: u64 +
+
+ Time of last reconfiguration. Only changes on reconfiguration events. +
+
+events: event::EventHandle<reconfiguration::NewEpochEvent> +
+
+ Event handle for reconfiguration events +
+
+ + +
+ + + +## Resource `DisableReconfiguration` + +Reconfiguration will be disabled if this resource is published under the +aptos_framework system address + + +
struct DisableReconfiguration has key
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Constants + + + + +A Reconfiguration resource is in an invalid state + + +
const ECONFIG: u64 = 2;
+
+ + + + + +The Configuration resource is in an invalid state + + +
const ECONFIGURATION: u64 = 1;
+
+ + + + + +An invalid block time was encountered. + + +
const EINVALID_BLOCK_TIME: u64 = 4;
+
+ + + + + +An invalid GUID for the NewEpochEvent was encountered (the account's GUID creation number did not match the expected value). + + +
const EINVALID_GUID_FOR_EVENT: u64 = 5;
+
+ + + + + +A ModifyConfigCapability is in a different state than was expected + + +
const EMODIFY_CAPABILITY: u64 = 3;
+
+ + + + + +## Function `initialize` + +Only called during genesis. +Publishes Configuration resource. Can only be invoked by aptos framework account, and only a single time in Genesis. + + +
public(friend) fun initialize(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
public(friend) fun initialize(aptos_framework: &signer) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+
+    // assert it matches `new_epoch_event_key()`, otherwise the event can't be recognized
+    assert!(account::get_guid_next_creation_num(signer::address_of(aptos_framework)) == 2, error::invalid_state(EINVALID_GUID_FOR_EVENT));
+    move_to<Configuration>(
+        aptos_framework,
+        Configuration {
+            epoch: 0,
+            last_reconfiguration_time: 0,
+            events: account::new_event_handle<NewEpochEvent>(aptos_framework),
+        }
+    );
+}
+
+ + + +
+ + + +## Function `disable_reconfiguration` + +Private function to temporarily halt reconfiguration. +This function should only be used for offline WriteSet generation purpose and should never be invoked on chain. + + +
fun disable_reconfiguration(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
fun disable_reconfiguration(aptos_framework: &signer) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    assert!(reconfiguration_enabled(), error::invalid_state(ECONFIGURATION));
+    move_to(aptos_framework, DisableReconfiguration {})
+}
+
+ + + +
+ + + +## Function `enable_reconfiguration` + +Private function to resume reconfiguration. +This function should only be used for offline WriteSet generation purpose and should never be invoked on chain. + + +
fun enable_reconfiguration(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
fun enable_reconfiguration(aptos_framework: &signer) acquires DisableReconfiguration {
+    system_addresses::assert_aptos_framework(aptos_framework);
+
+    assert!(!reconfiguration_enabled(), error::invalid_state(ECONFIGURATION));
+    DisableReconfiguration {} = move_from<DisableReconfiguration>(signer::address_of(aptos_framework));
+}
+
+ + + +
+ + + +## Function `reconfiguration_enabled` + + + +
fun reconfiguration_enabled(): bool
+
+ + + +
+Implementation + + +
fun reconfiguration_enabled(): bool {
+    !exists<DisableReconfiguration>(@aptos_framework)
+}
+
+ + + +
+ + + +## Function `reconfigure` + +Signal validators to start using new configuration. Must be called from friend config modules. + + +
public(friend) fun reconfigure()
+
+ + + +
+Implementation + + +
public(friend) fun reconfigure() acquires Configuration {
+    // Do not do anything if genesis has not finished.
+    if (chain_status::is_genesis() || timestamp::now_microseconds() == 0 || !reconfiguration_enabled()) {
+        return
+    };
+
+    let config_ref = borrow_global_mut<Configuration>(@aptos_framework);
+    let current_time = timestamp::now_microseconds();
+
+    // Do not do anything if a reconfiguration event is already emitted within this transaction.
+    //
+    // This is OK because:
+    // - The time changes in every non-empty block
+    // - A block automatically ends after a transaction that emits a reconfiguration event, which is guaranteed by
+    //   VM spec that all transactions coming after a reconfiguration transaction will be returned as Retry
+    //   status.
+    // - Each transaction must emit at most one reconfiguration event
+    //
+    // Thus, this check ensures that a transaction that does multiple "reconfiguration required" actions emits only
+    // one reconfiguration event.
+    //
+    if (current_time == config_ref.last_reconfiguration_time) {
+        return
+    };
+
+    reconfiguration_state::on_reconfig_start();
+
+    // Reconfiguration "forces the block" to end, as mentioned above. Therefore, we must process the collected fees
+    // explicitly so that staking can distribute them.
+    //
+    // This also handles the case when a validator is removed due to the governance proposal. In particular, removing
+    // the validator causes a reconfiguration. We explicitly process fees, i.e. we drain aggregatable coin and populate
+    // the fees table, prior to calling `on_new_epoch()`. That call, in turn, distributes transaction fees for all active
+    // and pending_inactive validators, which include any validator that is to be removed.
+    if (features::collect_and_distribute_gas_fees()) {
+        // All transactions after reconfiguration are Retry. Therefore, when the next
+        // block starts and tries to assign/burn collected fees it will be just 0 and
+        // nothing will be assigned.
+        transaction_fee::process_collected_fees();
+    };
+
+    // Call stake to compute the new validator set and distribute rewards and transaction fees.
+    stake::on_new_epoch();
+    storage_gas::on_reconfig();
+
+    assert!(current_time > config_ref.last_reconfiguration_time, error::invalid_state(EINVALID_BLOCK_TIME));
+    config_ref.last_reconfiguration_time = current_time;
+    spec {
+        assume config_ref.epoch + 1 <= MAX_U64;
+    };
+    config_ref.epoch = config_ref.epoch + 1;
+
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            NewEpoch {
+                epoch: config_ref.epoch,
+            },
+        );
+    };
+    event::emit_event<NewEpochEvent>(
+        &mut config_ref.events,
+        NewEpochEvent {
+            epoch: config_ref.epoch,
+        },
+    );
+
+    reconfiguration_state::on_reconfig_finish();
+}
+
+ + + +
+ + + +## Function `last_reconfiguration_time` + + + +
public fun last_reconfiguration_time(): u64
+
+ + + +
+Implementation + + +
public fun last_reconfiguration_time(): u64 acquires Configuration {
+    borrow_global<Configuration>(@aptos_framework).last_reconfiguration_time
+}
+
+ + + +
+ + + +## Function `current_epoch` + + + +
public fun current_epoch(): u64
+
+ + + +
+Implementation + + +
public fun current_epoch(): u64 acquires Configuration {
+    borrow_global<Configuration>(@aptos_framework).epoch
+}
+
+ + + +
+ + + +## Function `emit_genesis_reconfiguration_event` + +Emit a NewEpochEvent event. This function will be invoked by genesis directly to generate the very first +reconfiguration event. + + +
fun emit_genesis_reconfiguration_event()
+
+ + + +
+Implementation + + +
fun emit_genesis_reconfiguration_event() acquires Configuration {
+    let config_ref = borrow_global_mut<Configuration>(@aptos_framework);
+    assert!(config_ref.epoch == 0 && config_ref.last_reconfiguration_time == 0, error::invalid_state(ECONFIGURATION));
+    config_ref.epoch = 1;
+
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            NewEpoch {
+                epoch: config_ref.epoch,
+            },
+        );
+    };
+    event::emit_event<NewEpochEvent>(
+        &mut config_ref.events,
+        NewEpochEvent {
+            epoch: config_ref.epoch,
+        },
+    );
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1The Configuration resource is stored under the Aptos framework account with initial values upon module's initialization.MediumThe Configuration resource may only be initialized with specific values and published under the aptos_framework account.Formally verified via initialize.
2The reconfiguration status may be determined at any time without causing an abort, indicating whether or not the system allows reconfiguration.LowThe reconfiguration_enabled function will never abort and always returns a boolean value that accurately represents whether the system allows reconfiguration.Formally verified via reconfiguration_enabled.
3For each reconfiguration, the epoch value (config_ref.epoch) increases by 1, and one 'NewEpochEvent' is emitted.CriticalAfter reconfiguration, the reconfigure() function increases the epoch value of the configuration by one and increments the counter of the NewEpochEvent's EventHandle by one.Audited that these two values remain in sync.
4Reconfiguration is possible only if genesis has started and reconfiguration is enabled. Also, the last reconfiguration must not be the current time, returning early without further actions otherwise.HighThe reconfigure() function may only execute to perform successful reconfiguration when genesis has started and when reconfiguration is enabled. Without satisfying both conditions, the function returns early without executing any further actions.Formally verified via reconfigure.
5Consecutive reconfigurations without the passage of time are not permitted.HighThe reconfigure() function enforces the restriction that reconfiguration may only be performed when the current time is not equal to the last_reconfiguration_time.Formally verified via reconfigure.
+ + + + + + +### Module-level Specification + + +
pragma verify = true;
+pragma aborts_if_is_strict;
+invariant [suspendable] chain_status::is_operating() ==> exists<Configuration>(@aptos_framework);
+invariant [suspendable] chain_status::is_operating() ==>
+    (timestamp::spec_now_microseconds() >= last_reconfiguration_time());
+
+ + +Make sure the signer address is @aptos_framework. + + + + + +
schema AbortsIfNotAptosFramework {
+    aptos_framework: &signer;
+    let addr = signer::address_of(aptos_framework);
+    aborts_if !system_addresses::is_aptos_framework_address(addr);
+}
+
+ + + + + +### Function `initialize` + + +
public(friend) fun initialize(aptos_framework: &signer)
+
+ + +Address @aptos_framework must exist resource Account and Configuration. +Already exists in framework account. +Guid_creation_num should be 2 according to logic. + + +
include AbortsIfNotAptosFramework;
+let addr = signer::address_of(aptos_framework);
+let post config = global<Configuration>(@aptos_framework);
+requires exists<Account>(addr);
+aborts_if !(global<Account>(addr).guid_creation_num == 2);
+aborts_if exists<Configuration>(@aptos_framework);
+// This enforces high-level requirement 1:
+ensures exists<Configuration>(@aptos_framework);
+ensures config.epoch == 0 && config.last_reconfiguration_time == 0;
+ensures config.events == event::EventHandle<NewEpochEvent> {
+    counter: 0,
+    guid: guid::GUID {
+        id: guid::ID {
+            creation_num: 2,
+            addr: @aptos_framework
+        }
+    }
+};
+
+ + + + + +### Function `disable_reconfiguration` + + +
fun disable_reconfiguration(aptos_framework: &signer)
+
+ + + + +
include AbortsIfNotAptosFramework;
+aborts_if exists<DisableReconfiguration>(@aptos_framework);
+ensures exists<DisableReconfiguration>(@aptos_framework);
+
+ + + + + +### Function `enable_reconfiguration` + + +
fun enable_reconfiguration(aptos_framework: &signer)
+
+ + +Make sure the caller is admin and check the resource DisableReconfiguration. + + +
include AbortsIfNotAptosFramework;
+aborts_if !exists<DisableReconfiguration>(@aptos_framework);
+ensures !exists<DisableReconfiguration>(@aptos_framework);
+
+ + + + + +### Function `reconfiguration_enabled` + + +
fun reconfiguration_enabled(): bool
+
+ + + + +
// This enforces high-level requirement 2:
+aborts_if false;
+ensures result == !exists<DisableReconfiguration>(@aptos_framework);
+
+ + + + + +### Function `reconfigure` + + +
public(friend) fun reconfigure()
+
+ + + + +
pragma verify = true;
+pragma verify_duration_estimate = 600;
+requires exists<stake::ValidatorFees>(@aptos_framework);
+let success = !(chain_status::is_genesis() || timestamp::spec_now_microseconds() == 0 || !reconfiguration_enabled())
+    && timestamp::spec_now_microseconds() != global<Configuration>(@aptos_framework).last_reconfiguration_time;
+include features::spec_periodical_reward_rate_decrease_enabled() ==> staking_config::StakingRewardsConfigEnabledRequirement;
+include success ==> aptos_coin::ExistsAptosCoin;
+include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply;
+aborts_if false;
+ensures success ==> global<Configuration>(@aptos_framework).epoch == old(global<Configuration>(@aptos_framework).epoch) + 1;
+ensures success ==> global<Configuration>(@aptos_framework).last_reconfiguration_time == timestamp::spec_now_microseconds();
+// This enforces high-level requirement 4 and high-level requirement 5:
+ensures !success ==> global<Configuration>(@aptos_framework).epoch == old(global<Configuration>(@aptos_framework).epoch);
+
+ + + + + +### Function `last_reconfiguration_time` + + +
public fun last_reconfiguration_time(): u64
+
+ + + + +
aborts_if !exists<Configuration>(@aptos_framework);
+ensures result == global<Configuration>(@aptos_framework).last_reconfiguration_time;
+
+ + + + + +### Function `current_epoch` + + +
public fun current_epoch(): u64
+
+ + + + +
aborts_if !exists<Configuration>(@aptos_framework);
+ensures result == global<Configuration>(@aptos_framework).epoch;
+
+ + + + + +### Function `emit_genesis_reconfiguration_event` + + +
fun emit_genesis_reconfiguration_event()
+
+ + +When genesis_event emit the epoch and the last_reconfiguration_time . +Should equal to 0 + + +
aborts_if !exists<Configuration>(@aptos_framework);
+let config_ref = global<Configuration>(@aptos_framework);
+aborts_if !(config_ref.epoch == 0 && config_ref.last_reconfiguration_time == 0);
+ensures global<Configuration>(@aptos_framework).epoch == 1;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/reconfiguration_state.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/reconfiguration_state.md new file mode 100644 index 0000000000000..38f8c3c9b5914 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/reconfiguration_state.md @@ -0,0 +1,573 @@ + + + +# Module `0x1::reconfiguration_state` + +Reconfiguration meta-state resources and util functions. + +WARNING: reconfiguration_state::initialize() is required before RECONFIGURE_WITH_DKG can be enabled. + + +- [Resource `State`](#0x1_reconfiguration_state_State) +- [Struct `StateInactive`](#0x1_reconfiguration_state_StateInactive) +- [Struct `StateActive`](#0x1_reconfiguration_state_StateActive) +- [Constants](#@Constants_0) +- [Function `is_initialized`](#0x1_reconfiguration_state_is_initialized) +- [Function `initialize`](#0x1_reconfiguration_state_initialize) +- [Function `initialize_for_testing`](#0x1_reconfiguration_state_initialize_for_testing) +- [Function `is_in_progress`](#0x1_reconfiguration_state_is_in_progress) +- [Function `on_reconfig_start`](#0x1_reconfiguration_state_on_reconfig_start) +- [Function `start_time_secs`](#0x1_reconfiguration_state_start_time_secs) +- [Function `on_reconfig_finish`](#0x1_reconfiguration_state_on_reconfig_finish) +- [Specification](#@Specification_1) + - [Resource `State`](#@Specification_1_State) + - [Function `initialize`](#@Specification_1_initialize) + - [Function `initialize_for_testing`](#@Specification_1_initialize_for_testing) + - [Function `is_in_progress`](#@Specification_1_is_in_progress) + - [Function `on_reconfig_start`](#@Specification_1_on_reconfig_start) + - [Function `start_time_secs`](#@Specification_1_start_time_secs) + + +
use 0x1::copyable_any;
+use 0x1::error;
+use 0x1::string;
+use 0x1::system_addresses;
+use 0x1::timestamp;
+
+ + + + + +## Resource `State` + +Reconfiguration drivers update this resources to notify other modules of some reconfiguration state. + + +
struct State has key
+
+ + + +
+Fields + + +
+
+variant: copyable_any::Any +
+
+ The state variant packed as an Any. + Currently the variant type is one of the following. + - StateInactive + - StateActive +
+
+ + +
+ + + +## Struct `StateInactive` + +A state variant indicating no reconfiguration is in progress. + + +
struct StateInactive has copy, drop, store
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `StateActive` + +A state variant indicating a reconfiguration is in progress. + + +
struct StateActive has copy, drop, store
+
+ + + +
+Fields + + +
+
+start_time_secs: u64 +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const ERECONFIG_NOT_IN_PROGRESS: u64 = 1;
+
+ + + + + +## Function `is_initialized` + + + +
public fun is_initialized(): bool
+
+ + + +
+Implementation + + +
public fun is_initialized(): bool {
+    exists<State>(@aptos_framework)
+}
+
+ + + +
+ + + +## Function `initialize` + + + +
public fun initialize(fx: &signer)
+
+ + + +
+Implementation + + +
public fun initialize(fx: &signer) {
+    system_addresses::assert_aptos_framework(fx);
+    if (!exists<State>(@aptos_framework)) {
+        move_to(fx, State {
+            variant: copyable_any::pack(StateInactive {})
+        })
+    }
+}
+
+ + + +
+ + + +## Function `initialize_for_testing` + + + +
public fun initialize_for_testing(fx: &signer)
+
+ + + +
+Implementation + + +
public fun initialize_for_testing(fx: &signer) {
+    initialize(fx)
+}
+
+ + + +
+ + + +## Function `is_in_progress` + +Return whether the reconfiguration state is marked "in progress". + + +
public(friend) fun is_in_progress(): bool
+
+ + + +
+Implementation + + +
public(friend) fun is_in_progress(): bool acquires State {
+    if (!exists<State>(@aptos_framework)) {
+        return false
+    };
+
+    let state = borrow_global<State>(@aptos_framework);
+    let variant_type_name = *string::bytes(copyable_any::type_name(&state.variant));
+    variant_type_name == b"0x1::reconfiguration_state::StateActive"
+}
+
+ + + +
+ + + +## Function `on_reconfig_start` + +Called at the beginning of a reconfiguration (either immediate or async) +to mark the reconfiguration state "in progress" if it is currently "stopped". + +Also record the current time as the reconfiguration start time. (Some module, e.g., stake.move, needs this info). + + +
public(friend) fun on_reconfig_start()
+
+ + + +
+Implementation + + +
public(friend) fun on_reconfig_start() acquires State {
+    if (exists<State>(@aptos_framework)) {
+        let state = borrow_global_mut<State>(@aptos_framework);
+        let variant_type_name = *string::bytes(copyable_any::type_name(&state.variant));
+        if (variant_type_name == b"0x1::reconfiguration_state::StateInactive") {
+            state.variant = copyable_any::pack(StateActive {
+                start_time_secs: timestamp::now_seconds()
+            });
+        }
+    };
+}
+
+ + + +
+ + + +## Function `start_time_secs` + +Get the unix time when the currently in-progress reconfiguration started. +Abort if the reconfiguration state is not "in progress". + + +
public(friend) fun start_time_secs(): u64
+
+ + + +
+Implementation + + +
public(friend) fun start_time_secs(): u64 acquires State {
+    let state = borrow_global<State>(@aptos_framework);
+    let variant_type_name = *string::bytes(copyable_any::type_name(&state.variant));
+    if (variant_type_name == b"0x1::reconfiguration_state::StateActive") {
+        let active = copyable_any::unpack<StateActive>(state.variant);
+        active.start_time_secs
+    } else {
+        abort(error::invalid_state(ERECONFIG_NOT_IN_PROGRESS))
+    }
+}
+
+ + + +
+ + + +## Function `on_reconfig_finish` + +Called at the end of every reconfiguration to mark the state as "stopped". +Abort if the current state is not "in progress". + + +
public(friend) fun on_reconfig_finish()
+
+ + + +
+Implementation + + +
public(friend) fun on_reconfig_finish() acquires State {
+    if (exists<State>(@aptos_framework)) {
+        let state = borrow_global_mut<State>(@aptos_framework);
+        let variant_type_name = *string::bytes(copyable_any::type_name(&state.variant));
+        if (variant_type_name == b"0x1::reconfiguration_state::StateActive") {
+            state.variant = copyable_any::pack(StateInactive {});
+        } else {
+            abort(error::invalid_state(ERECONFIG_NOT_IN_PROGRESS))
+        }
+    }
+}
+
+ + + +
+ + + +## Specification + + + +
invariant [suspendable] chain_status::is_operating() ==> exists<State>(@aptos_framework);
+
+ + + + + +### Resource `State` + + +
struct State has key
+
+ + + +
+
+variant: copyable_any::Any +
+
+ The state variant packed as an Any. + Currently the variant type is one of the following. + - StateInactive + - StateActive +
+
+ + + +
invariant copyable_any::type_name(variant).bytes == b"0x1::reconfiguration_state::StateActive" ||
+    copyable_any::type_name(variant).bytes == b"0x1::reconfiguration_state::StateInactive";
+invariant copyable_any::type_name(variant).bytes == b"0x1::reconfiguration_state::StateActive"
+    ==> from_bcs::deserializable<StateActive>(variant.data);
+invariant copyable_any::type_name(variant).bytes == b"0x1::reconfiguration_state::StateInactive"
+    ==> from_bcs::deserializable<StateInactive>(variant.data);
+invariant copyable_any::type_name(variant).bytes == b"0x1::reconfiguration_state::StateActive" ==>
+    type_info::type_name<StateActive>() == variant.type_name;
+invariant copyable_any::type_name(variant).bytes == b"0x1::reconfiguration_state::StateInactive" ==>
+    type_info::type_name<StateInactive>() == variant.type_name;
+
+ + + + + +### Function `initialize` + + +
public fun initialize(fx: &signer)
+
+ + + + +
aborts_if signer::address_of(fx) != @aptos_framework;
+let post post_state = global<State>(@aptos_framework);
+ensures exists<State>(@aptos_framework);
+ensures !exists<State>(@aptos_framework) ==> from_bcs::deserializable<StateInactive>(post_state.variant.data);
+
+ + + + + +### Function `initialize_for_testing` + + +
public fun initialize_for_testing(fx: &signer)
+
+ + + + +
aborts_if signer::address_of(fx) != @aptos_framework;
+
+ + + + + +### Function `is_in_progress` + + +
public(friend) fun is_in_progress(): bool
+
+ + + + +
aborts_if false;
+
+ + + + + + + +
fun spec_is_in_progress(): bool {
+   if (!exists<State>(@aptos_framework)) {
+       false
+   } else {
+       copyable_any::type_name(global<State>(@aptos_framework).variant).bytes == b"0x1::reconfiguration_state::StateActive"
+   }
+}
+
+ + + + + +### Function `on_reconfig_start` + + +
public(friend) fun on_reconfig_start()
+
+ + + + +
aborts_if false;
+requires exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+let state = Any {
+    type_name: type_info::type_name<StateActive>(),
+    data: bcs::serialize(StateActive {
+        start_time_secs: timestamp::spec_now_seconds()
+    })
+};
+let pre_state = global<State>(@aptos_framework);
+let post post_state = global<State>(@aptos_framework);
+ensures (exists<State>(@aptos_framework) && copyable_any::type_name(pre_state.variant).bytes
+    == b"0x1::reconfiguration_state::StateInactive") ==> copyable_any::type_name(post_state.variant).bytes
+    == b"0x1::reconfiguration_state::StateActive";
+ensures (exists<State>(@aptos_framework) && copyable_any::type_name(pre_state.variant).bytes
+    == b"0x1::reconfiguration_state::StateInactive") ==> post_state.variant == state;
+ensures (exists<State>(@aptos_framework) && copyable_any::type_name(pre_state.variant).bytes
+    == b"0x1::reconfiguration_state::StateInactive") ==> from_bcs::deserializable<StateActive>(post_state.variant.data);
+
+ + + + + +### Function `start_time_secs` + + +
public(friend) fun start_time_secs(): u64
+
+ + + + +
include StartTimeSecsAbortsIf;
+
+ + + + + + + +
fun spec_start_time_secs(): u64 {
+   use aptos_std::from_bcs;
+   let state = global<State>(@aptos_framework);
+   from_bcs::deserialize<StateActive>(state.variant.data).start_time_secs
+}
+
+ + + + + + + +
schema StartTimeSecsRequirement {
+    requires exists<State>(@aptos_framework);
+    requires copyable_any::type_name(global<State>(@aptos_framework).variant).bytes
+        == b"0x1::reconfiguration_state::StateActive";
+    include UnpackRequiresStateActive {
+        x:  global<State>(@aptos_framework).variant
+    };
+}
+
+ + + + + + + +
schema UnpackRequiresStateActive {
+    x: Any;
+    requires type_info::type_name<StateActive>() == x.type_name && from_bcs::deserializable<StateActive>(x.data);
+}
+
+ + + + + + + +
schema StartTimeSecsAbortsIf {
+    aborts_if !exists<State>(@aptos_framework);
+    include  copyable_any::type_name(global<State>(@aptos_framework).variant).bytes
+        == b"0x1::reconfiguration_state::StateActive" ==>
+    copyable_any::UnpackAbortsIf<StateActive> {
+        self: global<State>(@aptos_framework).variant
+    };
+    aborts_if copyable_any::type_name(global<State>(@aptos_framework).variant).bytes
+        != b"0x1::reconfiguration_state::StateActive";
+}
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/reconfiguration_with_dkg.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/reconfiguration_with_dkg.md new file mode 100644 index 0000000000000..85b4f1dbc74e2 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/reconfiguration_with_dkg.md @@ -0,0 +1,251 @@ + + + +# Module `0x1::reconfiguration_with_dkg` + +Reconfiguration with DKG helper functions. + + +- [Function `try_start`](#0x1_reconfiguration_with_dkg_try_start) +- [Function `finish`](#0x1_reconfiguration_with_dkg_finish) +- [Function `finish_with_dkg_result`](#0x1_reconfiguration_with_dkg_finish_with_dkg_result) +- [Specification](#@Specification_0) + - [Function `try_start`](#@Specification_0_try_start) + - [Function `finish`](#@Specification_0_finish) + - [Function `finish_with_dkg_result`](#@Specification_0_finish_with_dkg_result) + + +
use 0x1::consensus_config;
+use 0x1::dkg;
+use 0x1::execution_config;
+use 0x1::features;
+use 0x1::gas_schedule;
+use 0x1::jwk_consensus_config;
+use 0x1::jwks;
+use 0x1::keyless_account;
+use 0x1::option;
+use 0x1::randomness_api_v0_config;
+use 0x1::randomness_config;
+use 0x1::randomness_config_seqnum;
+use 0x1::reconfiguration;
+use 0x1::reconfiguration_state;
+use 0x1::stake;
+use 0x1::system_addresses;
+use 0x1::validator_consensus_info;
+use 0x1::version;
+
+ + + + + +## Function `try_start` + +Trigger a reconfiguration with DKG. +Do nothing if one is already in progress. + + +
public(friend) fun try_start()
+
+ + + +
+Implementation + + +
public(friend) fun try_start() {
+    let incomplete_dkg_session = dkg::incomplete_session();
+    if (option::is_some(&incomplete_dkg_session)) {
+        let session = option::borrow(&incomplete_dkg_session);
+        if (dkg::session_dealer_epoch(session) == reconfiguration::current_epoch()) {
+            return
+        }
+    };
+    reconfiguration_state::on_reconfig_start();
+    let cur_epoch = reconfiguration::current_epoch();
+    dkg::start(
+        cur_epoch,
+        randomness_config::current(),
+        stake::cur_validator_consensus_infos(),
+        stake::next_validator_consensus_infos(),
+    );
+}
+
+ + + +
+ + + +## Function `finish` + +Clear incomplete DKG session, if it exists. +Apply buffered on-chain configs (except for ValidatorSet, which is done inside reconfiguration::reconfigure()). +Re-enable validator set changes. +Run the default reconfiguration to enter the new epoch. + + +
public(friend) fun finish(framework: &signer)
+
+ + + +
+Implementation + + +
public(friend) fun finish(framework: &signer) {
+    system_addresses::assert_aptos_framework(framework);
+    dkg::try_clear_incomplete_session(framework);
+    consensus_config::on_new_epoch(framework);
+    execution_config::on_new_epoch(framework);
+    gas_schedule::on_new_epoch(framework);
+    std::version::on_new_epoch(framework);
+    features::on_new_epoch(framework);
+    jwk_consensus_config::on_new_epoch(framework);
+    jwks::on_new_epoch(framework);
+    keyless_account::on_new_epoch(framework);
+    randomness_config_seqnum::on_new_epoch(framework);
+    randomness_config::on_new_epoch(framework);
+    randomness_api_v0_config::on_new_epoch(framework);
+    reconfiguration::reconfigure();
+}
+
+ + + +
+ + + +## Function `finish_with_dkg_result` + +Complete the current reconfiguration with DKG. +Abort if no DKG is in progress. + + +
fun finish_with_dkg_result(account: &signer, dkg_result: vector<u8>)
+
+ + + +
+Implementation + + +
fun finish_with_dkg_result(account: &signer, dkg_result: vector<u8>) {
+    dkg::finish(dkg_result);
+    finish(account);
+}
+
+ + + +
+ + + +## Specification + + + +
pragma verify = true;
+
+ + + + + +### Function `try_start` + + +
public(friend) fun try_start()
+
+ + + + +
pragma verify_duration_estimate = 120;
+requires exists<reconfiguration::Configuration>(@aptos_framework);
+requires chain_status::is_operating();
+include stake::ResourceRequirement;
+include stake::GetReconfigStartTimeRequirement;
+include features::spec_periodical_reward_rate_decrease_enabled(
+) ==> staking_config::StakingRewardsConfigEnabledRequirement;
+aborts_if false;
+pragma verify_duration_estimate = 600;
+
+ + + + + +### Function `finish` + + +
public(friend) fun finish(framework: &signer)
+
+ + + + +
pragma verify_duration_estimate = 1500;
+include FinishRequirement;
+aborts_if false;
+
+ + + + + + + +
schema FinishRequirement {
+    framework: signer;
+    requires signer::address_of(framework) == @aptos_framework;
+    requires chain_status::is_operating();
+    requires exists<CoinInfo<AptosCoin>>(@aptos_framework);
+    include staking_config::StakingRewardsConfigRequirement;
+    requires exists<stake::ValidatorFees>(@aptos_framework);
+    include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply;
+    requires exists<features::Features>(@std);
+    include config_buffer::OnNewEpochRequirement<version::Version>;
+    include config_buffer::OnNewEpochRequirement<gas_schedule::GasScheduleV2>;
+    include config_buffer::OnNewEpochRequirement<execution_config::ExecutionConfig>;
+    include config_buffer::OnNewEpochRequirement<consensus_config::ConsensusConfig>;
+    include config_buffer::OnNewEpochRequirement<jwks::SupportedOIDCProviders>;
+    include config_buffer::OnNewEpochRequirement<randomness_config::RandomnessConfig>;
+    include config_buffer::OnNewEpochRequirement<randomness_config_seqnum::RandomnessConfigSeqNum>;
+    include config_buffer::OnNewEpochRequirement<randomness_api_v0_config::AllowCustomMaxGasFlag>;
+    include config_buffer::OnNewEpochRequirement<randomness_api_v0_config::RequiredGasDeposit>;
+    include config_buffer::OnNewEpochRequirement<jwk_consensus_config::JWKConsensusConfig>;
+    include config_buffer::OnNewEpochRequirement<keyless_account::Configuration>;
+    include config_buffer::OnNewEpochRequirement<keyless_account::Groth16VerificationKey>;
+}
+
+ + + + + +### Function `finish_with_dkg_result` + + +
fun finish_with_dkg_result(account: &signer, dkg_result: vector<u8>)
+
+ + + + +
pragma verify_duration_estimate = 1500;
+include FinishRequirement {
+    framework: account
+};
+requires dkg::has_incomplete_session();
+aborts_if false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/resource_account.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/resource_account.md new file mode 100644 index 0000000000000..5922cc99737f8 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/resource_account.md @@ -0,0 +1,635 @@ + + + +# Module `0x1::resource_account` + +A resource account is used to manage resources independent of an account managed by a user. +This contains several utilities to make using resource accounts more effective. + + + + +### Resource Accounts to manage liquidity pools + + +A dev wishing to use resource accounts for a liquidity pool, would likely do the following: + +1. Create a new account using resource_account::create_resource_account. This creates the +account, stores the signer_cap within a resource_account::Container, and rotates the key to +the current account's authentication key or a provided authentication key. +2. Define the liquidity pool module's address to be the same as the resource account. +3. Construct a package-publishing transaction for the resource account using the +authentication key used in step 1. +4. In the liquidity pool module's init_module function, call retrieve_resource_account_cap +which will retrieve the signer_cap and rotate the resource account's authentication key to +0x0, effectively locking it off. +5. When adding a new coin, the liquidity pool will load the capability and hence the signer to +register and store new LiquidityCoin resources. + +Code snippets to help: + +``` +fun init_module(resource_account: &signer) { +let dev_address = @DEV_ADDR; +let signer_cap = retrieve_resource_account_cap(resource_account, dev_address); +let lp = LiquidityPoolInfo { signer_cap: signer_cap, ... 
}; +move_to(resource_account, lp); +} +``` + +Later on during a coin registration: +``` +public fun add_coin(lp: &LP, x: Coin, y: Coin) { +if(!exists(LP::Address(lp), LiquidityCoin)) { +let mint, burn = Coin::initialize>(...); +move_to(&create_signer_with_capability(&lp.cap), LiquidityCoin{ mint, burn }); +} +... +} +``` + + + +### Resource accounts to manage an account for module publishing (i.e., contract account) + + +A dev wishes to have an account dedicated to managing a contract. The contract itself does not +require signer post initialization. The dev could do the following: +1. Create a new account using resource_account::create_resource_account_and_publish_package. +This creates the account and publishes the package for that account. +2. At a later point in time, the account creator can move the signer capability to the module. + +``` +struct MyModuleResource has key { +... +resource_signer_cap: Option, +} + +public fun provide_signer_capability(resource_signer_cap: SignerCapability) { +let account_addr = account::get_signer_capability_address(resource_signer_cap); +let resource_addr = type_info::account_address(&type_info::type_of()); +assert!(account_addr == resource_addr, EADDRESS_MISMATCH); +let module = borrow_global_mut(account_addr); +module.resource_signer_cap = option::some(resource_signer_cap); +} +``` + + + - [Resource Accounts to manage liquidity pools](#@Resource_Accounts_to_manage_liquidity_pools_0) + - [Resource accounts to manage an account for module publishing (i.e., contract account)](#@Resource_accounts_to_manage_an_account_for_module_publishing_(i.e.,_contract_account)_1) +- [Resource `Container`](#0x1_resource_account_Container) +- [Constants](#@Constants_2) +- [Function `create_resource_account`](#0x1_resource_account_create_resource_account) +- [Function `create_resource_account_and_fund`](#0x1_resource_account_create_resource_account_and_fund) +- [Function 
`create_resource_account_and_publish_package`](#0x1_resource_account_create_resource_account_and_publish_package) +- [Function `rotate_account_authentication_key_and_store_capability`](#0x1_resource_account_rotate_account_authentication_key_and_store_capability) +- [Function `retrieve_resource_account_cap`](#0x1_resource_account_retrieve_resource_account_cap) +- [Specification](#@Specification_3) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `create_resource_account`](#@Specification_3_create_resource_account) + - [Function `create_resource_account_and_fund`](#@Specification_3_create_resource_account_and_fund) + - [Function `create_resource_account_and_publish_package`](#@Specification_3_create_resource_account_and_publish_package) + - [Function `rotate_account_authentication_key_and_store_capability`](#@Specification_3_rotate_account_authentication_key_and_store_capability) + - [Function `retrieve_resource_account_cap`](#@Specification_3_retrieve_resource_account_cap) + + +
use 0x1::account;
+use 0x1::aptos_coin;
+use 0x1::code;
+use 0x1::coin;
+use 0x1::error;
+use 0x1::signer;
+use 0x1::simple_map;
+use 0x1::vector;
+
+ + + + + +## Resource `Container` + + + +
struct Container has key
+
+ + + +
+Fields + + +
+
+store: simple_map::SimpleMap<address, account::SignerCapability> +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const ZERO_AUTH_KEY: vector<u8> = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
+
+ + + + + +Container resource not found in account + + +
const ECONTAINER_NOT_PUBLISHED: u64 = 1;
+
+ + + + + +The resource account was not created by the specified source account + + +
const EUNAUTHORIZED_NOT_OWNER: u64 = 2;
+
+ + + + + +## Function `create_resource_account` + +Creates a new resource account and rotates the authentication key to either +the optional auth key if it is non-empty (though auth keys are 32-bytes) +or the source accounts current auth key. + + +
public entry fun create_resource_account(origin: &signer, seed: vector<u8>, optional_auth_key: vector<u8>)
+
+ + + +
+Implementation + + +
public entry fun create_resource_account(
+    origin: &signer,
+    seed: vector<u8>,
+    optional_auth_key: vector<u8>,
+) acquires Container {
+    let (resource, resource_signer_cap) = account::create_resource_account(origin, seed);
+    rotate_account_authentication_key_and_store_capability(
+        origin,
+        resource,
+        resource_signer_cap,
+        optional_auth_key,
+    );
+}
+
+ + + +
+ + + +## Function `create_resource_account_and_fund` + +Creates a new resource account, transfer the amount of coins from the origin to the resource +account, and rotates the authentication key to either the optional auth key if it is +non-empty (though auth keys are 32-bytes) or the source accounts current auth key. Note, +this function adds additional resource ownership to the resource account and should only be +used for resource accounts that need access to Coin<AptosCoin>. + + +
public entry fun create_resource_account_and_fund(origin: &signer, seed: vector<u8>, optional_auth_key: vector<u8>, fund_amount: u64)
+
+ + + +
+Implementation + + +
public entry fun create_resource_account_and_fund(
+    origin: &signer,
+    seed: vector<u8>,
+    optional_auth_key: vector<u8>,
+    fund_amount: u64,
+) acquires Container {
+    let (resource, resource_signer_cap) = account::create_resource_account(origin, seed);
+    coin::register<AptosCoin>(&resource);
+    coin::transfer<AptosCoin>(origin, signer::address_of(&resource), fund_amount);
+    rotate_account_authentication_key_and_store_capability(
+        origin,
+        resource,
+        resource_signer_cap,
+        optional_auth_key,
+    );
+}
+
+ + + +
+ + + +## Function `create_resource_account_and_publish_package` + +Creates a new resource account, publishes the package under this account, and leaves the signer cap readily available for pickup. + + +
public entry fun create_resource_account_and_publish_package(origin: &signer, seed: vector<u8>, metadata_serialized: vector<u8>, code: vector<vector<u8>>)
+
+ + + +
+Implementation + + +
public entry fun create_resource_account_and_publish_package(
+    origin: &signer,
+    seed: vector<u8>,
+    metadata_serialized: vector<u8>,
+    code: vector<vector<u8>>,
+) acquires Container {
+    let (resource, resource_signer_cap) = account::create_resource_account(origin, seed);
+    aptos_framework::code::publish_package_txn(&resource, metadata_serialized, code);
+    rotate_account_authentication_key_and_store_capability(
+        origin,
+        resource,
+        resource_signer_cap,
+        ZERO_AUTH_KEY,
+    );
+}
+
+ + + +
+ + + +## Function `rotate_account_authentication_key_and_store_capability` + + + +
fun rotate_account_authentication_key_and_store_capability(origin: &signer, resource: signer, resource_signer_cap: account::SignerCapability, optional_auth_key: vector<u8>)
+
+ + + +
+Implementation + + +
fun rotate_account_authentication_key_and_store_capability(
+    origin: &signer,
+    resource: signer,
+    resource_signer_cap: account::SignerCapability,
+    optional_auth_key: vector<u8>,
+) acquires Container {
+    let origin_addr = signer::address_of(origin);
+    if (!exists<Container>(origin_addr)) {
+        move_to(origin, Container { store: simple_map::create() })
+    };
+
+    let container = borrow_global_mut<Container>(origin_addr);
+    let resource_addr = signer::address_of(&resource);
+    simple_map::add(&mut container.store, resource_addr, resource_signer_cap);
+
+    let auth_key = if (vector::is_empty(&optional_auth_key)) {
+        account::get_authentication_key(origin_addr)
+    } else {
+        optional_auth_key
+    };
+    account::rotate_authentication_key_internal(&resource, auth_key);
+}
+
+ + + +
+ + + +## Function `retrieve_resource_account_cap` + +When called by the resource account, it will retrieve the capability associated with that +account and rotate the account's auth key to 0x0 making the account inaccessible without +the SignerCapability. + + +
public fun retrieve_resource_account_cap(resource: &signer, source_addr: address): account::SignerCapability
+
+ + + +
+Implementation + + +
public fun retrieve_resource_account_cap(
+    resource: &signer,
+    source_addr: address,
+): account::SignerCapability acquires Container {
+    assert!(exists<Container>(source_addr), error::not_found(ECONTAINER_NOT_PUBLISHED));
+
+    let resource_addr = signer::address_of(resource);
+    let (resource_signer_cap, empty_container) = {
+        let container = borrow_global_mut<Container>(source_addr);
+        assert!(
+            simple_map::contains_key(&container.store, &resource_addr),
+            error::invalid_argument(EUNAUTHORIZED_NOT_OWNER)
+        );
+        let (_resource_addr, signer_cap) = simple_map::remove(&mut container.store, &resource_addr);
+        (signer_cap, simple_map::length(&container.store) == 0)
+    };
+
+    if (empty_container) {
+        let container = move_from(source_addr);
+        let Container { store } = container;
+        simple_map::destroy_empty(store);
+    };
+
+    account::rotate_authentication_key_internal(resource, ZERO_AUTH_KEY);
+    resource_signer_cap
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1The length of the authentication key must be 32 bytes.MediumThe rotate_authentication_key_internal function ensures that the authentication key passed to it is of 32 bytes.Formally verified via RotateAccountAuthenticationKeyAndStoreCapabilityAbortsIf.
2The Container structure must exist in the origin account in order to rotate the authentication key of a resource account and to store its signer capability.HighThe rotate_account_authentication_key_and_store_capability function makes sure the Container structure exists under the origin account.Formally verified via rotate_account_authentication_key_and_store_capability.
3The resource account is registered for the Aptos coin.HighThe create_resource_account_and_fund ensures the newly created resource account is registered to receive the AptosCoin.Formally verified via create_resource_account_and_fund.
4It is not possible to store two capabilities for the same resource address.MediumThe rotate_account_authentication_key_and_store_capability will abort if the resource signer capability for the given resource address already exists in container.store.Formally verified via rotate_account_authentication_key_and_store_capability.
5If provided, the optional authentication key is used for key rotation.LowThe rotate_account_authentication_key_and_store_capability function will use optional_auth_key if it is provided as a parameter.Formally verified via rotate_account_authentication_key_and_store_capability.
6The container stores the resource accounts' signer capabilities.Lowretrieve_resource_account_cap will abort if there is no Container structure assigned to source_addr.Formally verified via retrieve_resource_account_cap.
7Resource account may retrieve the signer capability if it was previously added to its container.Highretrieve_resource_account_cap will abort if the container of source_addr doesn't store the signer capability for the given resource.Formally verified via retrieve_resource_account_cap.
8Retrieving the last signer capability from the container must result in the container being removed.Lowretrieve_resource_account_cap will remove the container if the retrieved signer_capability was the last one stored under it.Formally verified via retrieve_resource_account_cap.
+ + + + + + +### Module-level Specification + + +
pragma verify = true;
+pragma aborts_if_is_strict;
+
+ + + + + +### Function `create_resource_account` + + +
public entry fun create_resource_account(origin: &signer, seed: vector<u8>, optional_auth_key: vector<u8>)
+
+ + + + +
let source_addr = signer::address_of(origin);
+let resource_addr = account::spec_create_resource_address(source_addr, seed);
+include RotateAccountAuthenticationKeyAndStoreCapabilityAbortsIfWithoutAccountLimit;
+
+ + + + + +### Function `create_resource_account_and_fund` + + +
public entry fun create_resource_account_and_fund(origin: &signer, seed: vector<u8>, optional_auth_key: vector<u8>, fund_amount: u64)
+
+ + + + +
pragma verify = false;
+let source_addr = signer::address_of(origin);
+let resource_addr = account::spec_create_resource_address(source_addr, seed);
+let coin_store_resource = global<coin::CoinStore<AptosCoin>>(resource_addr);
+include aptos_account::WithdrawAbortsIf<AptosCoin>{from: origin, amount: fund_amount};
+include aptos_account::GuidAbortsIf<AptosCoin>{to: resource_addr};
+include RotateAccountAuthenticationKeyAndStoreCapabilityAbortsIfWithoutAccountLimit;
+aborts_if coin::spec_is_account_registered<AptosCoin>(resource_addr) && coin_store_resource.frozen;
+// This enforces high-level requirement 3:
+ensures exists<aptos_framework::coin::CoinStore<AptosCoin>>(resource_addr);
+
+ + + + + +### Function `create_resource_account_and_publish_package` + + +
public entry fun create_resource_account_and_publish_package(origin: &signer, seed: vector<u8>, metadata_serialized: vector<u8>, code: vector<vector<u8>>)
+
+ + + + +
pragma verify = false;
+let source_addr = signer::address_of(origin);
+let resource_addr = account::spec_create_resource_address(source_addr, seed);
+let optional_auth_key = ZERO_AUTH_KEY;
+include RotateAccountAuthenticationKeyAndStoreCapabilityAbortsIfWithoutAccountLimit;
+
+ + + + + +### Function `rotate_account_authentication_key_and_store_capability` + + +
fun rotate_account_authentication_key_and_store_capability(origin: &signer, resource: signer, resource_signer_cap: account::SignerCapability, optional_auth_key: vector<u8>)
+
+ + + + +
let resource_addr = signer::address_of(resource);
+// This enforces high-level requirement 1:
+include RotateAccountAuthenticationKeyAndStoreCapabilityAbortsIf;
+// This enforces high-level requirement 2:
+ensures exists<Container>(signer::address_of(origin));
+// This enforces high-level requirement 5:
+ensures vector::length(optional_auth_key) != 0 ==>
+    global<aptos_framework::account::Account>(resource_addr).authentication_key == optional_auth_key;
+
+ + + + + + + +
schema RotateAccountAuthenticationKeyAndStoreCapabilityAbortsIf {
+    origin: signer;
+    resource_addr: address;
+    optional_auth_key: vector<u8>;
+    let source_addr = signer::address_of(origin);
+    let container = global<Container>(source_addr);
+    let get = len(optional_auth_key) == 0;
+    aborts_if get && !exists<Account>(source_addr);
+    // This enforces high-level requirement 4:
+    aborts_if exists<Container>(source_addr) && simple_map::spec_contains_key(container.store, resource_addr);
+    aborts_if get && !(exists<Account>(resource_addr) && len(global<Account>(source_addr).authentication_key) == 32);
+    aborts_if !get && !(exists<Account>(resource_addr) && len(optional_auth_key) == 32);
+    ensures simple_map::spec_contains_key(global<Container>(source_addr).store, resource_addr);
+    ensures exists<Container>(source_addr);
+}
+
+ + + + + + + +
schema RotateAccountAuthenticationKeyAndStoreCapabilityAbortsIfWithoutAccountLimit {
+    source_addr: address;
+    optional_auth_key: vector<u8>;
+    resource_addr: address;
+    let container = global<Container>(source_addr);
+    let get = len(optional_auth_key) == 0;
+    let account = global<account::Account>(source_addr);
+    aborts_if len(ZERO_AUTH_KEY) != 32;
+    include account::exists_at(resource_addr) ==> account::CreateResourceAccountAbortsIf;
+    include !account::exists_at(resource_addr) ==> account::CreateAccountAbortsIf {addr: resource_addr};
+    aborts_if get && !exists<account::Account>(source_addr);
+    aborts_if exists<Container>(source_addr) && simple_map::spec_contains_key(container.store, resource_addr);
+    aborts_if get && len(global<account::Account>(source_addr).authentication_key) != 32;
+    aborts_if !get && len(optional_auth_key) != 32;
+    ensures simple_map::spec_contains_key(global<Container>(source_addr).store, resource_addr);
+    ensures exists<Container>(source_addr);
+}
+
+ + + + + +### Function `retrieve_resource_account_cap` + + +
public fun retrieve_resource_account_cap(resource: &signer, source_addr: address): account::SignerCapability
+
+ + + + +
// This enforces high-level requirement 6:
+aborts_if !exists<Container>(source_addr);
+let resource_addr = signer::address_of(resource);
+let container = global<Container>(source_addr);
+// This enforces high-level requirement 7:
+aborts_if !simple_map::spec_contains_key(container.store, resource_addr);
+aborts_if !exists<account::Account>(resource_addr);
+// This enforces high-level requirement 8:
+ensures simple_map::spec_contains_key(old(global<Container>(source_addr)).store, resource_addr) &&
+    simple_map::spec_len(old(global<Container>(source_addr)).store) == 1 ==> !exists<Container>(source_addr);
+ensures exists<Container>(source_addr) ==> !simple_map::spec_contains_key(global<Container>(source_addr).store, resource_addr);
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/stake.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/stake.md new file mode 100644 index 0000000000000..865c0df8233c7 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/stake.md @@ -0,0 +1,6105 @@ + + + +# Module `0x1::stake` + + +Validator lifecycle: +1. Prepare a validator node set up and call stake::initialize_validator +2. Once ready to deposit stake (or have funds assigned by a staking service in exchange for ownership capability), +call stake::add_stake (or *_with_cap versions if called from the staking service) +3. Call stake::join_validator_set (or _with_cap version) to join the active validator set. Changes are effective in +the next epoch. +4. Validate and gain rewards. The stake will automatically be locked up for a fixed duration (set by governance) and +automatically renewed at expiration. +5. At any point, if the validator operator wants to update the consensus key or network/fullnode addresses, they can +call stake::rotate_consensus_key and stake::update_network_and_fullnode_addresses. Similar to changes to stake, the +changes to consensus key/network/fullnode addresses are only effective in the next epoch. +6. Validator can request to unlock their stake at any time. However, their stake will only become withdrawable when +their current lockup expires. This can be at most as long as the fixed lockup duration. +7. After exiting, the validator can either explicitly leave the validator set by calling stake::leave_validator_set +or if their stake drops below the min required, they would get removed at the end of the epoch. +8. Validator can always rejoin the validator set by going through steps 2-3 again. +9. An owner can always switch operators by calling stake::set_operator. +10. An owner can always switch designated voter by calling stake::set_designated_voter. 
+ + +- [Resource `OwnerCapability`](#0x1_stake_OwnerCapability) +- [Resource `StakePool`](#0x1_stake_StakePool) +- [Resource `ValidatorConfig`](#0x1_stake_ValidatorConfig) +- [Struct `ValidatorInfo`](#0x1_stake_ValidatorInfo) +- [Resource `ValidatorSet`](#0x1_stake_ValidatorSet) +- [Resource `AptosCoinCapabilities`](#0x1_stake_AptosCoinCapabilities) +- [Struct `IndividualValidatorPerformance`](#0x1_stake_IndividualValidatorPerformance) +- [Resource `ValidatorPerformance`](#0x1_stake_ValidatorPerformance) +- [Struct `RegisterValidatorCandidateEvent`](#0x1_stake_RegisterValidatorCandidateEvent) +- [Struct `RegisterValidatorCandidate`](#0x1_stake_RegisterValidatorCandidate) +- [Struct `SetOperatorEvent`](#0x1_stake_SetOperatorEvent) +- [Struct `SetOperator`](#0x1_stake_SetOperator) +- [Struct `AddStakeEvent`](#0x1_stake_AddStakeEvent) +- [Struct `AddStake`](#0x1_stake_AddStake) +- [Struct `ReactivateStakeEvent`](#0x1_stake_ReactivateStakeEvent) +- [Struct `ReactivateStake`](#0x1_stake_ReactivateStake) +- [Struct `RotateConsensusKeyEvent`](#0x1_stake_RotateConsensusKeyEvent) +- [Struct `RotateConsensusKey`](#0x1_stake_RotateConsensusKey) +- [Struct `UpdateNetworkAndFullnodeAddressesEvent`](#0x1_stake_UpdateNetworkAndFullnodeAddressesEvent) +- [Struct `UpdateNetworkAndFullnodeAddresses`](#0x1_stake_UpdateNetworkAndFullnodeAddresses) +- [Struct `IncreaseLockupEvent`](#0x1_stake_IncreaseLockupEvent) +- [Struct `IncreaseLockup`](#0x1_stake_IncreaseLockup) +- [Struct `JoinValidatorSetEvent`](#0x1_stake_JoinValidatorSetEvent) +- [Struct `JoinValidatorSet`](#0x1_stake_JoinValidatorSet) +- [Struct `DistributeRewardsEvent`](#0x1_stake_DistributeRewardsEvent) +- [Struct `DistributeRewards`](#0x1_stake_DistributeRewards) +- [Struct `UnlockStakeEvent`](#0x1_stake_UnlockStakeEvent) +- [Struct `UnlockStake`](#0x1_stake_UnlockStake) +- [Struct `WithdrawStakeEvent`](#0x1_stake_WithdrawStakeEvent) +- [Struct `WithdrawStake`](#0x1_stake_WithdrawStake) +- [Struct 
`LeaveValidatorSetEvent`](#0x1_stake_LeaveValidatorSetEvent) +- [Struct `LeaveValidatorSet`](#0x1_stake_LeaveValidatorSet) +- [Resource `ValidatorFees`](#0x1_stake_ValidatorFees) +- [Resource `AllowedValidators`](#0x1_stake_AllowedValidators) +- [Resource `Ghost$ghost_valid_perf`](#0x1_stake_Ghost$ghost_valid_perf) +- [Resource `Ghost$ghost_proposer_idx`](#0x1_stake_Ghost$ghost_proposer_idx) +- [Resource `Ghost$ghost_active_num`](#0x1_stake_Ghost$ghost_active_num) +- [Resource `Ghost$ghost_pending_inactive_num`](#0x1_stake_Ghost$ghost_pending_inactive_num) +- [Constants](#@Constants_0) +- [Function `initialize_validator_fees`](#0x1_stake_initialize_validator_fees) +- [Function `add_transaction_fee`](#0x1_stake_add_transaction_fee) +- [Function `get_lockup_secs`](#0x1_stake_get_lockup_secs) +- [Function `get_remaining_lockup_secs`](#0x1_stake_get_remaining_lockup_secs) +- [Function `get_stake`](#0x1_stake_get_stake) +- [Function `get_validator_state`](#0x1_stake_get_validator_state) +- [Function `get_current_epoch_voting_power`](#0x1_stake_get_current_epoch_voting_power) +- [Function `get_delegated_voter`](#0x1_stake_get_delegated_voter) +- [Function `get_operator`](#0x1_stake_get_operator) +- [Function `get_owned_pool_address`](#0x1_stake_get_owned_pool_address) +- [Function `get_validator_index`](#0x1_stake_get_validator_index) +- [Function `get_current_epoch_proposal_counts`](#0x1_stake_get_current_epoch_proposal_counts) +- [Function `get_validator_config`](#0x1_stake_get_validator_config) +- [Function `stake_pool_exists`](#0x1_stake_stake_pool_exists) +- [Function `initialize`](#0x1_stake_initialize) +- [Function `store_aptos_coin_mint_cap`](#0x1_stake_store_aptos_coin_mint_cap) +- [Function `remove_validators`](#0x1_stake_remove_validators) +- [Function `initialize_stake_owner`](#0x1_stake_initialize_stake_owner) +- [Function `initialize_validator`](#0x1_stake_initialize_validator) +- [Function `initialize_owner`](#0x1_stake_initialize_owner) +- [Function 
`extract_owner_cap`](#0x1_stake_extract_owner_cap) +- [Function `deposit_owner_cap`](#0x1_stake_deposit_owner_cap) +- [Function `destroy_owner_cap`](#0x1_stake_destroy_owner_cap) +- [Function `set_operator`](#0x1_stake_set_operator) +- [Function `set_operator_with_cap`](#0x1_stake_set_operator_with_cap) +- [Function `set_delegated_voter`](#0x1_stake_set_delegated_voter) +- [Function `set_delegated_voter_with_cap`](#0x1_stake_set_delegated_voter_with_cap) +- [Function `add_stake`](#0x1_stake_add_stake) +- [Function `add_stake_with_cap`](#0x1_stake_add_stake_with_cap) +- [Function `reactivate_stake`](#0x1_stake_reactivate_stake) +- [Function `reactivate_stake_with_cap`](#0x1_stake_reactivate_stake_with_cap) +- [Function `rotate_consensus_key`](#0x1_stake_rotate_consensus_key) +- [Function `update_network_and_fullnode_addresses`](#0x1_stake_update_network_and_fullnode_addresses) +- [Function `increase_lockup`](#0x1_stake_increase_lockup) +- [Function `increase_lockup_with_cap`](#0x1_stake_increase_lockup_with_cap) +- [Function `join_validator_set`](#0x1_stake_join_validator_set) +- [Function `join_validator_set_internal`](#0x1_stake_join_validator_set_internal) +- [Function `unlock`](#0x1_stake_unlock) +- [Function `unlock_with_cap`](#0x1_stake_unlock_with_cap) +- [Function `withdraw`](#0x1_stake_withdraw) +- [Function `withdraw_with_cap`](#0x1_stake_withdraw_with_cap) +- [Function `leave_validator_set`](#0x1_stake_leave_validator_set) +- [Function `is_current_epoch_validator`](#0x1_stake_is_current_epoch_validator) +- [Function `update_performance_statistics`](#0x1_stake_update_performance_statistics) +- [Function `on_new_epoch`](#0x1_stake_on_new_epoch) +- [Function `cur_validator_consensus_infos`](#0x1_stake_cur_validator_consensus_infos) +- [Function `next_validator_consensus_infos`](#0x1_stake_next_validator_consensus_infos) +- [Function `validator_consensus_infos_from_validator_set`](#0x1_stake_validator_consensus_infos_from_validator_set) +- [Function 
`addresses_from_validator_infos`](#0x1_stake_addresses_from_validator_infos) +- [Function `update_stake_pool`](#0x1_stake_update_stake_pool) +- [Function `get_reconfig_start_time_secs`](#0x1_stake_get_reconfig_start_time_secs) +- [Function `calculate_rewards_amount`](#0x1_stake_calculate_rewards_amount) +- [Function `distribute_rewards`](#0x1_stake_distribute_rewards) +- [Function `append`](#0x1_stake_append) +- [Function `find_validator`](#0x1_stake_find_validator) +- [Function `generate_validator_info`](#0x1_stake_generate_validator_info) +- [Function `get_next_epoch_voting_power`](#0x1_stake_get_next_epoch_voting_power) +- [Function `update_voting_power_increase`](#0x1_stake_update_voting_power_increase) +- [Function `assert_stake_pool_exists`](#0x1_stake_assert_stake_pool_exists) +- [Function `configure_allowed_validators`](#0x1_stake_configure_allowed_validators) +- [Function `is_allowed`](#0x1_stake_is_allowed) +- [Function `assert_owner_cap_exists`](#0x1_stake_assert_owner_cap_exists) +- [Function `assert_reconfig_not_in_progress`](#0x1_stake_assert_reconfig_not_in_progress) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Resource `ValidatorSet`](#@Specification_1_ValidatorSet) + - [Function `initialize_validator_fees`](#@Specification_1_initialize_validator_fees) + - [Function `add_transaction_fee`](#@Specification_1_add_transaction_fee) + - [Function `get_validator_state`](#@Specification_1_get_validator_state) + - [Function `initialize`](#@Specification_1_initialize) + - [Function `remove_validators`](#@Specification_1_remove_validators) + - [Function `initialize_stake_owner`](#@Specification_1_initialize_stake_owner) + - [Function `initialize_validator`](#@Specification_1_initialize_validator) + - [Function `extract_owner_cap`](#@Specification_1_extract_owner_cap) + - [Function `deposit_owner_cap`](#@Specification_1_deposit_owner_cap) + - [Function 
`set_operator_with_cap`](#@Specification_1_set_operator_with_cap) + - [Function `set_delegated_voter_with_cap`](#@Specification_1_set_delegated_voter_with_cap) + - [Function `add_stake`](#@Specification_1_add_stake) + - [Function `add_stake_with_cap`](#@Specification_1_add_stake_with_cap) + - [Function `reactivate_stake_with_cap`](#@Specification_1_reactivate_stake_with_cap) + - [Function `rotate_consensus_key`](#@Specification_1_rotate_consensus_key) + - [Function `update_network_and_fullnode_addresses`](#@Specification_1_update_network_and_fullnode_addresses) + - [Function `increase_lockup_with_cap`](#@Specification_1_increase_lockup_with_cap) + - [Function `join_validator_set`](#@Specification_1_join_validator_set) + - [Function `unlock_with_cap`](#@Specification_1_unlock_with_cap) + - [Function `withdraw`](#@Specification_1_withdraw) + - [Function `leave_validator_set`](#@Specification_1_leave_validator_set) + - [Function `is_current_epoch_validator`](#@Specification_1_is_current_epoch_validator) + - [Function `update_performance_statistics`](#@Specification_1_update_performance_statistics) + - [Function `on_new_epoch`](#@Specification_1_on_new_epoch) + - [Function `next_validator_consensus_infos`](#@Specification_1_next_validator_consensus_infos) + - [Function `validator_consensus_infos_from_validator_set`](#@Specification_1_validator_consensus_infos_from_validator_set) + - [Function `update_stake_pool`](#@Specification_1_update_stake_pool) + - [Function `get_reconfig_start_time_secs`](#@Specification_1_get_reconfig_start_time_secs) + - [Function `calculate_rewards_amount`](#@Specification_1_calculate_rewards_amount) + - [Function `distribute_rewards`](#@Specification_1_distribute_rewards) + - [Function `append`](#@Specification_1_append) + - [Function `find_validator`](#@Specification_1_find_validator) + - [Function `update_voting_power_increase`](#@Specification_1_update_voting_power_increase) + - [Function 
`assert_stake_pool_exists`](#@Specification_1_assert_stake_pool_exists) + - [Function `configure_allowed_validators`](#@Specification_1_configure_allowed_validators) + - [Function `assert_owner_cap_exists`](#@Specification_1_assert_owner_cap_exists) + + +
use 0x1::account;
+use 0x1::aptos_coin;
+use 0x1::bls12381;
+use 0x1::chain_status;
+use 0x1::coin;
+use 0x1::error;
+use 0x1::event;
+use 0x1::features;
+use 0x1::fixed_point64;
+use 0x1::math64;
+use 0x1::option;
+use 0x1::reconfiguration_state;
+use 0x1::signer;
+use 0x1::staking_config;
+use 0x1::system_addresses;
+use 0x1::table;
+use 0x1::timestamp;
+use 0x1::validator_consensus_info;
+use 0x1::vector;
+
+ + + + + +## Resource `OwnerCapability` + +Capability that represents ownership and can be used to control the validator and the associated stake pool. +Having this be separate from the signer for the account that the validator resources are hosted at allows +modules to have control over a validator. + + +
struct OwnerCapability has store, key
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+ + +
+ + + +## Resource `StakePool` + +Each validator has a separate StakePool resource and can provide a stake. +Changes in stake for an active validator: +1. If a validator calls add_stake, the newly added stake is moved to pending_active. +2. If validator calls unlock, their stake is moved to pending_inactive. +2. When the next epoch starts, any pending_inactive stake is moved to inactive and can be withdrawn. +Any pending_active stake is moved to active and adds to the validator's voting power. + +Changes in stake for an inactive validator: +1. If a validator calls add_stake, the newly added stake is moved directly to active. +2. If validator calls unlock, their stake is moved directly to inactive. +3. When the next epoch starts, the validator can be activated if their active stake is more than the minimum. + + +
struct StakePool has key
+
+ + + +
+Fields + + +
+
+active: coin::Coin<aptos_coin::AptosCoin> +
+
+ +
+
+inactive: coin::Coin<aptos_coin::AptosCoin> +
+
+ +
+
+pending_active: coin::Coin<aptos_coin::AptosCoin> +
+
+ +
+
+pending_inactive: coin::Coin<aptos_coin::AptosCoin> +
+
+ +
+
+locked_until_secs: u64 +
+
+ +
+
+operator_address: address +
+
+ +
+
+delegated_voter: address +
+
+ +
+
+initialize_validator_events: event::EventHandle<stake::RegisterValidatorCandidateEvent> +
+
+ +
+
+set_operator_events: event::EventHandle<stake::SetOperatorEvent> +
+
+ +
+
+add_stake_events: event::EventHandle<stake::AddStakeEvent> +
+
+ +
+
+reactivate_stake_events: event::EventHandle<stake::ReactivateStakeEvent> +
+
+ +
+
+rotate_consensus_key_events: event::EventHandle<stake::RotateConsensusKeyEvent> +
+
+ +
+
+update_network_and_fullnode_addresses_events: event::EventHandle<stake::UpdateNetworkAndFullnodeAddressesEvent> +
+
+ +
+
+increase_lockup_events: event::EventHandle<stake::IncreaseLockupEvent> +
+
+ +
+
+join_validator_set_events: event::EventHandle<stake::JoinValidatorSetEvent> +
+
+ +
+
+distribute_rewards_events: event::EventHandle<stake::DistributeRewardsEvent> +
+
+ +
+
+unlock_stake_events: event::EventHandle<stake::UnlockStakeEvent> +
+
+ +
+
+withdraw_stake_events: event::EventHandle<stake::WithdrawStakeEvent> +
+
+ +
+
+leave_validator_set_events: event::EventHandle<stake::LeaveValidatorSetEvent> +
+
+ +
+
+ + +
+ + + +## Resource `ValidatorConfig` + +Validator info stored in validator address. + + +
struct ValidatorConfig has copy, drop, store, key
+
+ + + +
+Fields + + +
+
+consensus_pubkey: vector<u8> +
+
+ +
+
+network_addresses: vector<u8> +
+
+ +
+
+fullnode_addresses: vector<u8> +
+
+ +
+
+validator_index: u64 +
+
+ +
+
+ + +
+ + + +## Struct `ValidatorInfo` + +Consensus information per validator, stored in ValidatorSet. + + +
struct ValidatorInfo has copy, drop, store
+
+ + + +
+Fields + + +
+
+addr: address +
+
+ +
+
+voting_power: u64 +
+
+ +
+
+config: stake::ValidatorConfig +
+
+ +
+
+ + +
+ + + +## Resource `ValidatorSet` + +Full ValidatorSet, stored in @aptos_framework. +1. join_validator_set adds to pending_active queue. +2. leave_validator_set moves from active to pending_inactive queue. +3. on_new_epoch processes the two pending queues and refreshes ValidatorInfo from the owner's address. + + +
struct ValidatorSet has copy, drop, store, key
+
+ + + +
+Fields + + +
+
+consensus_scheme: u8 +
+
+ +
+
+active_validators: vector<stake::ValidatorInfo> +
+
+ +
+
+pending_inactive: vector<stake::ValidatorInfo> +
+
+ +
+
+pending_active: vector<stake::ValidatorInfo> +
+
+ +
+
+total_voting_power: u128 +
+
+ +
+
+total_joining_power: u128 +
+
+ +
+
+ + +
+ + + +## Resource `AptosCoinCapabilities` + +AptosCoin capabilities, set during genesis and stored in @CoreResource account. +This allows the Stake module to mint rewards to stakers. + + +
struct AptosCoinCapabilities has key
+
+ + + +
+Fields + + +
+
+mint_cap: coin::MintCapability<aptos_coin::AptosCoin> +
+
+ +
+
+ + +
+ + + +## Struct `IndividualValidatorPerformance` + + + +
struct IndividualValidatorPerformance has drop, store
+
+ + + +
+Fields + + +
+
+successful_proposals: u64 +
+
+ +
+
+failed_proposals: u64 +
+
+ +
+
+ + +
+ + + +## Resource `ValidatorPerformance` + + + +
struct ValidatorPerformance has key
+
+ + + +
+Fields + + +
+
+validators: vector<stake::IndividualValidatorPerformance> +
+
+ +
+
+ + +
+ + + +## Struct `RegisterValidatorCandidateEvent` + + + +
struct RegisterValidatorCandidateEvent has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+ + +
+ + + +## Struct `RegisterValidatorCandidate` + + + +
#[event]
+struct RegisterValidatorCandidate has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+ + +
+ + + +## Struct `SetOperatorEvent` + + + +
struct SetOperatorEvent has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+old_operator: address +
+
+ +
+
+new_operator: address +
+
+ +
+
+ + +
+ + + +## Struct `SetOperator` + + + +
#[event]
+struct SetOperator has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+old_operator: address +
+
+ +
+
+new_operator: address +
+
+ +
+
+ + +
+ + + +## Struct `AddStakeEvent` + + + +
struct AddStakeEvent has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+amount_added: u64 +
+
+ +
+
+ + +
+ + + +## Struct `AddStake` + + + +
#[event]
+struct AddStake has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+amount_added: u64 +
+
+ +
+
+ + +
+ + + +## Struct `ReactivateStakeEvent` + + + +
struct ReactivateStakeEvent has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `ReactivateStake` + + + +
#[event]
+struct ReactivateStake has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `RotateConsensusKeyEvent` + + + +
struct RotateConsensusKeyEvent has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+old_consensus_pubkey: vector<u8> +
+
+ +
+
+new_consensus_pubkey: vector<u8> +
+
+ +
+
+ + +
+ + + +## Struct `RotateConsensusKey` + + + +
#[event]
+struct RotateConsensusKey has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+old_consensus_pubkey: vector<u8> +
+
+ +
+
+new_consensus_pubkey: vector<u8> +
+
+ +
+
+ + +
+ + + +## Struct `UpdateNetworkAndFullnodeAddressesEvent` + + + +
struct UpdateNetworkAndFullnodeAddressesEvent has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+old_network_addresses: vector<u8> +
+
+ +
+
+new_network_addresses: vector<u8> +
+
+ +
+
+old_fullnode_addresses: vector<u8> +
+
+ +
+
+new_fullnode_addresses: vector<u8> +
+
+ +
+
+ + +
+ + + +## Struct `UpdateNetworkAndFullnodeAddresses` + + + +
#[event]
+struct UpdateNetworkAndFullnodeAddresses has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+old_network_addresses: vector<u8> +
+
+ +
+
+new_network_addresses: vector<u8> +
+
+ +
+
+old_fullnode_addresses: vector<u8> +
+
+ +
+
+new_fullnode_addresses: vector<u8> +
+
+ +
+
+ + +
+ + + +## Struct `IncreaseLockupEvent` + + + +
struct IncreaseLockupEvent has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+old_locked_until_secs: u64 +
+
+ +
+
+new_locked_until_secs: u64 +
+
+ +
+
+ + +
+ + + +## Struct `IncreaseLockup` + + + +
#[event]
+struct IncreaseLockup has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+old_locked_until_secs: u64 +
+
+ +
+
+new_locked_until_secs: u64 +
+
+ +
+
+ + +
+ + + +## Struct `JoinValidatorSetEvent` + + + +
struct JoinValidatorSetEvent has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+ + +
+ + + +## Struct `JoinValidatorSet` + + + +
#[event]
+struct JoinValidatorSet has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+ + +
+ + + +## Struct `DistributeRewardsEvent` + + + +
struct DistributeRewardsEvent has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+rewards_amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `DistributeRewards` + + + +
#[event]
+struct DistributeRewards has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+rewards_amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `UnlockStakeEvent` + + + +
struct UnlockStakeEvent has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+amount_unlocked: u64 +
+
+ +
+
+ + +
+ + + +## Struct `UnlockStake` + + + +
#[event]
+struct UnlockStake has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+amount_unlocked: u64 +
+
+ +
+
+ + +
+ + + +## Struct `WithdrawStakeEvent` + + + +
struct WithdrawStakeEvent has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+amount_withdrawn: u64 +
+
+ +
+
+ + +
+ + + +## Struct `WithdrawStake` + + + +
#[event]
+struct WithdrawStake has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+amount_withdrawn: u64 +
+
+ +
+
+ + +
+ + + +## Struct `LeaveValidatorSetEvent` + + + +
struct LeaveValidatorSetEvent has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+ + +
+ + + +## Struct `LeaveValidatorSet` + + + +
#[event]
+struct LeaveValidatorSet has drop, store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+ + +
+ + + +## Resource `ValidatorFees` + +Stores transaction fees assigned to validators. All fees are distributed to validators +at the end of the epoch. + + +
struct ValidatorFees has key
+
+ + + +
+Fields + + +
+
+fees_table: table::Table<address, coin::Coin<aptos_coin::AptosCoin>> +
+
+ +
+
+ + +
+ + + +## Resource `AllowedValidators` + +This provides an ACL for Testnet purposes. In testnet, everyone is a whale, a whale can be a validator. +This allows a testnet to bring additional entities into the validator set without compromising the +security of the testnet. This will NOT be enabled in Mainnet. + + +
struct AllowedValidators has key
+
+ + + +
+Fields + + +
+
+accounts: vector<address> +
+
+ +
+
+ + +
+ + + +## Resource `Ghost$ghost_valid_perf` + + + +
struct Ghost$ghost_valid_perf has copy, drop, store, key
+
+ + + +
+Fields + + +
+
+v: stake::ValidatorPerformance +
+
+ +
+
+ + +
+ + + +## Resource `Ghost$ghost_proposer_idx` + + + +
struct Ghost$ghost_proposer_idx has copy, drop, store, key
+
+ + + +
+Fields + + +
+
+v: option::Option<u64> +
+
+ +
+
+ + +
+ + + +## Resource `Ghost$ghost_active_num` + + + +
struct Ghost$ghost_active_num has copy, drop, store, key
+
+ + + +
+Fields + + +
+
+v: u64 +
+
+ +
+
+ + +
+ + + +## Resource `Ghost$ghost_pending_inactive_num` + + + +
struct Ghost$ghost_pending_inactive_num has copy, drop, store, key
+
+ + + +
+Fields + + +
+
+v: u64 +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const MAX_U64: u128 = 18446744073709551615;
+
+ + + + + +Account is already registered as a validator candidate. + + +
const EALREADY_REGISTERED: u64 = 8;
+
+ + + + + +Limit the maximum value of rewards_rate in order to avoid any arithmetic overflow. + + +
const MAX_REWARDS_RATE: u64 = 1000000;
+
+ + + + + +Account is already a validator or pending validator. + + +
const EALREADY_ACTIVE_VALIDATOR: u64 = 4;
+
+ + + + + +Table to store collected transaction fees for each validator already exists. + + +
const EFEES_TABLE_ALREADY_EXISTS: u64 = 19;
+
+ + + + + +Validator is not defined in the ACL of entities allowed to be validators + + +
const EINELIGIBLE_VALIDATOR: u64 = 17;
+
+ + + + + +Cannot update stake pool's lockup to earlier than current lockup. + + +
const EINVALID_LOCKUP: u64 = 18;
+
+ + + + + +Invalid consensus public key + + +
const EINVALID_PUBLIC_KEY: u64 = 11;
+
+ + + + + +Can't remove last validator. + + +
const ELAST_VALIDATOR: u64 = 6;
+
+ + + + + +Account does not have the right operator capability. + + +
const ENOT_OPERATOR: u64 = 9;
+
+ + + + + +Account is not a validator. + + +
const ENOT_VALIDATOR: u64 = 5;
+
+ + + + + +Validators cannot join or leave post genesis on this test network. + + +
const ENO_POST_GENESIS_VALIDATOR_SET_CHANGE_ALLOWED: u64 = 10;
+
+ + + + + +An account cannot own more than one owner capability. + + +
const EOWNER_CAP_ALREADY_EXISTS: u64 = 16;
+
+ + + + + +Owner capability does not exist at the provided account. + + +
const EOWNER_CAP_NOT_FOUND: u64 = 15;
+
+ + + + + +Validator set change temporarily disabled because of in-progress reconfiguration. + + +
const ERECONFIGURATION_IN_PROGRESS: u64 = 20;
+
+ + + + + +Total stake exceeds maximum allowed. + + +
const ESTAKE_EXCEEDS_MAX: u64 = 7;
+
+ + + + + +Stake pool does not exist at the provided pool address. + + +
const ESTAKE_POOL_DOES_NOT_EXIST: u64 = 14;
+
+ + + + + +Too much stake to join validator set. + + +
const ESTAKE_TOO_HIGH: u64 = 3;
+
+ + + + + +Not enough stake to join validator set. + + +
const ESTAKE_TOO_LOW: u64 = 2;
+
+ + + + + +Validator Config not published. + + +
const EVALIDATOR_CONFIG: u64 = 1;
+
+ + + + + +Validator set exceeds the limit + + +
const EVALIDATOR_SET_TOO_LARGE: u64 = 12;
+
+ + + + + +Voting power increase has exceeded the limit for this current epoch. + + +
const EVOTING_POWER_INCREASE_EXCEEDS_LIMIT: u64 = 13;
+
+ + + + + +Limit the maximum size to u16::max, it's the current limit of the bitvec +https://github.com/aptos-labs/aptos-core/blob/main/crates/aptos-bitvec/src/lib.rs#L20 + + +
const MAX_VALIDATOR_SET_SIZE: u64 = 65536;
+
+ + + + + + + +
const VALIDATOR_STATUS_ACTIVE: u64 = 2;
+
+ + + + + + + +
const VALIDATOR_STATUS_INACTIVE: u64 = 4;
+
+ + + + + +Validator status enum. We can switch to proper enum later once Move supports it. + + +
const VALIDATOR_STATUS_PENDING_ACTIVE: u64 = 1;
+
+ + + + + + + +
const VALIDATOR_STATUS_PENDING_INACTIVE: u64 = 3;
+
+ + + + + +## Function `initialize_validator_fees` + +Initializes the resource storing information about collected transaction fees per validator. +Used by transaction_fee.move to initialize fee collection and distribution. + + +
public(friend) fun initialize_validator_fees(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
public(friend) fun initialize_validator_fees(aptos_framework: &signer) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    assert!(
+        !exists<ValidatorFees>(@aptos_framework),
+        error::already_exists(EFEES_TABLE_ALREADY_EXISTS)
+    );
+    move_to(aptos_framework, ValidatorFees { fees_table: table::new() });
+}
+
+ + + +
+ + + +## Function `add_transaction_fee` + +Stores the transaction fee collected to the specified validator address. + + +
public(friend) fun add_transaction_fee(validator_addr: address, fee: coin::Coin<aptos_coin::AptosCoin>)
+
+ + + +
+Implementation + + +
public(friend) fun add_transaction_fee(validator_addr: address, fee: Coin<AptosCoin>) acquires ValidatorFees {
+    let fees_table = &mut borrow_global_mut<ValidatorFees>(@aptos_framework).fees_table;
+    if (table::contains(fees_table, validator_addr)) {
+        let collected_fee = table::borrow_mut(fees_table, validator_addr);
+        coin::merge(collected_fee, fee);
+    } else {
+        table::add(fees_table, validator_addr, fee);
+    }
+}
+
+ + + +
+ + + +## Function `get_lockup_secs` + +Return the lockup expiration of the stake pool at pool_address. +This will throw an error if there's no stake pool at pool_address. + + +
#[view]
+public fun get_lockup_secs(pool_address: address): u64
+
+ + + +
+Implementation + + +
public fun get_lockup_secs(pool_address: address): u64 acquires StakePool {
+    assert_stake_pool_exists(pool_address);
+    borrow_global<StakePool>(pool_address).locked_until_secs
+}
+
+ + + +
+ + + +## Function `get_remaining_lockup_secs` + +Return the remaining lockup of the stake pool at pool_address. +This will throw an error if there's no stake pool at pool_address. + + +
#[view]
+public fun get_remaining_lockup_secs(pool_address: address): u64
+
+ + + +
+Implementation + + +
public fun get_remaining_lockup_secs(pool_address: address): u64 acquires StakePool {
+    assert_stake_pool_exists(pool_address);
+    let lockup_time = borrow_global<StakePool>(pool_address).locked_until_secs;
+    if (lockup_time <= timestamp::now_seconds()) {
+        0
+    } else {
+        lockup_time - timestamp::now_seconds()
+    }
+}
+
+ + + +
+ + + +## Function `get_stake` + +Return the different stake amounts for pool_address (whether the validator is active or not). +The returned amounts are for (active, inactive, pending_active, pending_inactive) stake respectively. + + +
#[view]
+public fun get_stake(pool_address: address): (u64, u64, u64, u64)
+
+ + + +
+Implementation + + +
public fun get_stake(pool_address: address): (u64, u64, u64, u64) acquires StakePool {
+    assert_stake_pool_exists(pool_address);
+    let stake_pool = borrow_global<StakePool>(pool_address);
+    (
+        coin::value(&stake_pool.active),
+        coin::value(&stake_pool.inactive),
+        coin::value(&stake_pool.pending_active),
+        coin::value(&stake_pool.pending_inactive),
+    )
+}
+
+ + + +
+ + + +## Function `get_validator_state` + +Returns the validator's state. + + +
#[view]
+public fun get_validator_state(pool_address: address): u64
+
+ + + +
+Implementation + + +
public fun get_validator_state(pool_address: address): u64 acquires ValidatorSet {
+    let validator_set = borrow_global<ValidatorSet>(@aptos_framework);
+    if (option::is_some(&find_validator(&validator_set.pending_active, pool_address))) {
+        VALIDATOR_STATUS_PENDING_ACTIVE
+    } else if (option::is_some(&find_validator(&validator_set.active_validators, pool_address))) {
+        VALIDATOR_STATUS_ACTIVE
+    } else if (option::is_some(&find_validator(&validator_set.pending_inactive, pool_address))) {
+        VALIDATOR_STATUS_PENDING_INACTIVE
+    } else {
+        VALIDATOR_STATUS_INACTIVE
+    }
+}
+
+ + + +
+ + + +## Function `get_current_epoch_voting_power` + +Return the voting power of the validator in the current epoch. +This is the same as the validator's total active and pending_inactive stake. + + +
#[view]
+public fun get_current_epoch_voting_power(pool_address: address): u64
+
+ + + +
+Implementation + + +
public fun get_current_epoch_voting_power(pool_address: address): u64 acquires StakePool, ValidatorSet {
+    assert_stake_pool_exists(pool_address);
+    let validator_state = get_validator_state(pool_address);
+    // Both active and pending inactive validators can still vote in the current epoch.
+    if (validator_state == VALIDATOR_STATUS_ACTIVE || validator_state == VALIDATOR_STATUS_PENDING_INACTIVE) {
+        let active_stake = coin::value(&borrow_global<StakePool>(pool_address).active);
+        let pending_inactive_stake = coin::value(&borrow_global<StakePool>(pool_address).pending_inactive);
+        active_stake + pending_inactive_stake
+    } else {
+        0
+    }
+}
+
+ + + +
+ + + +## Function `get_delegated_voter` + +Return the delegated voter of the validator at pool_address. + + +
#[view]
+public fun get_delegated_voter(pool_address: address): address
+
+ + + +
+Implementation + + +
public fun get_delegated_voter(pool_address: address): address acquires StakePool {
+    assert_stake_pool_exists(pool_address);
+    borrow_global<StakePool>(pool_address).delegated_voter
+}
+
+ + + +
+ + + +## Function `get_operator` + +Return the operator of the validator at pool_address. + + +
#[view]
+public fun get_operator(pool_address: address): address
+
+ + + +
+Implementation + + +
public fun get_operator(pool_address: address): address acquires StakePool {
+    assert_stake_pool_exists(pool_address);
+    borrow_global<StakePool>(pool_address).operator_address
+}
+
+ + + +
+ + + +## Function `get_owned_pool_address` + +Return the pool address in owner_cap. + + +
public fun get_owned_pool_address(owner_cap: &stake::OwnerCapability): address
+
+ + + +
+Implementation + + +
public fun get_owned_pool_address(owner_cap: &OwnerCapability): address {
+    owner_cap.pool_address
+}
+
+ + + +
+ + + +## Function `get_validator_index` + +Return the validator index for pool_address. + + +
#[view]
+public fun get_validator_index(pool_address: address): u64
+
+ + + +
+Implementation + + +
public fun get_validator_index(pool_address: address): u64 acquires ValidatorConfig {
+    assert_stake_pool_exists(pool_address);
+    borrow_global<ValidatorConfig>(pool_address).validator_index
+}
+
+ + + +
+ + + +## Function `get_current_epoch_proposal_counts` + +Return the number of successful and failed proposals for the proposal at the given validator index. + + +
#[view]
+public fun get_current_epoch_proposal_counts(validator_index: u64): (u64, u64)
+
+ + + +
+Implementation + + +
public fun get_current_epoch_proposal_counts(validator_index: u64): (u64, u64) acquires ValidatorPerformance {
+    let validator_performances = &borrow_global<ValidatorPerformance>(@aptos_framework).validators;
+    let validator_performance = vector::borrow(validator_performances, validator_index);
+    (validator_performance.successful_proposals, validator_performance.failed_proposals)
+}
+
+ + + +
+ + + +## Function `get_validator_config` + +Return the validator's config. + + +
#[view]
+public fun get_validator_config(pool_address: address): (vector<u8>, vector<u8>, vector<u8>)
+
+ + + +
+Implementation + + +
public fun get_validator_config(
+    pool_address: address
+): (vector<u8>, vector<u8>, vector<u8>) acquires ValidatorConfig {
+    assert_stake_pool_exists(pool_address);
+    let validator_config = borrow_global<ValidatorConfig>(pool_address);
+    (validator_config.consensus_pubkey, validator_config.network_addresses, validator_config.fullnode_addresses)
+}
+
+ + + +
+ + + +## Function `stake_pool_exists` + + + +
#[view]
+public fun stake_pool_exists(addr: address): bool
+
+ + + +
+Implementation + + +
public fun stake_pool_exists(addr: address): bool {
+    exists<StakePool>(addr)
+}
+
+ + + +
+ + + +## Function `initialize` + +Initialize validator set to the core resource account. + + +
public(friend) fun initialize(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
public(friend) fun initialize(aptos_framework: &signer) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+
+    move_to(aptos_framework, ValidatorSet {
+        consensus_scheme: 0,
+        active_validators: vector::empty(),
+        pending_active: vector::empty(),
+        pending_inactive: vector::empty(),
+        total_voting_power: 0,
+        total_joining_power: 0,
+    });
+
+    move_to(aptos_framework, ValidatorPerformance {
+        validators: vector::empty(),
+    });
+}
+
+ + + +
+ + + +## Function `store_aptos_coin_mint_cap` + +This is only called during Genesis, which is where MintCapability can be created. +Beyond genesis, no one can create AptosCoin mint/burn capabilities. + + +
public(friend) fun store_aptos_coin_mint_cap(aptos_framework: &signer, mint_cap: coin::MintCapability<aptos_coin::AptosCoin>)
+
+ + + +
+Implementation + + +
public(friend) fun store_aptos_coin_mint_cap(aptos_framework: &signer, mint_cap: MintCapability<AptosCoin>) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    move_to(aptos_framework, AptosCoinCapabilities { mint_cap })
+}
+
+ + + +
+ + + +## Function `remove_validators` + +Allow on chain governance to remove validators from the validator set. + + +
public fun remove_validators(aptos_framework: &signer, validators: &vector<address>)
+
+ + + +
+Implementation + + +
public fun remove_validators(
+    aptos_framework: &signer,
+    validators: &vector<address>,
+) acquires ValidatorSet {
+    // Governance-only: each listed validator found in the active set is moved to
+    // pending_inactive, so the removal takes effect at the next epoch transition.
+    assert_reconfig_not_in_progress();
+    system_addresses::assert_aptos_framework(aptos_framework);
+    let validator_set = borrow_global_mut<ValidatorSet>(@aptos_framework);
+    let active_validators = &mut validator_set.active_validators;
+    let pending_inactive = &mut validator_set.pending_inactive;
+    spec {
+        update ghost_active_num = len(active_validators);
+        update ghost_pending_inactive_num = len(pending_inactive);
+    };
+    let len_validators = vector::length(validators);
+    let i = 0;
+    // Remove each validator from the validator set.
+    while ({
+        spec {
+            invariant i <= len_validators;
+            invariant spec_validators_are_initialized(active_validators);
+            invariant spec_validator_indices_are_valid(active_validators);
+            invariant spec_validators_are_initialized(pending_inactive);
+            invariant spec_validator_indices_are_valid(pending_inactive);
+            invariant ghost_active_num + ghost_pending_inactive_num == len(active_validators) + len(pending_inactive);
+        };
+        i < len_validators
+    }) {
+        let validator = *vector::borrow(validators, i);
+        let validator_index = find_validator(active_validators, validator);
+        // Addresses not currently in the active set are silently skipped.
+        if (option::is_some(&validator_index)) {
+            let validator_info = vector::swap_remove(active_validators, *option::borrow(&validator_index));
+            vector::push_back(pending_inactive, validator_info);
+            spec {
+                update ghost_active_num = ghost_active_num - 1;
+                update ghost_pending_inactive_num = ghost_pending_inactive_num + 1;
+            };
+        };
+        i = i + 1;
+    };
+}
+
+ + + +
 + + + +## Function `initialize_stake_owner` + +Initialize the validator account and give ownership to the signing account, +except it leaves the ValidatorConfig to be set by another entity. +Note: this also sets the operator and the delegated voter; pass the account's own +address for either role to defer setting it. + + +
public entry fun initialize_stake_owner(owner: &signer, initial_stake_amount: u64, operator: address, voter: address)
+
+ + + +
+Implementation + + +
public entry fun initialize_stake_owner(
+    owner: &signer,
+    initial_stake_amount: u64,
+    operator: address,
+    voter: address,
+) acquires AllowedValidators, OwnerCapability, StakePool, ValidatorSet {
+    // Create the stake pool and ownership capability for `owner`.
+    initialize_owner(owner);
+    // The ValidatorConfig starts empty; the consensus key and addresses are
+    // expected to be filled in later by another entity (see function docs).
+    move_to(owner, ValidatorConfig {
+        consensus_pubkey: vector::empty(),
+        network_addresses: vector::empty(),
+        fullnode_addresses: vector::empty(),
+        validator_index: 0,
+    });
+
+    if (initial_stake_amount > 0) {
+        add_stake(owner, initial_stake_amount);
+    };
+
+    // Only delegate the operator/voter roles when they differ from the owner;
+    // initialize_owner already set both to the owner's own address.
+    let account_address = signer::address_of(owner);
+    if (account_address != operator) {
+        set_operator(owner, operator)
+    };
+    if (account_address != voter) {
+        set_delegated_voter(owner, voter)
+    };
+}
+
+ + + +
+ + + +## Function `initialize_validator` + +Initialize the validator account and give ownership to the signing account. + + +
public entry fun initialize_validator(account: &signer, consensus_pubkey: vector<u8>, proof_of_possession: vector<u8>, network_addresses: vector<u8>, fullnode_addresses: vector<u8>)
+
+ + + +
+Implementation + + +
public entry fun initialize_validator(
+    account: &signer,
+    consensus_pubkey: vector<u8>,
+    proof_of_possession: vector<u8>,
+    network_addresses: vector<u8>,
+    fullnode_addresses: vector<u8>,
+) acquires AllowedValidators {
+    // Checks the public key has a valid proof-of-possession to prevent rogue-key attacks.
+    let pubkey_from_pop = &mut bls12381::public_key_from_bytes_with_pop(
+        consensus_pubkey,
+        &proof_of_possession_from_bytes(proof_of_possession)
+    );
+    assert!(option::is_some(pubkey_from_pop), error::invalid_argument(EINVALID_PUBLIC_KEY));
+
+    // Publish the stake pool + ownership capability, then the validator's config
+    // with the verified consensus key and the supplied addresses.
+    initialize_owner(account);
+    move_to(account, ValidatorConfig {
+        consensus_pubkey,
+        network_addresses,
+        fullnode_addresses,
+        validator_index: 0,
+    });
+}
+
+ + + +
+ + + +## Function `initialize_owner` + + + +
fun initialize_owner(owner: &signer)
+
+ + + +
+Implementation + + +
fun initialize_owner(owner: &signer) acquires AllowedValidators {
+    // The owner must pass the is_allowed check and must not already have a
+    // stake pool registered at its address.
+    let owner_address = signer::address_of(owner);
+    assert!(is_allowed(owner_address), error::not_found(EINELIGIBLE_VALIDATOR));
+    assert!(!stake_pool_exists(owner_address), error::already_exists(EALREADY_REGISTERED));
+
+    // New pools start with zero coins in every bucket, no lockup, and the
+    // owner acting as both operator and delegated voter.
+    move_to(owner, StakePool {
+        active: coin::zero<AptosCoin>(),
+        pending_active: coin::zero<AptosCoin>(),
+        pending_inactive: coin::zero<AptosCoin>(),
+        inactive: coin::zero<AptosCoin>(),
+        locked_until_secs: 0,
+        operator_address: owner_address,
+        delegated_voter: owner_address,
+        // Events.
+        initialize_validator_events: account::new_event_handle<RegisterValidatorCandidateEvent>(owner),
+        set_operator_events: account::new_event_handle<SetOperatorEvent>(owner),
+        add_stake_events: account::new_event_handle<AddStakeEvent>(owner),
+        reactivate_stake_events: account::new_event_handle<ReactivateStakeEvent>(owner),
+        rotate_consensus_key_events: account::new_event_handle<RotateConsensusKeyEvent>(owner),
+        update_network_and_fullnode_addresses_events: account::new_event_handle<UpdateNetworkAndFullnodeAddressesEvent>(
+            owner
+        ),
+        increase_lockup_events: account::new_event_handle<IncreaseLockupEvent>(owner),
+        join_validator_set_events: account::new_event_handle<JoinValidatorSetEvent>(owner),
+        distribute_rewards_events: account::new_event_handle<DistributeRewardsEvent>(owner),
+        unlock_stake_events: account::new_event_handle<UnlockStakeEvent>(owner),
+        withdraw_stake_events: account::new_event_handle<WithdrawStakeEvent>(owner),
+        leave_validator_set_events: account::new_event_handle<LeaveValidatorSetEvent>(owner),
+    });
+
+    // Grant the owner the capability that controls this pool.
+    move_to(owner, OwnerCapability { pool_address: owner_address });
+}
+
+ + + +
+ + + +## Function `extract_owner_cap` + +Extract and return owner capability from the signing account. + + +
public fun extract_owner_cap(owner: &signer): stake::OwnerCapability
+
+ + + +
+Implementation + + +
public fun extract_owner_cap(owner: &signer): OwnerCapability acquires OwnerCapability {
+    // Moves the OwnerCapability resource out of the signer's account;
+    // aborts (via assert_owner_cap_exists) if it is not present.
+    let owner_address = signer::address_of(owner);
+    assert_owner_cap_exists(owner_address);
+    move_from<OwnerCapability>(owner_address)
+}
+
+ + + +
+ + + +## Function `deposit_owner_cap` + +Deposit owner_cap into account. This requires account to not already have ownership of another +staking pool. + + +
public fun deposit_owner_cap(owner: &signer, owner_cap: stake::OwnerCapability)
+
+ + + +
+Implementation + + +
public fun deposit_owner_cap(owner: &signer, owner_cap: OwnerCapability) {
+    // An account can hold at most one OwnerCapability resource.
+    assert!(!exists<OwnerCapability>(signer::address_of(owner)), error::not_found(EOWNER_CAP_ALREADY_EXISTS));
+    move_to(owner, owner_cap);
+}
+
+ + + +
+ + + +## Function `destroy_owner_cap` + +Destroy owner_cap. + + +
public fun destroy_owner_cap(owner_cap: stake::OwnerCapability)
+
+ + + +
+Implementation + + +
public fun destroy_owner_cap(owner_cap: OwnerCapability) {
+    // Unpack to consume the capability; the stake pool itself is untouched
+    // (but becomes uncontrollable once its only capability is destroyed).
+    let OwnerCapability { pool_address: _ } = owner_cap;
+}
+
+ + + +
+ + + +## Function `set_operator` + +Allows an owner to change the operator of the stake pool. + + +
public entry fun set_operator(owner: &signer, new_operator: address)
+
+ + + +
+Implementation + + +
public entry fun set_operator(owner: &signer, new_operator: address) acquires OwnerCapability, StakePool {
+    // Convenience wrapper: look up the signer's OwnerCapability and delegate
+    // to set_operator_with_cap.
+    let owner_address = signer::address_of(owner);
+    assert_owner_cap_exists(owner_address);
+    let ownership_cap = borrow_global<OwnerCapability>(owner_address);
+    set_operator_with_cap(ownership_cap, new_operator);
+}
+
+ + + +
+ + + +## Function `set_operator_with_cap` + +Allows an account with ownership capability to change the operator of the stake pool. + + +
public fun set_operator_with_cap(owner_cap: &stake::OwnerCapability, new_operator: address)
+
+ + + +
+Implementation + + +
public fun set_operator_with_cap(owner_cap: &OwnerCapability, new_operator: address) acquires StakePool {
+    let pool_address = owner_cap.pool_address;
+    assert_stake_pool_exists(pool_address);
+    let stake_pool = borrow_global_mut<StakePool>(pool_address);
+    let old_operator = stake_pool.operator_address;
+    stake_pool.operator_address = new_operator;
+
+    // Emit the new module event only when the migration feature is enabled;
+    // the legacy event-handle event is always emitted for backward compatibility.
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            SetOperator {
+                pool_address,
+                old_operator,
+                new_operator,
+            },
+        );
+    };
+
+    event::emit_event(
+        &mut stake_pool.set_operator_events,
+        SetOperatorEvent {
+            pool_address,
+            old_operator,
+            new_operator,
+        },
+    );
+}
+
+ + + +
+ + + +## Function `set_delegated_voter` + +Allows an owner to change the delegated voter of the stake pool. + + +
public entry fun set_delegated_voter(owner: &signer, new_voter: address)
+
+ + + +
+Implementation + + +
public entry fun set_delegated_voter(owner: &signer, new_voter: address) acquires OwnerCapability, StakePool {
+    // Convenience wrapper: look up the signer's OwnerCapability and delegate
+    // to set_delegated_voter_with_cap.
+    let owner_address = signer::address_of(owner);
+    assert_owner_cap_exists(owner_address);
+    let ownership_cap = borrow_global<OwnerCapability>(owner_address);
+    set_delegated_voter_with_cap(ownership_cap, new_voter);
+}
+
+ + + +
+ + + +## Function `set_delegated_voter_with_cap` + +Allows an owner to change the delegated voter of the stake pool. + + +
public fun set_delegated_voter_with_cap(owner_cap: &stake::OwnerCapability, new_voter: address)
+
+ + + +
+Implementation + + +
public fun set_delegated_voter_with_cap(owner_cap: &OwnerCapability, new_voter: address) acquires StakePool {
+    // Direct field write: takes effect immediately and emits no event.
+    let pool_address = owner_cap.pool_address;
+    assert_stake_pool_exists(pool_address);
+    let stake_pool = borrow_global_mut<StakePool>(pool_address);
+    stake_pool.delegated_voter = new_voter;
+}
+
+ + + +
+ + + +## Function `add_stake` + +Add amount of coins from the account owning the StakePool. + + +
public entry fun add_stake(owner: &signer, amount: u64)
+
+ + + +
+Implementation + + +
public entry fun add_stake(owner: &signer, amount: u64) acquires OwnerCapability, StakePool, ValidatorSet {
+    // Withdraws `amount` of AptosCoin from the owner's account and adds it to
+    // the pool named by the owner's capability.
+    let owner_address = signer::address_of(owner);
+    assert_owner_cap_exists(owner_address);
+    let ownership_cap = borrow_global<OwnerCapability>(owner_address);
+    add_stake_with_cap(ownership_cap, coin::withdraw<AptosCoin>(owner, amount));
+}
+
+ + + +
+ + + +## Function `add_stake_with_cap` + +Add coins into pool_address. this requires the corresponding owner_cap to be passed in. + + +
public fun add_stake_with_cap(owner_cap: &stake::OwnerCapability, coins: coin::Coin<aptos_coin::AptosCoin>)
+
+ + + +
+Implementation + + +
public fun add_stake_with_cap(owner_cap: &OwnerCapability, coins: Coin<AptosCoin>) acquires StakePool, ValidatorSet {
+    assert_reconfig_not_in_progress();
+    let pool_address = owner_cap.pool_address;
+    assert_stake_pool_exists(pool_address);
+
+    // Short-circuit on zero coins so no state is changed and no events are emitted.
+    let amount = coin::value(&coins);
+    if (amount == 0) {
+        coin::destroy_zero(coins);
+        return
+    };
+
+    // Only track and validate voting power increase for active and pending_active validator.
+    // Pending_inactive validator will be removed from the validator set in the next epoch.
+    // Inactive validator's total stake will be tracked when they join the validator set.
+    let validator_set = borrow_global_mut<ValidatorSet>(@aptos_framework);
+    // Search directly rather than using get_validator_state to save on unnecessary loops.
+    if (option::is_some(&find_validator(&validator_set.active_validators, pool_address)) ||
+        option::is_some(&find_validator(&validator_set.pending_active, pool_address))) {
+        update_voting_power_increase(amount);
+    };
+
+    // Add to pending_active if it's a current validator because the stake is not counted until the next epoch.
+    // Otherwise, the delegation can be added to active directly as the validator is also activated in the epoch.
+    let stake_pool = borrow_global_mut<StakePool>(pool_address);
+    if (is_current_epoch_validator(pool_address)) {
+        coin::merge<AptosCoin>(&mut stake_pool.pending_active, coins);
+    } else {
+        coin::merge<AptosCoin>(&mut stake_pool.active, coins);
+    };
+
+    // The pool's projected next-epoch voting power must stay within the configured maximum.
+    let (_, maximum_stake) = staking_config::get_required_stake(&staking_config::get());
+    let voting_power = get_next_epoch_voting_power(stake_pool);
+    assert!(voting_power <= maximum_stake, error::invalid_argument(ESTAKE_EXCEEDS_MAX));
+
+    // Module event behind a feature flag; legacy handle event always emitted.
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            AddStake {
+                pool_address,
+                amount_added: amount,
+            },
+        );
+    };
+    event::emit_event(
+        &mut stake_pool.add_stake_events,
+        AddStakeEvent {
+            pool_address,
+            amount_added: amount,
+        },
+    );
+}
+
+ + + +
+ + + +## Function `reactivate_stake` + +Move amount of coins from pending_inactive to active. + + +
public entry fun reactivate_stake(owner: &signer, amount: u64)
+
+ + + +
+Implementation + + +
public entry fun reactivate_stake(owner: &signer, amount: u64) acquires OwnerCapability, StakePool {
+    // Convenience wrapper: look up the signer's OwnerCapability and delegate
+    // to reactivate_stake_with_cap.
+    assert_reconfig_not_in_progress();
+    let owner_address = signer::address_of(owner);
+    assert_owner_cap_exists(owner_address);
+    let ownership_cap = borrow_global<OwnerCapability>(owner_address);
+    reactivate_stake_with_cap(ownership_cap, amount);
+}
+
+ + + +
+ + + +## Function `reactivate_stake_with_cap` + + + +
public fun reactivate_stake_with_cap(owner_cap: &stake::OwnerCapability, amount: u64)
+
+ + + +
+Implementation + + +
public fun reactivate_stake_with_cap(owner_cap: &OwnerCapability, amount: u64) acquires StakePool {
+    // Moves up to `amount` coins from pending_inactive back to active.
+    assert_reconfig_not_in_progress();
+    let pool_address = owner_cap.pool_address;
+    assert_stake_pool_exists(pool_address);
+
+    // Cap the amount to reactivate by the amount in pending_inactive.
+    let stake_pool = borrow_global_mut<StakePool>(pool_address);
+    let total_pending_inactive = coin::value(&stake_pool.pending_inactive);
+    amount = min(amount, total_pending_inactive);
+
+    // Since this does not count as a voting power change (pending inactive still counts as voting power in the
+    // current epoch), stake can be immediately moved from pending inactive to active.
+    // We also don't need to check voting power increase as there's none.
+    let reactivated_coins = coin::extract(&mut stake_pool.pending_inactive, amount);
+    coin::merge(&mut stake_pool.active, reactivated_coins);
+
+    // Module event behind a feature flag; legacy handle event always emitted.
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            ReactivateStake {
+                pool_address,
+                amount,
+            },
+        );
+    };
+    event::emit_event(
+        &mut stake_pool.reactivate_stake_events,
+        ReactivateStakeEvent {
+            pool_address,
+            amount,
+        },
+    );
+}
+
+ + + +
+ + + +## Function `rotate_consensus_key` + +Rotate the consensus key of the validator, it'll take effect in next epoch. + + +
public entry fun rotate_consensus_key(operator: &signer, pool_address: address, new_consensus_pubkey: vector<u8>, proof_of_possession: vector<u8>)
+
+ + + +
+Implementation + + +
public entry fun rotate_consensus_key(
+    operator: &signer,
+    pool_address: address,
+    new_consensus_pubkey: vector<u8>,
+    proof_of_possession: vector<u8>,
+) acquires StakePool, ValidatorConfig {
+    assert_reconfig_not_in_progress();
+    assert_stake_pool_exists(pool_address);
+
+    // Only the pool's current operator may rotate the consensus key.
+    let stake_pool = borrow_global_mut<StakePool>(pool_address);
+    assert!(signer::address_of(operator) == stake_pool.operator_address, error::unauthenticated(ENOT_OPERATOR));
+
+    assert!(exists<ValidatorConfig>(pool_address), error::not_found(EVALIDATOR_CONFIG));
+    let validator_info = borrow_global_mut<ValidatorConfig>(pool_address);
+    let old_consensus_pubkey = validator_info.consensus_pubkey;
+    // Checks the public key has a valid proof-of-possession to prevent rogue-key attacks.
+    let pubkey_from_pop = &mut bls12381::public_key_from_bytes_with_pop(
+        new_consensus_pubkey,
+        &proof_of_possession_from_bytes(proof_of_possession)
+    );
+    assert!(option::is_some(pubkey_from_pop), error::invalid_argument(EINVALID_PUBLIC_KEY));
+    validator_info.consensus_pubkey = new_consensus_pubkey;
+
+    // Module event behind a feature flag; legacy handle event always emitted.
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            RotateConsensusKey {
+                pool_address,
+                old_consensus_pubkey,
+                new_consensus_pubkey,
+            },
+        );
+    };
+    event::emit_event(
+        &mut stake_pool.rotate_consensus_key_events,
+        RotateConsensusKeyEvent {
+            pool_address,
+            old_consensus_pubkey,
+            new_consensus_pubkey,
+        },
+    );
+}
+
+ + + +
+ + + +## Function `update_network_and_fullnode_addresses` + +Update the network and full node addresses of the validator. This only takes effect in the next epoch. + + +
public entry fun update_network_and_fullnode_addresses(operator: &signer, pool_address: address, new_network_addresses: vector<u8>, new_fullnode_addresses: vector<u8>)
+
+ + + +
+Implementation + + +
public entry fun update_network_and_fullnode_addresses(
+    operator: &signer,
+    pool_address: address,
+    new_network_addresses: vector<u8>,
+    new_fullnode_addresses: vector<u8>,
+) acquires StakePool, ValidatorConfig {
+    assert_reconfig_not_in_progress();
+    assert_stake_pool_exists(pool_address);
+    // Only the pool's current operator may change the addresses.
+    let stake_pool = borrow_global_mut<StakePool>(pool_address);
+    assert!(signer::address_of(operator) == stake_pool.operator_address, error::unauthenticated(ENOT_OPERATOR));
+    assert!(exists<ValidatorConfig>(pool_address), error::not_found(EVALIDATOR_CONFIG));
+    let validator_info = borrow_global_mut<ValidatorConfig>(pool_address);
+    // Keep the old values so both event variants can report the transition.
+    let old_network_addresses = validator_info.network_addresses;
+    validator_info.network_addresses = new_network_addresses;
+    let old_fullnode_addresses = validator_info.fullnode_addresses;
+    validator_info.fullnode_addresses = new_fullnode_addresses;
+
+    // Module event behind a feature flag; legacy handle event always emitted.
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            UpdateNetworkAndFullnodeAddresses {
+                pool_address,
+                old_network_addresses,
+                new_network_addresses,
+                old_fullnode_addresses,
+                new_fullnode_addresses,
+            },
+        );
+    };
+    event::emit_event(
+        &mut stake_pool.update_network_and_fullnode_addresses_events,
+        UpdateNetworkAndFullnodeAddressesEvent {
+            pool_address,
+            old_network_addresses,
+            new_network_addresses,
+            old_fullnode_addresses,
+            new_fullnode_addresses,
+        },
+    );
+
+}
+
+ + + +
+ + + +## Function `increase_lockup` + +Similar to increase_lockup_with_cap but will use ownership capability from the signing account. + + +
public entry fun increase_lockup(owner: &signer)
+
+ + + +
+Implementation + + +
public entry fun increase_lockup(owner: &signer) acquires OwnerCapability, StakePool {
+    // Convenience wrapper: look up the signer's OwnerCapability and delegate
+    // to increase_lockup_with_cap.
+    let owner_address = signer::address_of(owner);
+    assert_owner_cap_exists(owner_address);
+    let ownership_cap = borrow_global<OwnerCapability>(owner_address);
+    increase_lockup_with_cap(ownership_cap);
+}
+
+ + + +
 + + + +## Function `increase_lockup_with_cap` + +Extend the stake pool's lockup to now + the recurring lockup duration from the staking config. +Requires the pool's ownership capability; aborts if this would not strictly increase the lockup. + + +
public fun increase_lockup_with_cap(owner_cap: &stake::OwnerCapability)
+
+ + + +
+Implementation + + +
public fun increase_lockup_with_cap(owner_cap: &OwnerCapability) acquires StakePool {
+    // Renews the pool's lockup to now + the recurring lockup duration from the
+    // staking config; aborts unless this strictly extends the current lockup.
+    let pool_address = owner_cap.pool_address;
+    assert_stake_pool_exists(pool_address);
+    let config = staking_config::get();
+
+    let stake_pool = borrow_global_mut<StakePool>(pool_address);
+    let old_locked_until_secs = stake_pool.locked_until_secs;
+    let new_locked_until_secs = timestamp::now_seconds() + staking_config::get_recurring_lockup_duration(&config);
+    assert!(old_locked_until_secs < new_locked_until_secs, error::invalid_argument(EINVALID_LOCKUP));
+    stake_pool.locked_until_secs = new_locked_until_secs;
+
+    // Module event behind a feature flag; legacy handle event always emitted.
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            IncreaseLockup {
+                pool_address,
+                old_locked_until_secs,
+                new_locked_until_secs,
+            },
+        );
+    };
+    event::emit_event(
+        &mut stake_pool.increase_lockup_events,
+        IncreaseLockupEvent {
+            pool_address,
+            old_locked_until_secs,
+            new_locked_until_secs,
+        },
+    );
+}
+
+ + + +
 + + + +## Function `join_validator_set` + +This can only be called by the operator of the validator/staking pool. + + +
public entry fun join_validator_set(operator: &signer, pool_address: address)
+
+ + + +
+Implementation + + +
public entry fun join_validator_set(
+    operator: &signer,
+    pool_address: address
+) acquires StakePool, ValidatorConfig, ValidatorSet {
+    // Post-genesis joins are only permitted when the staking config allows
+    // validator-set changes; otherwise abort before doing any work.
+    assert!(
+        staking_config::get_allow_validator_set_change(&staking_config::get()),
+        error::invalid_argument(ENO_POST_GENESIS_VALIDATOR_SET_CHANGE_ALLOWED),
+    );
+
+    join_validator_set_internal(operator, pool_address);
+}
+
+ + + +
+ + + +## Function `join_validator_set_internal` + +Request to have pool_address join the validator set. Can only be called after calling initialize_validator. +If the validator has the required stake (more than minimum and less than maximum allowed), they will be +added to the pending_active queue. All validators in this queue will be added to the active set when the next +epoch starts (eligibility will be rechecked). + +This internal version can only be called by the Genesis module during Genesis. + + +
public(friend) fun join_validator_set_internal(operator: &signer, pool_address: address)
+
+ + + +
+Implementation + + +
public(friend) fun join_validator_set_internal(
+    operator: &signer,
+    pool_address: address
+) acquires StakePool, ValidatorConfig, ValidatorSet {
+    assert_reconfig_not_in_progress();
+    assert_stake_pool_exists(pool_address);
+    // Only the pool's operator may request to join, and only from the INACTIVE state.
+    let stake_pool = borrow_global_mut<StakePool>(pool_address);
+    assert!(signer::address_of(operator) == stake_pool.operator_address, error::unauthenticated(ENOT_OPERATOR));
+    assert!(
+        get_validator_state(pool_address) == VALIDATOR_STATUS_INACTIVE,
+        error::invalid_state(EALREADY_ACTIVE_VALIDATOR),
+    );
+
+    // The projected next-epoch voting power must lie within [minimum, maximum].
+    let config = staking_config::get();
+    let (minimum_stake, maximum_stake) = staking_config::get_required_stake(&config);
+    let voting_power = get_next_epoch_voting_power(stake_pool);
+    assert!(voting_power >= minimum_stake, error::invalid_argument(ESTAKE_TOO_LOW));
+    assert!(voting_power <= maximum_stake, error::invalid_argument(ESTAKE_TOO_HIGH));
+
+    // Track and validate voting power increase.
+    update_voting_power_increase(voting_power);
+
+    // Add validator to pending_active, to be activated in the next epoch.
+    let validator_config = borrow_global_mut<ValidatorConfig>(pool_address);
+    assert!(!vector::is_empty(&validator_config.consensus_pubkey), error::invalid_argument(EINVALID_PUBLIC_KEY));
+
+    // Validate the current validator set size has not exceeded the limit.
+    // Note: the size check runs after the push so it counts this validator too.
+    let validator_set = borrow_global_mut<ValidatorSet>(@aptos_framework);
+    vector::push_back(
+        &mut validator_set.pending_active,
+        generate_validator_info(pool_address, stake_pool, *validator_config)
+    );
+    let validator_set_size = vector::length(&validator_set.active_validators) + vector::length(
+        &validator_set.pending_active
+    );
+    assert!(validator_set_size <= MAX_VALIDATOR_SET_SIZE, error::invalid_argument(EVALIDATOR_SET_TOO_LARGE));
+
+    // Module event behind a feature flag; legacy handle event always emitted.
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(JoinValidatorSet { pool_address });
+    };
+    event::emit_event(
+        &mut stake_pool.join_validator_set_events,
+        JoinValidatorSetEvent { pool_address },
+    );
+}
+
+ + + +
+ + + +## Function `unlock` + +Similar to unlock_with_cap but will use ownership capability from the signing account. + + +
public entry fun unlock(owner: &signer, amount: u64)
+
+ + + +
+Implementation + + +
public entry fun unlock(owner: &signer, amount: u64) acquires OwnerCapability, StakePool {
+    // Convenience wrapper: look up the signer's OwnerCapability and delegate
+    // to unlock_with_cap.
+    assert_reconfig_not_in_progress();
+    let owner_address = signer::address_of(owner);
+    assert_owner_cap_exists(owner_address);
+    let ownership_cap = borrow_global<OwnerCapability>(owner_address);
+    unlock_with_cap(amount, ownership_cap);
+}
+
+ + + +
+ + + +## Function `unlock_with_cap` + +Unlock amount from the active stake. Only possible if the lockup has expired. + + +
public fun unlock_with_cap(amount: u64, owner_cap: &stake::OwnerCapability)
+
+ + + +
+Implementation + + +
public fun unlock_with_cap(amount: u64, owner_cap: &OwnerCapability) acquires StakePool {
+    // Moves up to `amount` from active to pending_inactive (capped by the
+    // active balance below).
+    assert_reconfig_not_in_progress();
+    // Short-circuit if amount to unlock is 0 so we don't emit events.
+    if (amount == 0) {
+        return
+    };
+
+    // Unlocked coins are moved to pending_inactive. When the current lockup cycle expires, they will be moved into
+    // inactive in the earliest possible epoch transition.
+    let pool_address = owner_cap.pool_address;
+    assert_stake_pool_exists(pool_address);
+    let stake_pool = borrow_global_mut<StakePool>(pool_address);
+    // Cap amount to unlock by maximum active stake.
+    let amount = min(amount, coin::value(&stake_pool.active));
+    let unlocked_stake = coin::extract(&mut stake_pool.active, amount);
+    coin::merge<AptosCoin>(&mut stake_pool.pending_inactive, unlocked_stake);
+
+    // Module event behind a feature flag; legacy handle event always emitted.
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            UnlockStake {
+                pool_address,
+                amount_unlocked: amount,
+            },
+        );
+    };
+    event::emit_event(
+        &mut stake_pool.unlock_stake_events,
+        UnlockStakeEvent {
+            pool_address,
+            amount_unlocked: amount,
+        },
+    );
+}
+
+ + + +
+ + + +## Function `withdraw` + +Withdraw from account's inactive stake. + + +
public entry fun withdraw(owner: &signer, withdraw_amount: u64)
+
+ + + +
+Implementation + + +
public entry fun withdraw(
+    owner: &signer,
+    withdraw_amount: u64
+) acquires OwnerCapability, StakePool, ValidatorSet {
+    // Withdraws via the capability, then deposits the coins back into the
+    // owner's own account.
+    let owner_address = signer::address_of(owner);
+    assert_owner_cap_exists(owner_address);
+    let ownership_cap = borrow_global<OwnerCapability>(owner_address);
+    let coins = withdraw_with_cap(ownership_cap, withdraw_amount);
+    coin::deposit<AptosCoin>(owner_address, coins);
+}
+
+ + + +
+ + + +## Function `withdraw_with_cap` + +Withdraw from pool_address's inactive stake with the corresponding owner_cap. + + +
public fun withdraw_with_cap(owner_cap: &stake::OwnerCapability, withdraw_amount: u64): coin::Coin<aptos_coin::AptosCoin>
+
+ + + +
+Implementation + + +
public fun withdraw_with_cap(
+    owner_cap: &OwnerCapability,
+    withdraw_amount: u64
+): Coin<AptosCoin> acquires StakePool, ValidatorSet {
+    // Returns up to `withdraw_amount` from the pool's inactive bucket
+    // (zero coins when nothing is withdrawable).
+    assert_reconfig_not_in_progress();
+    let pool_address = owner_cap.pool_address;
+    assert_stake_pool_exists(pool_address);
+    let stake_pool = borrow_global_mut<StakePool>(pool_address);
+    // There's an edge case where a validator unlocks their stake and leaves the validator set before
+    // the stake is fully unlocked (the current lockup cycle has not expired yet).
+    // This can leave their stake stuck in pending_inactive even after the current lockup cycle expires.
+    if (get_validator_state(pool_address) == VALIDATOR_STATUS_INACTIVE &&
+        timestamp::now_seconds() >= stake_pool.locked_until_secs) {
+        let pending_inactive_stake = coin::extract_all(&mut stake_pool.pending_inactive);
+        coin::merge(&mut stake_pool.inactive, pending_inactive_stake);
+    };
+
+    // Cap withdraw amount by total inactive coins.
+    withdraw_amount = min(withdraw_amount, coin::value(&stake_pool.inactive));
+    if (withdraw_amount == 0) return coin::zero<AptosCoin>();
+
+    // Module event behind a feature flag; legacy handle event always emitted.
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            WithdrawStake {
+                pool_address,
+                amount_withdrawn: withdraw_amount,
+            },
+        );
+    };
+    event::emit_event(
+        &mut stake_pool.withdraw_stake_events,
+        WithdrawStakeEvent {
+            pool_address,
+            amount_withdrawn: withdraw_amount,
+        },
+    );
+
+    coin::extract(&mut stake_pool.inactive, withdraw_amount)
+}
+
+ + + +
+ + + +## Function `leave_validator_set` + +Request to have pool_address leave the validator set. The validator is only actually removed from the set when +the next epoch starts. +The last validator in the set cannot leave. This is an edge case that should never happen as long as the network +is still operational. + +Can only be called by the operator of the validator/staking pool. + + +
public entry fun leave_validator_set(operator: &signer, pool_address: address)
+
+ + + +
+Implementation + + +
public entry fun leave_validator_set(
+    operator: &signer,
+    pool_address: address
+) acquires StakePool, ValidatorSet {
+    // Operator-only: either cancels a pending join (if still pending_active)
+    // or moves an active validator to pending_inactive for the next epoch.
+    assert_reconfig_not_in_progress();
+    let config = staking_config::get();
+    assert!(
+        staking_config::get_allow_validator_set_change(&config),
+        error::invalid_argument(ENO_POST_GENESIS_VALIDATOR_SET_CHANGE_ALLOWED),
+    );
+
+    assert_stake_pool_exists(pool_address);
+    let stake_pool = borrow_global_mut<StakePool>(pool_address);
+    // Account has to be the operator.
+    assert!(signer::address_of(operator) == stake_pool.operator_address, error::unauthenticated(ENOT_OPERATOR));
+
+    let validator_set = borrow_global_mut<ValidatorSet>(@aptos_framework);
+    // If the validator is still pending_active, directly kick the validator out.
+    let maybe_pending_active_index = find_validator(&validator_set.pending_active, pool_address);
+    if (option::is_some(&maybe_pending_active_index)) {
+        vector::swap_remove(
+            &mut validator_set.pending_active, option::extract(&mut maybe_pending_active_index));
+
+        // Decrease the voting power increase as the pending validator's voting power was added when they requested
+        // to join. Now that they changed their mind, their voting power should not affect the joining limit of this
+        // epoch.
+        let validator_stake = (get_next_epoch_voting_power(stake_pool) as u128);
+        // total_joining_power should be larger than validator_stake but just in case there has been a small
+        // rounding error somewhere that can lead to an underflow, we still want to allow this transaction to
+        // succeed.
+        if (validator_set.total_joining_power > validator_stake) {
+            validator_set.total_joining_power = validator_set.total_joining_power - validator_stake;
+        } else {
+            validator_set.total_joining_power = 0;
+        };
+    } else {
+        // Validate that the validator is already part of the validator set.
+        let maybe_active_index = find_validator(&validator_set.active_validators, pool_address);
+        assert!(option::is_some(&maybe_active_index), error::invalid_state(ENOT_VALIDATOR));
+        let validator_info = vector::swap_remove(
+            &mut validator_set.active_validators, option::extract(&mut maybe_active_index));
+        // The last validator may not leave: the set must stay non-empty.
+        assert!(vector::length(&validator_set.active_validators) > 0, error::invalid_state(ELAST_VALIDATOR));
+        vector::push_back(&mut validator_set.pending_inactive, validator_info);
+
+        // Module event behind a feature flag; legacy handle event always emitted.
+        if (std::features::module_event_migration_enabled()) {
+            event::emit(LeaveValidatorSet { pool_address });
+        };
+        event::emit_event(
+            &mut stake_pool.leave_validator_set_events,
+            LeaveValidatorSetEvent {
+                pool_address,
+            },
+        );
+    };
+}
+
+ + + +
+ + + +## Function `is_current_epoch_validator` + +Returns true if the current validator can still vote in the current epoch. +This includes validators that requested to leave but are still in the pending_inactive queue and will be removed +when the epoch starts. + + +
public fun is_current_epoch_validator(pool_address: address): bool
+
+ + + +
+Implementation + + +
public fun is_current_epoch_validator(pool_address: address): bool acquires ValidatorSet {
+    assert_stake_pool_exists(pool_address);
+    let validator_state = get_validator_state(pool_address);
+    validator_state == VALIDATOR_STATUS_ACTIVE || validator_state == VALIDATOR_STATUS_PENDING_INACTIVE
+}
+
+ + + +
+ + + +## Function `update_performance_statistics` + +Update the validator performance (proposal statistics). This is only called by block::prologue(). +This function cannot abort. + + +
public(friend) fun update_performance_statistics(proposer_index: option::Option<u64>, failed_proposer_indices: vector<u64>)
+
+ + + +
+Implementation + + +
public(friend) fun update_performance_statistics(
+    proposer_index: Option<u64>,
+    failed_proposer_indices: vector<u64>
+) acquires ValidatorPerformance {
+    // Validator set cannot change until the end of the epoch, so the validator index in arguments should
+    // match with those of the validators in ValidatorPerformance resource.
+    let validator_perf = borrow_global_mut<ValidatorPerformance>(@aptos_framework);
+    let validator_len = vector::length(&validator_perf.validators);
+
+    spec {
+        update ghost_valid_perf = validator_perf;
+        update ghost_proposer_idx = proposer_index;
+    };
+    // proposer_index is an option because it can be missing (for NilBlocks)
+    if (option::is_some(&proposer_index)) {
+        let cur_proposer_index = option::extract(&mut proposer_index);
+        // Here, and in all other vector::borrow, skip any validator indices that are out of bounds,
+        // this ensures that this function doesn't abort if there are out of bounds errors.
+        if (cur_proposer_index < validator_len) {
+            let validator = vector::borrow_mut(&mut validator_perf.validators, cur_proposer_index);
+            spec {
+                assume validator.successful_proposals + 1 <= MAX_U64;
+            };
+            validator.successful_proposals = validator.successful_proposals + 1;
+        };
+    };
+
+    let f = 0;
+    let f_len = vector::length(&failed_proposer_indices);
+    while ({
+        spec {
+            invariant len(validator_perf.validators) == validator_len;
+            invariant (option::spec_is_some(ghost_proposer_idx) && option::spec_borrow(
+                ghost_proposer_idx
+            ) < validator_len) ==>
+                (validator_perf.validators[option::spec_borrow(ghost_proposer_idx)].successful_proposals ==
+                    ghost_valid_perf.validators[option::spec_borrow(ghost_proposer_idx)].successful_proposals + 1);
+        };
+        f < f_len
+    }) {
+        let validator_index = *vector::borrow(&failed_proposer_indices, f);
+        if (validator_index < validator_len) {
+            let validator = vector::borrow_mut(&mut validator_perf.validators, validator_index);
+            spec {
+                assume validator.failed_proposals + 1 <= MAX_U64;
+            };
+            validator.failed_proposals = validator.failed_proposals + 1;
+        };
+        f = f + 1;
+    };
+}
+
+ + + +
+ + + +## Function `on_new_epoch` + +Triggered during a reconfiguration. This function shouldn't abort. + +1. Distribute transaction fees and rewards to stake pools of active and pending inactive validators (requested +to leave but not yet removed). +2. Officially move pending active stake to active and move pending inactive stake to inactive. +The staking pool's voting power in this new epoch will be updated to the total active stake. +3. Add pending active validators to the active set if they satisfy requirements so they can vote and remove +pending inactive validators so they no longer can vote. +4. The validator's voting power in the validator set is updated to be the corresponding staking pool's voting +power. + + +
public(friend) fun on_new_epoch()
+
+ + + +
+Implementation + + +
public(friend) fun on_new_epoch(
+) acquires StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees {
+    let validator_set = borrow_global_mut<ValidatorSet>(@aptos_framework);
+    let config = staking_config::get();
+    let validator_perf = borrow_global_mut<ValidatorPerformance>(@aptos_framework);
+
+    // Process pending stake and distribute transaction fees and rewards for each currently active validator.
+    vector::for_each_ref(&validator_set.active_validators, |validator| {
+        let validator: &ValidatorInfo = validator;
+        update_stake_pool(validator_perf, validator.addr, &config);
+    });
+
+    // Process pending stake and distribute transaction fees and rewards for each currently pending_inactive validator
+    // (requested to leave but not removed yet).
+    vector::for_each_ref(&validator_set.pending_inactive, |validator| {
+        let validator: &ValidatorInfo = validator;
+        update_stake_pool(validator_perf, validator.addr, &config);
+    });
+
+    // Activate currently pending_active validators.
+    append(&mut validator_set.active_validators, &mut validator_set.pending_active);
+
+    // Officially deactivate all pending_inactive validators. They will now no longer receive rewards.
+    validator_set.pending_inactive = vector::empty();
+
+    // Update active validator set so that network address/public key change takes effect.
+    // Moreover, recalculate the total voting power, and deactivate the validator whose
+    // voting power is less than the minimum required stake.
+    let next_epoch_validators = vector::empty();
+    let (minimum_stake, _) = staking_config::get_required_stake(&config);
+    let vlen = vector::length(&validator_set.active_validators);
+    let total_voting_power = 0;
+    let i = 0;
+    while ({
+        spec {
+            invariant spec_validators_are_initialized(next_epoch_validators);
+            invariant i <= vlen;
+        };
+        i < vlen
+    }) {
+        let old_validator_info = vector::borrow_mut(&mut validator_set.active_validators, i);
+        let pool_address = old_validator_info.addr;
+        let validator_config = borrow_global_mut<ValidatorConfig>(pool_address);
+        let stake_pool = borrow_global_mut<StakePool>(pool_address);
+        let new_validator_info = generate_validator_info(pool_address, stake_pool, *validator_config);
+
+        // A validator needs at least the min stake required to join the validator set.
+        if (new_validator_info.voting_power >= minimum_stake) {
+            spec {
+                assume total_voting_power + new_validator_info.voting_power <= MAX_U128;
+            };
+            total_voting_power = total_voting_power + (new_validator_info.voting_power as u128);
+            vector::push_back(&mut next_epoch_validators, new_validator_info);
+        };
+        i = i + 1;
+    };
+
+    validator_set.active_validators = next_epoch_validators;
+    validator_set.total_voting_power = total_voting_power;
+    validator_set.total_joining_power = 0;
+
+    // Update validator indices, reset performance scores, and renew lockups.
+    validator_perf.validators = vector::empty();
+    let recurring_lockup_duration_secs = staking_config::get_recurring_lockup_duration(&config);
+    let vlen = vector::length(&validator_set.active_validators);
+    let validator_index = 0;
+    while ({
+        spec {
+            invariant spec_validators_are_initialized(validator_set.active_validators);
+            invariant len(validator_set.pending_active) == 0;
+            invariant len(validator_set.pending_inactive) == 0;
+            invariant 0 <= validator_index && validator_index <= vlen;
+            invariant vlen == len(validator_set.active_validators);
+            invariant forall i in 0..validator_index:
+                global<ValidatorConfig>(validator_set.active_validators[i].addr).validator_index < validator_index;
+            invariant forall i in 0..validator_index:
+                validator_set.active_validators[i].config.validator_index < validator_index;
+            invariant len(validator_perf.validators) == validator_index;
+        };
+        validator_index < vlen
+    }) {
+        let validator_info = vector::borrow_mut(&mut validator_set.active_validators, validator_index);
+        validator_info.config.validator_index = validator_index;
+        let validator_config = borrow_global_mut<ValidatorConfig>(validator_info.addr);
+        validator_config.validator_index = validator_index;
+
+        vector::push_back(&mut validator_perf.validators, IndividualValidatorPerformance {
+            successful_proposals: 0,
+            failed_proposals: 0,
+        });
+
+        // Automatically renew a validator's lockup for validators that will still be in the validator set in the
+        // next epoch.
+        let stake_pool = borrow_global_mut<StakePool>(validator_info.addr);
+        let now_secs = timestamp::now_seconds();
+        let reconfig_start_secs = if (chain_status::is_operating()) {
+            get_reconfig_start_time_secs()
+        } else {
+            now_secs
+        };
+        if (stake_pool.locked_until_secs <= reconfig_start_secs) {
+            spec {
+                assume now_secs + recurring_lockup_duration_secs <= MAX_U64;
+            };
+            stake_pool.locked_until_secs = now_secs + recurring_lockup_duration_secs;
+        };
+
+        validator_index = validator_index + 1;
+    };
+
+    if (features::periodical_reward_rate_decrease_enabled()) {
+        // Update rewards rate after reward distribution.
+        staking_config::calculate_and_save_latest_epoch_rewards_rate();
+    };
+}
+
+ + + +
+ + + +## Function `cur_validator_consensus_infos` + +Return the ValidatorConsensusInfo of each current validator, sorted by current validator index. + + +
public fun cur_validator_consensus_infos(): vector<validator_consensus_info::ValidatorConsensusInfo>
+
+ + + +
+Implementation + + +
public fun cur_validator_consensus_infos(): vector<ValidatorConsensusInfo> acquires ValidatorSet {
+    let validator_set = borrow_global<ValidatorSet>(@aptos_framework);
+    validator_consensus_infos_from_validator_set(validator_set)
+}
+
+ + + +
+ + + +## Function `next_validator_consensus_infos` + + + +
public fun next_validator_consensus_infos(): vector<validator_consensus_info::ValidatorConsensusInfo>
+
+ + + +
+Implementation + + +
public fun next_validator_consensus_infos(): vector<ValidatorConsensusInfo> acquires ValidatorSet, ValidatorPerformance, StakePool, ValidatorFees, ValidatorConfig {
+    // Init.
+    let cur_validator_set = borrow_global<ValidatorSet>(@aptos_framework);
+    let staking_config = staking_config::get();
+    let validator_perf = borrow_global<ValidatorPerformance>(@aptos_framework);
+    let (minimum_stake, _) = staking_config::get_required_stake(&staking_config);
+    let (rewards_rate, rewards_rate_denominator) = staking_config::get_reward_rate(&staking_config);
+
+    // Compute new validator set.
+    let new_active_validators = vector[];
+    let num_new_actives = 0;
+    let candidate_idx = 0;
+    let new_total_power = 0;
+    let num_cur_actives = vector::length(&cur_validator_set.active_validators);
+    let num_cur_pending_actives = vector::length(&cur_validator_set.pending_active);
+    spec {
+        assume num_cur_actives + num_cur_pending_actives <= MAX_U64;
+    };
+    let num_candidates = num_cur_actives + num_cur_pending_actives;
+    while ({
+        spec {
+            invariant candidate_idx <= num_candidates;
+            invariant spec_validators_are_initialized(new_active_validators);
+            invariant len(new_active_validators) == num_new_actives;
+            invariant forall i in 0..len(new_active_validators):
+                new_active_validators[i].config.validator_index == i;
+            invariant num_new_actives <= candidate_idx;
+            invariant spec_validators_are_initialized(new_active_validators);
+        };
+        candidate_idx < num_candidates
+    }) {
+        let candidate_in_current_validator_set = candidate_idx < num_cur_actives;
+        let candidate = if (candidate_idx < num_cur_actives) {
+            vector::borrow(&cur_validator_set.active_validators, candidate_idx)
+        } else {
+            vector::borrow(&cur_validator_set.pending_active, candidate_idx - num_cur_actives)
+        };
+        let stake_pool = borrow_global<StakePool>(candidate.addr);
+        let cur_active = coin::value(&stake_pool.active);
+        let cur_pending_active = coin::value(&stake_pool.pending_active);
+        let cur_pending_inactive = coin::value(&stake_pool.pending_inactive);
+
+        let cur_reward = if (candidate_in_current_validator_set && cur_active > 0) {
+            spec {
+                assert candidate.config.validator_index < len(validator_perf.validators);
+            };
+            let cur_perf = vector::borrow(&validator_perf.validators, candidate.config.validator_index);
+            spec {
+                assume cur_perf.successful_proposals + cur_perf.failed_proposals <= MAX_U64;
+            };
+            calculate_rewards_amount(cur_active, cur_perf.successful_proposals, cur_perf.successful_proposals + cur_perf.failed_proposals, rewards_rate, rewards_rate_denominator)
+        } else {
+            0
+        };
+
+        let cur_fee = 0;
+        if (features::collect_and_distribute_gas_fees()) {
+            let fees_table = &borrow_global<ValidatorFees>(@aptos_framework).fees_table;
+            if (table::contains(fees_table, candidate.addr)) {
+                let fee_coin = table::borrow(fees_table, candidate.addr);
+                cur_fee = coin::value(fee_coin);
+            }
+        };
+
+        let lockup_expired = get_reconfig_start_time_secs() >= stake_pool.locked_until_secs;
+        spec {
+            assume cur_active + cur_pending_active + cur_reward + cur_fee <= MAX_U64;
+            assume cur_active + cur_pending_inactive + cur_pending_active + cur_reward + cur_fee <= MAX_U64;
+        };
+        let new_voting_power =
+            cur_active
+            + if (lockup_expired) { 0 } else { cur_pending_inactive }
+            + cur_pending_active
+            + cur_reward + cur_fee;
+
+        if (new_voting_power >= minimum_stake) {
+            let config = *borrow_global<ValidatorConfig>(candidate.addr);
+            config.validator_index = num_new_actives;
+            let new_validator_info = ValidatorInfo {
+                addr: candidate.addr,
+                voting_power: new_voting_power,
+                config,
+            };
+
+            // Update ValidatorSet.
+            spec {
+                assume new_total_power + new_voting_power <= MAX_U128;
+            };
+            new_total_power = new_total_power + (new_voting_power as u128);
+            vector::push_back(&mut new_active_validators, new_validator_info);
+            num_new_actives = num_new_actives + 1;
+
+        };
+        candidate_idx = candidate_idx + 1;
+    };
+
+    let new_validator_set = ValidatorSet {
+        consensus_scheme: cur_validator_set.consensus_scheme,
+        active_validators: new_active_validators,
+        pending_inactive: vector[],
+        pending_active: vector[],
+        total_voting_power: new_total_power,
+        total_joining_power: 0,
+    };
+
+    validator_consensus_infos_from_validator_set(&new_validator_set)
+}
+
+ + + +
+ + + +## Function `validator_consensus_infos_from_validator_set` + + + +
fun validator_consensus_infos_from_validator_set(validator_set: &stake::ValidatorSet): vector<validator_consensus_info::ValidatorConsensusInfo>
+
+ + + +
+Implementation + + +
fun validator_consensus_infos_from_validator_set(validator_set: &ValidatorSet): vector<ValidatorConsensusInfo> {
+    let validator_consensus_infos = vector[];
+
+    let num_active = vector::length(&validator_set.active_validators);
+    let num_pending_inactive = vector::length(&validator_set.pending_inactive);
+    spec {
+        assume num_active + num_pending_inactive <= MAX_U64;
+    };
+    let total = num_active + num_pending_inactive;
+
+    // Pre-fill the return value with dummy values.
+    let idx = 0;
+    while ({
+        spec {
+            invariant idx <= len(validator_set.active_validators) + len(validator_set.pending_inactive);
+            invariant len(validator_consensus_infos) == idx;
+            invariant len(validator_consensus_infos) <= len(validator_set.active_validators) + len(validator_set.pending_inactive);
+        };
+        idx < total
+    }) {
+        vector::push_back(&mut validator_consensus_infos, validator_consensus_info::default());
+        idx = idx + 1;
+    };
+    spec {
+        assert len(validator_consensus_infos) == len(validator_set.active_validators) + len(validator_set.pending_inactive);
+        assert spec_validator_indices_are_valid_config(validator_set.active_validators,
+            len(validator_set.active_validators) + len(validator_set.pending_inactive));
+    };
+
+    vector::for_each_ref(&validator_set.active_validators, |obj| {
+        let vi: &ValidatorInfo = obj;
+        spec {
+            assume len(validator_consensus_infos) == len(validator_set.active_validators) + len(validator_set.pending_inactive);
+            assert vi.config.validator_index < len(validator_consensus_infos);
+        };
+        let vci = vector::borrow_mut(&mut validator_consensus_infos, vi.config.validator_index);
+        *vci = validator_consensus_info::new(
+            vi.addr,
+            vi.config.consensus_pubkey,
+            vi.voting_power
+        );
+        spec {
+            assert len(validator_consensus_infos) == len(validator_set.active_validators) + len(validator_set.pending_inactive);
+        };
+    });
+
+    vector::for_each_ref(&validator_set.pending_inactive, |obj| {
+        let vi: &ValidatorInfo = obj;
+        spec {
+            assume len(validator_consensus_infos) == len(validator_set.active_validators) + len(validator_set.pending_inactive);
+            assert vi.config.validator_index < len(validator_consensus_infos);
+        };
+        let vci = vector::borrow_mut(&mut validator_consensus_infos, vi.config.validator_index);
+        *vci = validator_consensus_info::new(
+            vi.addr,
+            vi.config.consensus_pubkey,
+            vi.voting_power
+        );
+        spec {
+            assert len(validator_consensus_infos) == len(validator_set.active_validators) + len(validator_set.pending_inactive);
+        };
+    });
+
+    validator_consensus_infos
+}
+
+ + + +
+ + + +## Function `addresses_from_validator_infos` + + + +
fun addresses_from_validator_infos(infos: &vector<stake::ValidatorInfo>): vector<address>
+
+ + + +
+Implementation + + +
fun addresses_from_validator_infos(infos: &vector<ValidatorInfo>): vector<address> {
+    vector::map_ref(infos, |obj| {
+        let info: &ValidatorInfo = obj;
+        info.addr
+    })
+}
+
+ + + +
 + + + +## Function `update_stake_pool` + +Calculate the stake amount of a stake pool for the next epoch. +Updates the individual validator's stake pool in place. + +1. distribute transaction fees to active/pending_inactive delegations +2. distribute rewards to active/pending_inactive delegations +3. process pending_active, pending_inactive correspondingly +This function shouldn't abort. + + +
fun update_stake_pool(validator_perf: &stake::ValidatorPerformance, pool_address: address, staking_config: &staking_config::StakingConfig)
+
+ + + +
+Implementation + + +
fun update_stake_pool(
+    validator_perf: &ValidatorPerformance,
+    pool_address: address,
+    staking_config: &StakingConfig,
+) acquires StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorFees {
+    let stake_pool = borrow_global_mut<StakePool>(pool_address);
+    let validator_config = borrow_global<ValidatorConfig>(pool_address);
+    let cur_validator_perf = vector::borrow(&validator_perf.validators, validator_config.validator_index);
+    let num_successful_proposals = cur_validator_perf.successful_proposals;
+    spec {
+        // The following addition should not overflow because `num_total_proposals` cannot be larger than 86400,
+        // the maximum number of proposals in a day (1 proposal per second).
+        assume cur_validator_perf.successful_proposals + cur_validator_perf.failed_proposals <= MAX_U64;
+    };
+    let num_total_proposals = cur_validator_perf.successful_proposals + cur_validator_perf.failed_proposals;
+    let (rewards_rate, rewards_rate_denominator) = staking_config::get_reward_rate(staking_config);
+    let rewards_active = distribute_rewards(
+        &mut stake_pool.active,
+        num_successful_proposals,
+        num_total_proposals,
+        rewards_rate,
+        rewards_rate_denominator
+    );
+    let rewards_pending_inactive = distribute_rewards(
+        &mut stake_pool.pending_inactive,
+        num_successful_proposals,
+        num_total_proposals,
+        rewards_rate,
+        rewards_rate_denominator
+    );
+    spec {
+        assume rewards_active + rewards_pending_inactive <= MAX_U64;
+    };
+    let rewards_amount = rewards_active + rewards_pending_inactive;
+    // Pending active stake can now be active.
+    coin::merge(&mut stake_pool.active, coin::extract_all(&mut stake_pool.pending_active));
+
+    // Additionally, distribute transaction fees.
+    if (features::collect_and_distribute_gas_fees()) {
+        let fees_table = &mut borrow_global_mut<ValidatorFees>(@aptos_framework).fees_table;
+        if (table::contains(fees_table, pool_address)) {
+            let coin = table::remove(fees_table, pool_address);
+            coin::merge(&mut stake_pool.active, coin);
+        };
+    };
+
+    // Pending inactive stake is only fully unlocked and moved into inactive if the current lockup cycle has expired
+    let current_lockup_expiration = stake_pool.locked_until_secs;
+    if (get_reconfig_start_time_secs() >= current_lockup_expiration) {
+        coin::merge(
+            &mut stake_pool.inactive,
+            coin::extract_all(&mut stake_pool.pending_inactive),
+        );
+    };
+
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(DistributeRewards { pool_address, rewards_amount });
+    };
+    event::emit_event(
+        &mut stake_pool.distribute_rewards_events,
+        DistributeRewardsEvent {
+            pool_address,
+            rewards_amount,
+        },
+    );
+}
+
+ + + +
 + + + +## Function `get_reconfig_start_time_secs` + +Assuming we are in the middle of a reconfiguration (whether it is immediate or async), get its start time. + + +
fun get_reconfig_start_time_secs(): u64
+
+ + + +
+Implementation + + +
fun get_reconfig_start_time_secs(): u64 {
+    if (reconfiguration_state::is_initialized()) {
+        reconfiguration_state::start_time_secs()
+    } else {
+        timestamp::now_seconds()
+    }
+}
+
+ + + +
+ + + +## Function `calculate_rewards_amount` + +Calculate the rewards amount. + + +
fun calculate_rewards_amount(stake_amount: u64, num_successful_proposals: u64, num_total_proposals: u64, rewards_rate: u64, rewards_rate_denominator: u64): u64
+
+ + + +
+Implementation + + +
fun calculate_rewards_amount(
+    stake_amount: u64,
+    num_successful_proposals: u64,
+    num_total_proposals: u64,
+    rewards_rate: u64,
+    rewards_rate_denominator: u64,
+): u64 {
+    spec {
+        // The following condition must hold because
+        // (1) num_successful_proposals <= num_total_proposals, and
+        // (2) `num_total_proposals` cannot be larger than 86400, the maximum number of proposals
+        //     in a day (1 proposal per second), and `num_total_proposals` is reset to 0 every epoch.
+        assume num_successful_proposals * MAX_REWARDS_RATE <= MAX_U64;
+    };
+    // The rewards amount is equal to (stake amount * rewards rate * performance multiplier).
+    // We do multiplication in u128 before division to avoid the overflow and minimize the rounding error.
+    let rewards_numerator = (stake_amount as u128) * (rewards_rate as u128) * (num_successful_proposals as u128);
+    let rewards_denominator = (rewards_rate_denominator as u128) * (num_total_proposals as u128);
+    if (rewards_denominator > 0) {
+        ((rewards_numerator / rewards_denominator) as u64)
+    } else {
+        0
+    }
+}
+
+ + + +
+ + + +## Function `distribute_rewards` + +Mint rewards corresponding to current epoch's stake and num_successful_votes. + + +
fun distribute_rewards(stake: &mut coin::Coin<aptos_coin::AptosCoin>, num_successful_proposals: u64, num_total_proposals: u64, rewards_rate: u64, rewards_rate_denominator: u64): u64
+
+ + + +
+Implementation + + +
fun distribute_rewards(
+    stake: &mut Coin<AptosCoin>,
+    num_successful_proposals: u64,
+    num_total_proposals: u64,
+    rewards_rate: u64,
+    rewards_rate_denominator: u64,
+): u64 acquires AptosCoinCapabilities {
+    let stake_amount = coin::value(stake);
+    let rewards_amount = if (stake_amount > 0) {
+        calculate_rewards_amount(
+            stake_amount,
+            num_successful_proposals,
+            num_total_proposals,
+            rewards_rate,
+            rewards_rate_denominator
+        )
+    } else {
+        0
+    };
+    if (rewards_amount > 0) {
+        let mint_cap = &borrow_global<AptosCoinCapabilities>(@aptos_framework).mint_cap;
+        let rewards = coin::mint(rewards_amount, mint_cap);
+        coin::merge(stake, rewards);
+    };
+    rewards_amount
+}
+
+ + + +
+ + + +## Function `append` + + + +
fun append<T>(v1: &mut vector<T>, v2: &mut vector<T>)
+
+ + + +
+Implementation + + +
fun append<T>(v1: &mut vector<T>, v2: &mut vector<T>) {
+    while (!vector::is_empty(v2)) {
+        vector::push_back(v1, vector::pop_back(v2));
+    }
+}
+
+ + + +
+ + + +## Function `find_validator` + + + +
fun find_validator(v: &vector<stake::ValidatorInfo>, addr: address): option::Option<u64>
+
+ + + +
+Implementation + + +
fun find_validator(v: &vector<ValidatorInfo>, addr: address): Option<u64> {
+    let i = 0;
+    let len = vector::length(v);
+    while ({
+        spec {
+            invariant !(exists j in 0..i: v[j].addr == addr);
+        };
+        i < len
+    }) {
+        if (vector::borrow(v, i).addr == addr) {
+            return option::some(i)
+        };
+        i = i + 1;
+    };
+    option::none()
+}
+
+ + + +
+ + + +## Function `generate_validator_info` + + + +
fun generate_validator_info(addr: address, stake_pool: &stake::StakePool, config: stake::ValidatorConfig): stake::ValidatorInfo
+
+ + + +
+Implementation + + +
fun generate_validator_info(addr: address, stake_pool: &StakePool, config: ValidatorConfig): ValidatorInfo {
+    let voting_power = get_next_epoch_voting_power(stake_pool);
+    ValidatorInfo {
+        addr,
+        voting_power,
+        config,
+    }
+}
+
+ + + +
+ + + +## Function `get_next_epoch_voting_power` + +Returns validator's next epoch voting power, including pending_active, active, and pending_inactive stake. + + +
fun get_next_epoch_voting_power(stake_pool: &stake::StakePool): u64
+
+ + + +
+Implementation + + +
fun get_next_epoch_voting_power(stake_pool: &StakePool): u64 {
+    let value_pending_active = coin::value(&stake_pool.pending_active);
+    let value_active = coin::value(&stake_pool.active);
+    let value_pending_inactive = coin::value(&stake_pool.pending_inactive);
+    spec {
+        assume value_pending_active + value_active + value_pending_inactive <= MAX_U64;
+    };
+    value_pending_active + value_active + value_pending_inactive
+}
+
+ + + +
+ + + +## Function `update_voting_power_increase` + + + +
fun update_voting_power_increase(increase_amount: u64)
+
+ + + +
+Implementation + + +
fun update_voting_power_increase(increase_amount: u64) acquires ValidatorSet {
+    let validator_set = borrow_global_mut<ValidatorSet>(@aptos_framework);
+    let voting_power_increase_limit =
+        (staking_config::get_voting_power_increase_limit(&staking_config::get()) as u128);
+    validator_set.total_joining_power = validator_set.total_joining_power + (increase_amount as u128);
+
+    // Only enforce the voting power increase limit if the current validator set's voting power > 0.
+    if (validator_set.total_voting_power > 0) {
+        assert!(
+            validator_set.total_joining_power <= validator_set.total_voting_power * voting_power_increase_limit / 100,
+            error::invalid_argument(EVOTING_POWER_INCREASE_EXCEEDS_LIMIT),
+        );
+    }
+}
+
+ + + +
+ + + +## Function `assert_stake_pool_exists` + + + +
fun assert_stake_pool_exists(pool_address: address)
+
+ + + +
+Implementation + + +
fun assert_stake_pool_exists(pool_address: address) {
+    assert!(stake_pool_exists(pool_address), error::invalid_argument(ESTAKE_POOL_DOES_NOT_EXIST));
+}
+
+ + + +
+ + + +## Function `configure_allowed_validators` + + + +
public fun configure_allowed_validators(aptos_framework: &signer, accounts: vector<address>)
+
+ + + +
+Implementation + + +
public fun configure_allowed_validators(
+    aptos_framework: &signer,
+    accounts: vector<address>
+) acquires AllowedValidators {
+    let aptos_framework_address = signer::address_of(aptos_framework);
+    system_addresses::assert_aptos_framework(aptos_framework);
+    if (!exists<AllowedValidators>(aptos_framework_address)) {
+        move_to(aptos_framework, AllowedValidators { accounts });
+    } else {
+        let allowed = borrow_global_mut<AllowedValidators>(aptos_framework_address);
+        allowed.accounts = accounts;
+    }
+}
+
+ + + +
+ + + +## Function `is_allowed` + + + +
fun is_allowed(account: address): bool
+
+ + + +
+Implementation + + +
fun is_allowed(account: address): bool acquires AllowedValidators {
+    if (!exists<AllowedValidators>(@aptos_framework)) {
+        true
+    } else {
+        let allowed = borrow_global<AllowedValidators>(@aptos_framework);
+        vector::contains(&allowed.accounts, &account)
+    }
+}
+
+ + + +
+ + + +## Function `assert_owner_cap_exists` + + + +
fun assert_owner_cap_exists(owner: address)
+
+ + + +
+Implementation + + +
fun assert_owner_cap_exists(owner: address) {
+    assert!(exists<OwnerCapability>(owner), error::not_found(EOWNER_CAP_NOT_FOUND));
+}
+
+ + + +
+ + + +## Function `assert_reconfig_not_in_progress` + + + +
fun assert_reconfig_not_in_progress()
+
+ + + +
+Implementation + + +
fun assert_reconfig_not_in_progress() {
+    assert!(!reconfiguration_state::is_in_progress(), error::invalid_state(ERECONFIGURATION_IN_PROGRESS));
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1The validator set resource stores consensus information for each validator. The consensus scheme remains consistent across all validators within the set.LowThe consensus_scheme attribute within ValidatorSet initializes with the value zero during the module's initialization and its value remains unchanged afterward.Formally verified by the data invariant of ValidatorSet.
2The owner of a validator is immutable.LowDuring the initialization of a validator, the owner attribute becomes the signer's address. This assignment establishes the signer as the owner and controller of the validator entity. Subsequently, the owner attribute remains unchanged throughout the validator's lifespan, maintaining its assigned value without any modifications.Formally verified in the schema ValidatorOwnerNoChange.
3The total staked value in the stake pool should remain constant, excluding operations related to adding and withdrawing.LowThe total staked value (AptosCoin) of a stake pool is grouped by: active, inactive, pending_active, and pending_inactive. The stake value remains constant except during the execution of the add_stake_with_cap or withdraw_with_cap functions or on_new_epoch (which distributes the reward).Formally specified in the schema StakedValueNoChange.
4During each epoch, the following operations should be consistently performed without aborting: rewards distribution, validator activation/deactivation, updates to validator sets and voting power, and renewal of lockups.LowThe on_new_epoch function is triggered at each epoch boundary to perform distribution of the transaction fee, updates to active/inactive stakes, updates to pending active/inactive validators and adjusts voting power of the validators without aborting.Formally verified via on_new_epoch. This also requires a manual review to verify the state updates of the stake pool.
+ + + + + + +### Module-level Specification + + +
pragma verify = true;
+invariant [suspendable] exists<ValidatorSet>(@aptos_framework) ==> validator_set_is_valid();
+invariant [suspendable] chain_status::is_operating() ==> exists<AptosCoinCapabilities>(@aptos_framework);
+invariant [suspendable] chain_status::is_operating() ==> exists<ValidatorPerformance>(@aptos_framework);
+invariant [suspendable] chain_status::is_operating() ==> exists<ValidatorSet>(@aptos_framework);
+apply ValidatorOwnerNoChange to *;
+apply ValidatorNotChangeDuringReconfig to * except on_new_epoch;
+apply StakePoolNotChangeDuringReconfig to * except on_new_epoch, update_stake_pool;
+
+global ghost_valid_perf: ValidatorPerformance;
+
+global ghost_proposer_idx: Option<u64>;
+
+global ghost_active_num: u64;
+
+global ghost_pending_inactive_num: u64;
+
+ + + + + +### Resource `ValidatorSet` + + +
struct ValidatorSet has copy, drop, store, key
+
+ + + +
+
+consensus_scheme: u8 +
+
+ +
+
+active_validators: vector<stake::ValidatorInfo> +
+
+ +
+
+pending_inactive: vector<stake::ValidatorInfo> +
+
+ +
+
+pending_active: vector<stake::ValidatorInfo> +
+
+ +
+
+total_voting_power: u128 +
+
+ +
+
+total_joining_power: u128 +
+
+ +
+
+ + + +
// This enforces high-level requirement 1:
+invariant consensus_scheme == 0;
+
+ + + + + + + +
schema ValidatorNotChangeDuringReconfig {
+    ensures (reconfiguration_state::spec_is_in_progress() && old(exists<ValidatorSet>(@aptos_framework))) ==>
+        old(global<ValidatorSet>(@aptos_framework)) == global<ValidatorSet>(@aptos_framework);
+}
+
+ + + + + + + +
schema StakePoolNotChangeDuringReconfig {
+    ensures forall a: address where old(exists<StakePool>(a)): reconfiguration_state::spec_is_in_progress() ==>
+        (old(global<StakePool>(a).pending_inactive) == global<StakePool>(a).pending_inactive &&
+        old(global<StakePool>(a).pending_active) == global<StakePool>(a).pending_active &&
+        old(global<StakePool>(a).inactive) == global<StakePool>(a).inactive &&
+        old(global<StakePool>(a).active) == global<StakePool>(a).active);
+}
+
+ + + + + + + +
schema ValidatorOwnerNoChange {
+    // This enforces high-level requirement 2:
+    ensures forall addr: address where old(exists<OwnerCapability>(addr)):
+        old(global<OwnerCapability>(addr)).pool_address == global<OwnerCapability>(addr).pool_address;
+}
+
+ + + + + + + +
schema StakedValueNochange {
+    pool_address: address;
+    let stake_pool = global<StakePool>(pool_address);
+    let post post_stake_pool = global<StakePool>(pool_address);
+    // This enforces high-level requirement 3:
+    ensures stake_pool.active.value + stake_pool.inactive.value + stake_pool.pending_active.value + stake_pool.pending_inactive.value ==
+        post_stake_pool.active.value + post_stake_pool.inactive.value + post_stake_pool.pending_active.value + post_stake_pool.pending_inactive.value;
+}
+
+ + + + + + + +
fun validator_set_is_valid(): bool {
+   let validator_set = global<ValidatorSet>(@aptos_framework);
+   validator_set_is_valid_impl(validator_set)
+}
+
+ + + + + + + +
fun validator_set_is_valid_impl(validator_set: ValidatorSet): bool {
+   spec_validators_are_initialized(validator_set.active_validators) &&
+       spec_validators_are_initialized(validator_set.pending_inactive) &&
+       spec_validators_are_initialized(validator_set.pending_active) &&
+       spec_validator_indices_are_valid(validator_set.active_validators) &&
+       spec_validator_indices_are_valid(validator_set.pending_inactive)
+       && spec_validator_indices_active_pending_inactive(validator_set)
+}
+
+ + + + + +### Function `initialize_validator_fees` + + +
public(friend) fun initialize_validator_fees(aptos_framework: &signer)
+
+ + + + +
let aptos_addr = signer::address_of(aptos_framework);
+aborts_if !system_addresses::is_aptos_framework_address(aptos_addr);
+aborts_if exists<ValidatorFees>(aptos_addr);
+ensures exists<ValidatorFees>(aptos_addr);
+
+ + + + + +### Function `add_transaction_fee` + + +
public(friend) fun add_transaction_fee(validator_addr: address, fee: coin::Coin<aptos_coin::AptosCoin>)
+
+ + + + +
aborts_if !exists<ValidatorFees>(@aptos_framework);
+let fees_table = global<ValidatorFees>(@aptos_framework).fees_table;
+let post post_fees_table = global<ValidatorFees>(@aptos_framework).fees_table;
+let collected_fee = table::spec_get(fees_table, validator_addr);
+let post post_collected_fee = table::spec_get(post_fees_table, validator_addr);
+ensures if (table::spec_contains(fees_table, validator_addr)) {
+    post_collected_fee.value == collected_fee.value + fee.value
+} else {
+    table::spec_contains(post_fees_table, validator_addr) &&
+    table::spec_get(post_fees_table, validator_addr) == fee
+};
+
+ + + + + +### Function `get_validator_state` + + +
#[view]
+public fun get_validator_state(pool_address: address): u64
+
+ + + + +
aborts_if !exists<ValidatorSet>(@aptos_framework);
+let validator_set = global<ValidatorSet>(@aptos_framework);
+ensures result == VALIDATOR_STATUS_PENDING_ACTIVE ==> spec_contains(validator_set.pending_active, pool_address);
+ensures result == VALIDATOR_STATUS_ACTIVE ==> spec_contains(validator_set.active_validators, pool_address);
+ensures result == VALIDATOR_STATUS_PENDING_INACTIVE ==> spec_contains(validator_set.pending_inactive, pool_address);
+ensures result == VALIDATOR_STATUS_INACTIVE ==> (
+    !spec_contains(validator_set.pending_active, pool_address)
+        && !spec_contains(validator_set.active_validators, pool_address)
+        && !spec_contains(validator_set.pending_inactive, pool_address)
+);
+
+ + + + + +### Function `initialize` + + +
public(friend) fun initialize(aptos_framework: &signer)
+
+ + + + +
pragma disable_invariants_in_body;
+let aptos_addr = signer::address_of(aptos_framework);
+aborts_if !system_addresses::is_aptos_framework_address(aptos_addr);
+aborts_if exists<ValidatorSet>(aptos_addr);
+aborts_if exists<ValidatorPerformance>(aptos_addr);
+ensures exists<ValidatorSet>(aptos_addr);
+ensures global<ValidatorSet>(aptos_addr).consensus_scheme == 0;
+ensures exists<ValidatorPerformance>(aptos_addr);
+
+ + + + + +### Function `remove_validators` + + +
public fun remove_validators(aptos_framework: &signer, validators: &vector<address>)
+
+ + + + +
requires chain_status::is_operating();
+let validator_set = global<ValidatorSet>(@aptos_framework);
+let post post_validator_set = global<ValidatorSet>(@aptos_framework);
+let active_validators = validator_set.active_validators;
+let post post_active_validators = post_validator_set.active_validators;
+let pending_inactive_validators = validator_set.pending_inactive;
+let post post_pending_inactive_validators = post_validator_set.pending_inactive;
+invariant len(active_validators) > 0;
+ensures len(active_validators) + len(pending_inactive_validators) == len(post_active_validators)
+    + len(post_pending_inactive_validators);
+
+ + + + + +### Function `initialize_stake_owner` + + +
public entry fun initialize_stake_owner(owner: &signer, initial_stake_amount: u64, operator: address, voter: address)
+
+ + + + +
pragma verify_duration_estimate = 120;
+include ResourceRequirement;
+let addr = signer::address_of(owner);
+ensures global<ValidatorConfig>(addr) == ValidatorConfig {
+    consensus_pubkey: vector::empty(),
+    network_addresses: vector::empty(),
+    fullnode_addresses: vector::empty(),
+    validator_index: 0,
+};
+ensures global<OwnerCapability>(addr) == OwnerCapability { pool_address: addr };
+let post stakepool = global<StakePool>(addr);
+let post active = stakepool.active.value;
+let post pending_active = stakepool.pending_active.value;
+ensures spec_is_current_epoch_validator(addr) ==>
+    pending_active == initial_stake_amount;
+ensures !spec_is_current_epoch_validator(addr) ==>
+    active == initial_stake_amount;
+
+ + + + + +### Function `initialize_validator` + + +
public entry fun initialize_validator(account: &signer, consensus_pubkey: vector<u8>, proof_of_possession: vector<u8>, network_addresses: vector<u8>, fullnode_addresses: vector<u8>)
+
+ + + + +
let pubkey_from_pop = bls12381::spec_public_key_from_bytes_with_pop(
+    consensus_pubkey,
+    proof_of_possession_from_bytes(proof_of_possession)
+);
+aborts_if !option::spec_is_some(pubkey_from_pop);
+let addr = signer::address_of(account);
+let post_addr = signer::address_of(account);
+let allowed = global<AllowedValidators>(@aptos_framework);
+aborts_if exists<ValidatorConfig>(addr);
+aborts_if exists<AllowedValidators>(@aptos_framework) && !vector::spec_contains(allowed.accounts, addr);
+aborts_if stake_pool_exists(addr);
+aborts_if exists<OwnerCapability>(addr);
+aborts_if !exists<account::Account>(addr);
+aborts_if global<account::Account>(addr).guid_creation_num + 12 > MAX_U64;
+aborts_if global<account::Account>(addr).guid_creation_num + 12 >= account::MAX_GUID_CREATION_NUM;
+ensures exists<StakePool>(post_addr);
+ensures global<OwnerCapability>(post_addr) == OwnerCapability { pool_address: post_addr };
+ensures global<ValidatorConfig>(post_addr) == ValidatorConfig {
+    consensus_pubkey,
+    network_addresses,
+    fullnode_addresses,
+    validator_index: 0,
+};
+
+ + + + + +### Function `extract_owner_cap` + + +
public fun extract_owner_cap(owner: &signer): stake::OwnerCapability
+
+ + + + +
pragma verify_duration_estimate = 300;
+let owner_address = signer::address_of(owner);
+aborts_if !exists<OwnerCapability>(owner_address);
+ensures !exists<OwnerCapability>(owner_address);
+
+ + + + + +### Function `deposit_owner_cap` + + +
public fun deposit_owner_cap(owner: &signer, owner_cap: stake::OwnerCapability)
+
+ + + + +
let owner_address = signer::address_of(owner);
+aborts_if exists<OwnerCapability>(owner_address);
+ensures exists<OwnerCapability>(owner_address);
+ensures global<OwnerCapability>(owner_address) == owner_cap;
+
+ + + + + +### Function `set_operator_with_cap` + + +
public fun set_operator_with_cap(owner_cap: &stake::OwnerCapability, new_operator: address)
+
+ + + + +
let pool_address = owner_cap.pool_address;
+let post post_stake_pool = global<StakePool>(pool_address);
+modifies global<StakePool>(pool_address);
+include StakedValueNochange;
+ensures post_stake_pool.operator_address == new_operator;
+
+ + + + + +### Function `set_delegated_voter_with_cap` + + +
public fun set_delegated_voter_with_cap(owner_cap: &stake::OwnerCapability, new_voter: address)
+
+ + + + +
let pool_address = owner_cap.pool_address;
+let post post_stake_pool = global<StakePool>(pool_address);
+include StakedValueNochange;
+aborts_if !exists<StakePool>(pool_address);
+modifies global<StakePool>(pool_address);
+ensures post_stake_pool.delegated_voter == new_voter;
+
+ + + + + +### Function `add_stake` + + +
public entry fun add_stake(owner: &signer, amount: u64)
+
+ + + + +
pragma verify_duration_estimate = 120;
+pragma aborts_if_is_partial;
+aborts_if reconfiguration_state::spec_is_in_progress();
+include ResourceRequirement;
+include AddStakeAbortsIfAndEnsures;
+
+ + + + + +### Function `add_stake_with_cap` + + +
public fun add_stake_with_cap(owner_cap: &stake::OwnerCapability, coins: coin::Coin<aptos_coin::AptosCoin>)
+
+ + + + +
pragma disable_invariants_in_body;
+pragma verify_duration_estimate = 300;
+include ResourceRequirement;
+let amount = coins.value;
+aborts_if reconfiguration_state::spec_is_in_progress();
+include AddStakeWithCapAbortsIfAndEnsures { amount };
+
+ + + + + +### Function `reactivate_stake_with_cap` + + +
public fun reactivate_stake_with_cap(owner_cap: &stake::OwnerCapability, amount: u64)
+
+ + + + +
let pool_address = owner_cap.pool_address;
+include StakedValueNochange;
+aborts_if reconfiguration_state::spec_is_in_progress();
+aborts_if !stake_pool_exists(pool_address);
+let pre_stake_pool = global<StakePool>(pool_address);
+let post stake_pool = global<StakePool>(pool_address);
+modifies global<StakePool>(pool_address);
+let min_amount = aptos_std::math64::min(amount, pre_stake_pool.pending_inactive.value);
+ensures stake_pool.pending_inactive.value == pre_stake_pool.pending_inactive.value - min_amount;
+ensures stake_pool.active.value == pre_stake_pool.active.value + min_amount;
+
+ + + + + +### Function `rotate_consensus_key` + + +
public entry fun rotate_consensus_key(operator: &signer, pool_address: address, new_consensus_pubkey: vector<u8>, proof_of_possession: vector<u8>)
+
+ + + + +
let pre_stake_pool = global<StakePool>(pool_address);
+let post validator_info = global<ValidatorConfig>(pool_address);
+aborts_if reconfiguration_state::spec_is_in_progress();
+aborts_if !exists<StakePool>(pool_address);
+aborts_if signer::address_of(operator) != pre_stake_pool.operator_address;
+aborts_if !exists<ValidatorConfig>(pool_address);
+let pubkey_from_pop = bls12381::spec_public_key_from_bytes_with_pop(
+    new_consensus_pubkey,
+    proof_of_possession_from_bytes(proof_of_possession)
+);
+aborts_if !option::spec_is_some(pubkey_from_pop);
+modifies global<ValidatorConfig>(pool_address);
+include StakedValueNochange;
+ensures validator_info.consensus_pubkey == new_consensus_pubkey;
+
+ + + + + +### Function `update_network_and_fullnode_addresses` + + +
public entry fun update_network_and_fullnode_addresses(operator: &signer, pool_address: address, new_network_addresses: vector<u8>, new_fullnode_addresses: vector<u8>)
+
+ + + + +
let pre_stake_pool = global<StakePool>(pool_address);
+let post validator_info = global<ValidatorConfig>(pool_address);
+modifies global<ValidatorConfig>(pool_address);
+include StakedValueNochange;
+aborts_if reconfiguration_state::spec_is_in_progress();
+aborts_if !exists<StakePool>(pool_address);
+aborts_if !exists<ValidatorConfig>(pool_address);
+aborts_if signer::address_of(operator) != pre_stake_pool.operator_address;
+ensures validator_info.network_addresses == new_network_addresses;
+ensures validator_info.fullnode_addresses == new_fullnode_addresses;
+
+ + + + + +### Function `increase_lockup_with_cap` + + +
public fun increase_lockup_with_cap(owner_cap: &stake::OwnerCapability)
+
+ + + + +
let config = global<staking_config::StakingConfig>(@aptos_framework);
+let pool_address = owner_cap.pool_address;
+let pre_stake_pool = global<StakePool>(pool_address);
+let post stake_pool = global<StakePool>(pool_address);
+let now_seconds = timestamp::spec_now_seconds();
+let lockup = config.recurring_lockup_duration_secs;
+modifies global<StakePool>(pool_address);
+include StakedValueNochange;
+aborts_if !exists<StakePool>(pool_address);
+aborts_if pre_stake_pool.locked_until_secs >= lockup + now_seconds;
+aborts_if lockup + now_seconds > MAX_U64;
+aborts_if !exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+aborts_if !exists<staking_config::StakingConfig>(@aptos_framework);
+ensures stake_pool.locked_until_secs == lockup + now_seconds;
+
+ + + + + +### Function `join_validator_set` + + +
public entry fun join_validator_set(operator: &signer, pool_address: address)
+
+ + + + +
pragma disable_invariants_in_body;
+aborts_if !staking_config::get_allow_validator_set_change(staking_config::get());
+aborts_if !exists<StakePool>(pool_address);
+aborts_if !exists<ValidatorConfig>(pool_address);
+aborts_if !exists<StakingConfig>(@aptos_framework);
+aborts_if !exists<ValidatorSet>(@aptos_framework);
+aborts_if reconfiguration_state::spec_is_in_progress();
+let stake_pool = global<StakePool>(pool_address);
+let validator_set = global<ValidatorSet>(@aptos_framework);
+let post p_validator_set = global<ValidatorSet>(@aptos_framework);
+aborts_if signer::address_of(operator) != stake_pool.operator_address;
+aborts_if option::spec_is_some(spec_find_validator(validator_set.active_validators, pool_address)) ||
+            option::spec_is_some(spec_find_validator(validator_set.pending_inactive, pool_address)) ||
+                option::spec_is_some(spec_find_validator(validator_set.pending_active, pool_address));
+let config = staking_config::get();
+let voting_power = get_next_epoch_voting_power(stake_pool);
+let minimum_stake = config.minimum_stake;
+let maximum_stake = config.maximum_stake;
+aborts_if voting_power < minimum_stake;
+aborts_if voting_power >maximum_stake;
+let validator_config = global<ValidatorConfig>(pool_address);
+aborts_if vector::is_empty(validator_config.consensus_pubkey);
+let validator_set_size = vector::length(validator_set.active_validators) + vector::length(validator_set.pending_active) + 1;
+aborts_if validator_set_size > MAX_VALIDATOR_SET_SIZE;
+let voting_power_increase_limit = (staking_config::get_voting_power_increase_limit(config) as u128);
+aborts_if (validator_set.total_joining_power + (voting_power as u128)) > MAX_U128;
+aborts_if validator_set.total_voting_power * voting_power_increase_limit > MAX_U128;
+aborts_if validator_set.total_voting_power > 0 &&
+    (validator_set.total_joining_power + (voting_power as u128)) * 100 > validator_set.total_voting_power * voting_power_increase_limit;
+let post p_validator_info = ValidatorInfo {
+    addr: pool_address,
+    voting_power,
+    config: validator_config,
+};
+ensures validator_set.total_joining_power + voting_power == p_validator_set.total_joining_power;
+ensures vector::spec_contains(p_validator_set.pending_active, p_validator_info);
+
+ + + + + +### Function `unlock_with_cap` + + +
public fun unlock_with_cap(amount: u64, owner_cap: &stake::OwnerCapability)
+
+ + + + +
pragma verify_duration_estimate = 300;
+let pool_address = owner_cap.pool_address;
+let pre_stake_pool = global<StakePool>(pool_address);
+let post stake_pool = global<StakePool>(pool_address);
+aborts_if reconfiguration_state::spec_is_in_progress();
+aborts_if amount != 0 && !exists<StakePool>(pool_address);
+modifies global<StakePool>(pool_address);
+include StakedValueNochange;
+let min_amount = aptos_std::math64::min(amount,pre_stake_pool.active.value);
+ensures stake_pool.active.value == pre_stake_pool.active.value - min_amount;
+ensures stake_pool.pending_inactive.value == pre_stake_pool.pending_inactive.value + min_amount;
+
+ + + + + +### Function `withdraw` + + +
public entry fun withdraw(owner: &signer, withdraw_amount: u64)
+
+ + + + +
pragma verify = false;
+aborts_if reconfiguration_state::spec_is_in_progress();
+let addr = signer::address_of(owner);
+let ownership_cap = global<OwnerCapability>(addr);
+let pool_address = ownership_cap.pool_address;
+let stake_pool = global<StakePool>(pool_address);
+aborts_if !exists<OwnerCapability>(addr);
+aborts_if !exists<StakePool>(pool_address);
+aborts_if !exists<ValidatorSet>(@aptos_framework);
+let validator_set = global<ValidatorSet>(@aptos_framework);
+let bool_find_validator = !option::spec_is_some(spec_find_validator(validator_set.active_validators, pool_address)) &&
+            !option::spec_is_some(spec_find_validator(validator_set.pending_inactive, pool_address)) &&
+                !option::spec_is_some(spec_find_validator(validator_set.pending_active, pool_address));
+aborts_if bool_find_validator && !exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+let new_withdraw_amount_1 = min(withdraw_amount, stake_pool.inactive.value + stake_pool.pending_inactive.value);
+let new_withdraw_amount_2 = min(withdraw_amount, stake_pool.inactive.value);
+aborts_if bool_find_validator && timestamp::now_seconds() > stake_pool.locked_until_secs &&
+            new_withdraw_amount_1 > 0 && stake_pool.inactive.value + stake_pool.pending_inactive.value < new_withdraw_amount_1;
+aborts_if !(bool_find_validator && exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework)) &&
+            new_withdraw_amount_2 > 0 && stake_pool.inactive.value < new_withdraw_amount_2;
+aborts_if !exists<coin::CoinStore<AptosCoin>>(addr);
+include coin::DepositAbortsIf<AptosCoin>{account_addr: addr};
+let coin_store = global<coin::CoinStore<AptosCoin>>(addr);
+let post p_coin_store = global<coin::CoinStore<AptosCoin>>(addr);
+ensures bool_find_validator && timestamp::now_seconds() > stake_pool.locked_until_secs
+            && exists<account::Account>(addr) && exists<coin::CoinStore<AptosCoin>>(addr) ==>
+                coin_store.coin.value + new_withdraw_amount_1 == p_coin_store.coin.value;
+ensures !(bool_find_validator && exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework))
+            && exists<account::Account>(addr) && exists<coin::CoinStore<AptosCoin>>(addr) ==>
+                coin_store.coin.value + new_withdraw_amount_2 == p_coin_store.coin.value;
+
+ + + + + +### Function `leave_validator_set` + + +
public entry fun leave_validator_set(operator: &signer, pool_address: address)
+
+ + + + +
pragma disable_invariants_in_body;
+requires chain_status::is_operating();
+aborts_if reconfiguration_state::spec_is_in_progress();
+let config = staking_config::get();
+aborts_if !staking_config::get_allow_validator_set_change(config);
+aborts_if !exists<StakePool>(pool_address);
+aborts_if !exists<ValidatorSet>(@aptos_framework);
+aborts_if !exists<staking_config::StakingConfig>(@aptos_framework);
+let stake_pool = global<StakePool>(pool_address);
+aborts_if signer::address_of(operator) != stake_pool.operator_address;
+let validator_set = global<ValidatorSet>(@aptos_framework);
+let validator_find_bool = option::spec_is_some(spec_find_validator(validator_set.pending_active, pool_address));
+let active_validators = validator_set.active_validators;
+let pending_active = validator_set.pending_active;
+let post post_validator_set = global<ValidatorSet>(@aptos_framework);
+let post post_active_validators = post_validator_set.active_validators;
+let pending_inactive_validators = validator_set.pending_inactive;
+let post post_pending_inactive_validators = post_validator_set.pending_inactive;
+ensures len(active_validators) + len(pending_inactive_validators) == len(post_active_validators)
+    + len(post_pending_inactive_validators);
+aborts_if !validator_find_bool && !option::spec_is_some(spec_find_validator(active_validators, pool_address));
+aborts_if !validator_find_bool && vector::length(validator_set.active_validators) <= option::spec_borrow(spec_find_validator(active_validators, pool_address));
+aborts_if !validator_find_bool && vector::length(validator_set.active_validators) < 2;
+aborts_if validator_find_bool && vector::length(validator_set.pending_active) <= option::spec_borrow(spec_find_validator(pending_active, pool_address));
+let post p_validator_set = global<ValidatorSet>(@aptos_framework);
+let validator_stake = (get_next_epoch_voting_power(stake_pool) as u128);
+ensures validator_find_bool && validator_set.total_joining_power > validator_stake ==>
+            p_validator_set.total_joining_power == validator_set.total_joining_power - validator_stake;
+ensures !validator_find_bool ==> !option::spec_is_some(spec_find_validator(p_validator_set.pending_active, pool_address));
+
+ + + + + +### Function `is_current_epoch_validator` + + +
public fun is_current_epoch_validator(pool_address: address): bool
+
+ + + + +
include ResourceRequirement;
+aborts_if !spec_has_stake_pool(pool_address);
+ensures result == spec_is_current_epoch_validator(pool_address);
+
+ + + + + +### Function `update_performance_statistics` + + +
public(friend) fun update_performance_statistics(proposer_index: option::Option<u64>, failed_proposer_indices: vector<u64>)
+
+ + + + +
requires chain_status::is_operating();
+aborts_if false;
+let validator_perf = global<ValidatorPerformance>(@aptos_framework);
+let post post_validator_perf = global<ValidatorPerformance>(@aptos_framework);
+let validator_len = len(validator_perf.validators);
+ensures (option::spec_is_some(ghost_proposer_idx) && option::spec_borrow(ghost_proposer_idx) < validator_len) ==>
+    (post_validator_perf.validators[option::spec_borrow(ghost_proposer_idx)].successful_proposals ==
+        validator_perf.validators[option::spec_borrow(ghost_proposer_idx)].successful_proposals + 1);
+
+ + + + + +### Function `on_new_epoch` + + +
public(friend) fun on_new_epoch()
+
+ + + + +
pragma verify = false;
+pragma disable_invariants_in_body;
+include ResourceRequirement;
+include GetReconfigStartTimeRequirement;
+include staking_config::StakingRewardsConfigRequirement;
+include aptos_framework::aptos_coin::ExistsAptosCoin;
+// This enforces high-level requirement 4:
+aborts_if false;
+
+ + + + + +### Function `next_validator_consensus_infos` + + +
public fun next_validator_consensus_infos(): vector<validator_consensus_info::ValidatorConsensusInfo>
+
+ + + + +
pragma verify_duration_estimate = 300;
+aborts_if false;
+include ResourceRequirement;
+include GetReconfigStartTimeRequirement;
+include features::spec_periodical_reward_rate_decrease_enabled() ==> staking_config::StakingRewardsConfigEnabledRequirement;
+
+ + + + + +### Function `validator_consensus_infos_from_validator_set` + + +
fun validator_consensus_infos_from_validator_set(validator_set: &stake::ValidatorSet): vector<validator_consensus_info::ValidatorConsensusInfo>
+
+ + + + +
aborts_if false;
+invariant spec_validator_indices_are_valid_config(validator_set.active_validators,
+    len(validator_set.active_validators) + len(validator_set.pending_inactive));
+invariant len(validator_set.pending_inactive) == 0 ||
+    spec_validator_indices_are_valid_config(validator_set.pending_inactive,
+        len(validator_set.active_validators) + len(validator_set.pending_inactive));
+
+ + + + + + + +
schema AddStakeWithCapAbortsIfAndEnsures {
+    owner_cap: OwnerCapability;
+    amount: u64;
+    let pool_address = owner_cap.pool_address;
+    aborts_if !exists<StakePool>(pool_address);
+    let config = global<staking_config::StakingConfig>(@aptos_framework);
+    let validator_set = global<ValidatorSet>(@aptos_framework);
+    let voting_power_increase_limit = config.voting_power_increase_limit;
+    let post post_validator_set = global<ValidatorSet>(@aptos_framework);
+    let update_voting_power_increase = amount != 0 && (spec_contains(validator_set.active_validators, pool_address)
+                                                       || spec_contains(validator_set.pending_active, pool_address));
+    aborts_if update_voting_power_increase && validator_set.total_joining_power + amount > MAX_U128;
+    ensures update_voting_power_increase ==> post_validator_set.total_joining_power == validator_set.total_joining_power + amount;
+    aborts_if update_voting_power_increase && validator_set.total_voting_power > 0
+            && validator_set.total_voting_power * voting_power_increase_limit > MAX_U128;
+    aborts_if update_voting_power_increase && validator_set.total_voting_power > 0
+            && validator_set.total_joining_power + amount > validator_set.total_voting_power * voting_power_increase_limit / 100;
+    let stake_pool = global<StakePool>(pool_address);
+    let post post_stake_pool = global<StakePool>(pool_address);
+    let value_pending_active = stake_pool.pending_active.value;
+    let value_active = stake_pool.active.value;
+    ensures amount != 0 && spec_is_current_epoch_validator(pool_address) ==> post_stake_pool.pending_active.value == value_pending_active + amount;
+    ensures amount != 0 && !spec_is_current_epoch_validator(pool_address) ==> post_stake_pool.active.value == value_active + amount;
+    let maximum_stake = config.maximum_stake;
+    let value_pending_inactive = stake_pool.pending_inactive.value;
+    let next_epoch_voting_power = value_pending_active + value_active + value_pending_inactive;
+    let voting_power = next_epoch_voting_power + amount;
+    aborts_if amount != 0 && voting_power > MAX_U64;
+    aborts_if amount != 0 && voting_power > maximum_stake;
+}
+
+ + + + + + + +
schema AddStakeAbortsIfAndEnsures {
+    owner: signer;
+    amount: u64;
+    let owner_address = signer::address_of(owner);
+    aborts_if !exists<OwnerCapability>(owner_address);
+    let owner_cap = global<OwnerCapability>(owner_address);
+    include AddStakeWithCapAbortsIfAndEnsures { owner_cap };
+}
+
+ + + + + + + +
fun spec_is_allowed(account: address): bool {
+   if (!exists<AllowedValidators>(@aptos_framework)) {
+       true
+   } else {
+       let allowed = global<AllowedValidators>(@aptos_framework);
+       contains(allowed.accounts, account)
+   }
+}
+
+ + + + + + + +
fun spec_find_validator(v: vector<ValidatorInfo>, addr: address): Option<u64>;
+
+ + + + + + + +
fun spec_validators_are_initialized(validators: vector<ValidatorInfo>): bool {
+   forall i in 0..len(validators):
+       spec_has_stake_pool(validators[i].addr) &&
+           spec_has_validator_config(validators[i].addr)
+}
+
+ + + + + + + +
fun spec_validators_are_initialized_addrs(addrs: vector<address>): bool {
+   forall i in 0..len(addrs):
+       spec_has_stake_pool(addrs[i]) &&
+           spec_has_validator_config(addrs[i])
+}
+
+ + + + + + + +
fun spec_validator_indices_are_valid(validators: vector<ValidatorInfo>): bool {
+   spec_validator_indices_are_valid_addr(validators, spec_validator_index_upper_bound()) &&
+       spec_validator_indices_are_valid_config(validators, spec_validator_index_upper_bound())
+}
+
+ + + + + + + +
fun spec_validator_indices_are_valid_addr(validators: vector<ValidatorInfo>, upper_bound: u64): bool {
+   forall i in 0..len(validators):
+       global<ValidatorConfig>(validators[i].addr).validator_index < upper_bound
+}
+
+ + + + + + + +
fun spec_validator_indices_are_valid_config(validators: vector<ValidatorInfo>, upper_bound: u64): bool {
+   forall i in 0..len(validators):
+       validators[i].config.validator_index < upper_bound
+}
+
+ + + + + + + +
fun spec_validator_indices_active_pending_inactive(validator_set: ValidatorSet): bool {
+   len(validator_set.pending_inactive) + len(validator_set.active_validators) == spec_validator_index_upper_bound()
+}
+
+ + + + + + + +
fun spec_validator_index_upper_bound(): u64 {
+   len(global<ValidatorPerformance>(@aptos_framework).validators)
+}
+
+ + + + + + + +
fun spec_has_stake_pool(a: address): bool {
+   exists<StakePool>(a)
+}
+
+ + + + + + + +
fun spec_has_validator_config(a: address): bool {
+   exists<ValidatorConfig>(a)
+}
+
+ + + + + + + +
fun spec_rewards_amount(
+   stake_amount: u64,
+   num_successful_proposals: u64,
+   num_total_proposals: u64,
+   rewards_rate: u64,
+   rewards_rate_denominator: u64,
+): u64;
+
+ + + + + + + +
fun spec_contains(validators: vector<ValidatorInfo>, addr: address): bool {
+   exists i in 0..len(validators): validators[i].addr == addr
+}
+
+ + + + + + + +
fun spec_is_current_epoch_validator(pool_address: address): bool {
+   let validator_set = global<ValidatorSet>(@aptos_framework);
+   !spec_contains(validator_set.pending_active, pool_address)
+       && (spec_contains(validator_set.active_validators, pool_address)
+       || spec_contains(validator_set.pending_inactive, pool_address))
+}
+
+ + + + + + + +
schema ResourceRequirement {
+    requires exists<AptosCoinCapabilities>(@aptos_framework);
+    requires exists<ValidatorPerformance>(@aptos_framework);
+    requires exists<ValidatorSet>(@aptos_framework);
+    requires exists<StakingConfig>(@aptos_framework);
+    requires exists<StakingRewardsConfig>(@aptos_framework) || !features::spec_periodical_reward_rate_decrease_enabled();
+    requires exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+    requires exists<ValidatorFees>(@aptos_framework);
+}
+
+ + + + + + + +
fun spec_get_reward_rate_1(config: StakingConfig): num {
+   if (features::spec_periodical_reward_rate_decrease_enabled()) {
+       let epoch_rewards_rate = global<staking_config::StakingRewardsConfig>(@aptos_framework).rewards_rate;
+       if (epoch_rewards_rate.value == 0) {
+           0
+       } else {
+           let denominator_0 = aptos_std::fixed_point64::spec_divide_u128(staking_config::MAX_REWARDS_RATE, epoch_rewards_rate);
+           let denominator = if (denominator_0 > MAX_U64) {
+               MAX_U64
+           } else {
+               denominator_0
+           };
+           let nominator = aptos_std::fixed_point64::spec_multiply_u128(denominator, epoch_rewards_rate);
+           nominator
+       }
+   } else {
+           config.rewards_rate
+   }
+}
+
+ + + + + + + +
fun spec_get_reward_rate_2(config: StakingConfig): num {
+   if (features::spec_periodical_reward_rate_decrease_enabled()) {
+       let epoch_rewards_rate = global<staking_config::StakingRewardsConfig>(@aptos_framework).rewards_rate;
+       if (epoch_rewards_rate.value == 0) {
+           1
+       } else {
+           let denominator_0 = aptos_std::fixed_point64::spec_divide_u128(staking_config::MAX_REWARDS_RATE, epoch_rewards_rate);
+           let denominator = if (denominator_0 > MAX_U64) {
+               MAX_U64
+           } else {
+               denominator_0
+           };
+           denominator
+       }
+   } else {
+           config.rewards_rate_denominator
+   }
+}
+
+ + + + + +### Function `update_stake_pool` + + +
fun update_stake_pool(validator_perf: &stake::ValidatorPerformance, pool_address: address, staking_config: &staking_config::StakingConfig)
+
+ + + + +
pragma verify_duration_estimate = 300;
+include ResourceRequirement;
+include GetReconfigStartTimeRequirement;
+include staking_config::StakingRewardsConfigRequirement;
+include UpdateStakePoolAbortsIf;
+let stake_pool = global<StakePool>(pool_address);
+let validator_config = global<ValidatorConfig>(pool_address);
+let cur_validator_perf = validator_perf.validators[validator_config.validator_index];
+let num_successful_proposals = cur_validator_perf.successful_proposals;
+let num_total_proposals = cur_validator_perf.successful_proposals + cur_validator_perf.failed_proposals;
+let rewards_rate = spec_get_reward_rate_1(staking_config);
+let rewards_rate_denominator = spec_get_reward_rate_2(staking_config);
+let rewards_amount_1 = if (stake_pool.active.value > 0) {
+    spec_rewards_amount(stake_pool.active.value, num_successful_proposals, num_total_proposals, rewards_rate, rewards_rate_denominator)
+} else {
+    0
+};
+let rewards_amount_2 = if (stake_pool.pending_inactive.value > 0) {
+    spec_rewards_amount(stake_pool.pending_inactive.value, num_successful_proposals, num_total_proposals, rewards_rate, rewards_rate_denominator)
+} else {
+    0
+};
+let post post_stake_pool = global<StakePool>(pool_address);
+let post post_active_value = post_stake_pool.active.value;
+let post post_pending_inactive_value = post_stake_pool.pending_inactive.value;
+let fees_table = global<ValidatorFees>(@aptos_framework).fees_table;
+let post post_fees_table = global<ValidatorFees>(@aptos_framework).fees_table;
+let post post_inactive_value = post_stake_pool.inactive.value;
+ensures post_stake_pool.pending_active.value == 0;
+ensures if (features::spec_is_enabled(features::COLLECT_AND_DISTRIBUTE_GAS_FEES) && table::spec_contains(fees_table, pool_address)) {
+    !table::spec_contains(post_fees_table, pool_address) &&
+    post_active_value == stake_pool.active.value + rewards_amount_1 + stake_pool.pending_active.value + table::spec_get(fees_table, pool_address).value
+} else {
+    post_active_value == stake_pool.active.value + rewards_amount_1 + stake_pool.pending_active.value
+};
+ensures if (spec_get_reconfig_start_time_secs() >= stake_pool.locked_until_secs) {
+    post_pending_inactive_value == 0 &&
+    post_inactive_value == stake_pool.inactive.value + stake_pool.pending_inactive.value + rewards_amount_2
+} else {
+    post_pending_inactive_value == stake_pool.pending_inactive.value + rewards_amount_2
+};
+
+ + + + + + + +
schema UpdateStakePoolAbortsIf {
+    pool_address: address;
+    validator_perf: ValidatorPerformance;
+    aborts_if !exists<StakePool>(pool_address);
+    aborts_if !exists<ValidatorConfig>(pool_address);
+    aborts_if global<ValidatorConfig>(pool_address).validator_index >= len(validator_perf.validators);
+    let aptos_addr = type_info::type_of<AptosCoin>().account_address;
+    aborts_if !exists<ValidatorFees>(aptos_addr);
+    let stake_pool = global<StakePool>(pool_address);
+    include DistributeRewardsAbortsIf {stake: stake_pool.active};
+    include DistributeRewardsAbortsIf {stake: stake_pool.pending_inactive};
+}
+
+ + + + + +### Function `get_reconfig_start_time_secs` + + +
fun get_reconfig_start_time_secs(): u64
+
+ + + + +
include GetReconfigStartTimeRequirement;
+
+ + + + + + + +
schema GetReconfigStartTimeRequirement {
+    requires exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+    include reconfiguration_state::StartTimeSecsRequirement;
+}
+
+ + + + + + + +
fun spec_get_reconfig_start_time_secs(): u64 {
+   if (exists<reconfiguration_state::State>(@aptos_framework)) {
+       reconfiguration_state::spec_start_time_secs()
+   } else {
+       timestamp::spec_now_seconds()
+   }
+}
+
+ + + + + +### Function `calculate_rewards_amount` + + +
fun calculate_rewards_amount(stake_amount: u64, num_successful_proposals: u64, num_total_proposals: u64, rewards_rate: u64, rewards_rate_denominator: u64): u64
+
+ + + + +
pragma opaque;
+pragma verify_duration_estimate = 300;
+requires rewards_rate <= MAX_REWARDS_RATE;
+requires rewards_rate_denominator > 0;
+requires rewards_rate <= rewards_rate_denominator;
+requires num_successful_proposals <= num_total_proposals;
+ensures [concrete] (rewards_rate_denominator * num_total_proposals == 0) ==> result == 0;
+ensures [concrete] (rewards_rate_denominator * num_total_proposals > 0) ==> {
+    let amount = ((stake_amount * rewards_rate * num_successful_proposals) /
+        (rewards_rate_denominator * num_total_proposals));
+    result == amount
+};
+aborts_if false;
+ensures [abstract] result == spec_rewards_amount(
+    stake_amount,
+    num_successful_proposals,
+    num_total_proposals,
+    rewards_rate,
+    rewards_rate_denominator);
+
+ + + + + +### Function `distribute_rewards` + + +
fun distribute_rewards(stake: &mut coin::Coin<aptos_coin::AptosCoin>, num_successful_proposals: u64, num_total_proposals: u64, rewards_rate: u64, rewards_rate_denominator: u64): u64
+
+ + + + +
include ResourceRequirement;
+requires rewards_rate <= MAX_REWARDS_RATE;
+requires rewards_rate_denominator > 0;
+requires rewards_rate <= rewards_rate_denominator;
+requires num_successful_proposals <= num_total_proposals;
+include DistributeRewardsAbortsIf;
+ensures old(stake.value) > 0 ==>
+    result == spec_rewards_amount(
+        old(stake.value),
+        num_successful_proposals,
+        num_total_proposals,
+        rewards_rate,
+        rewards_rate_denominator);
+ensures old(stake.value) > 0 ==>
+    stake.value == old(stake.value) + spec_rewards_amount(
+        old(stake.value),
+        num_successful_proposals,
+        num_total_proposals,
+        rewards_rate,
+        rewards_rate_denominator);
+ensures old(stake.value) == 0 ==> result == 0;
+ensures old(stake.value) == 0 ==> stake.value == old(stake.value);
+
+ + + + + + + +
schema DistributeRewardsAbortsIf {
+    stake: Coin<AptosCoin>;
+    num_successful_proposals: num;
+    num_total_proposals: num;
+    rewards_rate: num;
+    rewards_rate_denominator: num;
+    let stake_amount = coin::value(stake);
+    let rewards_amount = if (stake_amount > 0) {
+        spec_rewards_amount(stake_amount, num_successful_proposals, num_total_proposals, rewards_rate, rewards_rate_denominator)
+    } else {
+        0
+    };
+    let amount = rewards_amount;
+    let addr = type_info::type_of<AptosCoin>().account_address;
+    aborts_if (rewards_amount > 0) && !exists<coin::CoinInfo<AptosCoin>>(addr);
+    modifies global<coin::CoinInfo<AptosCoin>>(addr);
+    include (rewards_amount > 0) ==> coin::CoinAddAbortsIf<AptosCoin> { amount: amount };
+}
+
+ + + + + +### Function `append` + + +
fun append<T>(v1: &mut vector<T>, v2: &mut vector<T>)
+
+ + + + +
pragma opaque, verify = false;
+aborts_if false;
+ensures len(v1) == old(len(v1) + len(v2));
+ensures len(v2) == 0;
+ensures (forall i in 0..old(len(v1)): v1[i] == old(v1[i]));
+ensures (forall i in old(len(v1))..len(v1): v1[i] == old(v2[len(v2) - (i - len(v1)) - 1]));
+
+ + + + + +### Function `find_validator` + + +
fun find_validator(v: &vector<stake::ValidatorInfo>, addr: address): option::Option<u64>
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures option::is_none(result) ==> (forall i in 0..len(v): v[i].addr != addr);
+ensures option::is_some(result) ==> v[option::borrow(result)].addr == addr;
+ensures option::is_some(result) ==> spec_contains(v, addr);
+ensures [abstract] result == spec_find_validator(v,addr);
+
+ + + + + +### Function `update_voting_power_increase` + + +
fun update_voting_power_increase(increase_amount: u64)
+
+ + + + +
requires !reconfiguration_state::spec_is_in_progress();
+aborts_if !exists<ValidatorSet>(@aptos_framework);
+aborts_if !exists<staking_config::StakingConfig>(@aptos_framework);
+let aptos = @aptos_framework;
+let pre_validator_set = global<ValidatorSet>(aptos);
+let post validator_set = global<ValidatorSet>(aptos);
+let staking_config = global<staking_config::StakingConfig>(aptos);
+let voting_power_increase_limit = staking_config.voting_power_increase_limit;
+aborts_if pre_validator_set.total_joining_power + increase_amount > MAX_U128;
+aborts_if pre_validator_set.total_voting_power > 0 && pre_validator_set.total_voting_power * voting_power_increase_limit > MAX_U128;
+aborts_if pre_validator_set.total_voting_power > 0 &&
+    pre_validator_set.total_joining_power + increase_amount > pre_validator_set.total_voting_power * voting_power_increase_limit / 100;
+ensures validator_set.total_voting_power > 0 ==>
+    validator_set.total_joining_power <= validator_set.total_voting_power * voting_power_increase_limit / 100;
+ensures validator_set.total_joining_power == pre_validator_set.total_joining_power + increase_amount;
+
+ + + + + +### Function `assert_stake_pool_exists` + + +
fun assert_stake_pool_exists(pool_address: address)
+
+ + + + +
aborts_if !stake_pool_exists(pool_address);
+
+ + + + + +### Function `configure_allowed_validators` + + +
public fun configure_allowed_validators(aptos_framework: &signer, accounts: vector<address>)
+
+ + + + +
let aptos_framework_address = signer::address_of(aptos_framework);
+aborts_if !system_addresses::is_aptos_framework_address(aptos_framework_address);
+let post allowed = global<AllowedValidators>(aptos_framework_address);
+ensures allowed.accounts == accounts;
+
+ + + + + +### Function `assert_owner_cap_exists` + + +
fun assert_owner_cap_exists(owner: address)
+
+ + + + +
aborts_if !exists<OwnerCapability>(owner);
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/staking_config.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/staking_config.md new file mode 100644 index 0000000000000..f053716b0e392 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/staking_config.md @@ -0,0 +1,1595 @@ + + + +# Module `0x1::staking_config` + +Provides the configuration for staking and rewards + + +- [Resource `StakingConfig`](#0x1_staking_config_StakingConfig) +- [Resource `StakingRewardsConfig`](#0x1_staking_config_StakingRewardsConfig) +- [Constants](#@Constants_0) +- [Function `initialize`](#0x1_staking_config_initialize) +- [Function `reward_rate`](#0x1_staking_config_reward_rate) +- [Function `initialize_rewards`](#0x1_staking_config_initialize_rewards) +- [Function `get`](#0x1_staking_config_get) +- [Function `get_allow_validator_set_change`](#0x1_staking_config_get_allow_validator_set_change) +- [Function `get_required_stake`](#0x1_staking_config_get_required_stake) +- [Function `get_recurring_lockup_duration`](#0x1_staking_config_get_recurring_lockup_duration) +- [Function `get_reward_rate`](#0x1_staking_config_get_reward_rate) +- [Function `get_voting_power_increase_limit`](#0x1_staking_config_get_voting_power_increase_limit) +- [Function `calculate_and_save_latest_epoch_rewards_rate`](#0x1_staking_config_calculate_and_save_latest_epoch_rewards_rate) +- [Function `calculate_and_save_latest_rewards_config`](#0x1_staking_config_calculate_and_save_latest_rewards_config) +- [Function `update_required_stake`](#0x1_staking_config_update_required_stake) +- [Function `update_recurring_lockup_duration_secs`](#0x1_staking_config_update_recurring_lockup_duration_secs) +- [Function `update_rewards_rate`](#0x1_staking_config_update_rewards_rate) +- [Function `update_rewards_config`](#0x1_staking_config_update_rewards_config) +- [Function 
`update_voting_power_increase_limit`](#0x1_staking_config_update_voting_power_increase_limit) +- [Function `validate_required_stake`](#0x1_staking_config_validate_required_stake) +- [Function `validate_rewards_config`](#0x1_staking_config_validate_rewards_config) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Resource `StakingConfig`](#@Specification_1_StakingConfig) + - [Resource `StakingRewardsConfig`](#@Specification_1_StakingRewardsConfig) + - [Function `initialize`](#@Specification_1_initialize) + - [Function `reward_rate`](#@Specification_1_reward_rate) + - [Function `initialize_rewards`](#@Specification_1_initialize_rewards) + - [Function `get`](#@Specification_1_get) + - [Function `get_reward_rate`](#@Specification_1_get_reward_rate) + - [Function `calculate_and_save_latest_epoch_rewards_rate`](#@Specification_1_calculate_and_save_latest_epoch_rewards_rate) + - [Function `calculate_and_save_latest_rewards_config`](#@Specification_1_calculate_and_save_latest_rewards_config) + - [Function `update_required_stake`](#@Specification_1_update_required_stake) + - [Function `update_recurring_lockup_duration_secs`](#@Specification_1_update_recurring_lockup_duration_secs) + - [Function `update_rewards_rate`](#@Specification_1_update_rewards_rate) + - [Function `update_rewards_config`](#@Specification_1_update_rewards_config) + - [Function `update_voting_power_increase_limit`](#@Specification_1_update_voting_power_increase_limit) + - [Function `validate_required_stake`](#@Specification_1_validate_required_stake) + - [Function `validate_rewards_config`](#@Specification_1_validate_rewards_config) + + +
use 0x1::error;
+use 0x1::features;
+use 0x1::fixed_point64;
+use 0x1::math_fixed64;
+use 0x1::system_addresses;
+use 0x1::timestamp;
+
+ + + + + +## Resource `StakingConfig` + +Validator set configurations that will be stored with the @aptos_framework account. + + +
struct StakingConfig has copy, drop, key
+
+ + + +
+Fields + + +
+
+minimum_stake: u64 +
+
+ +
+
+maximum_stake: u64 +
+
+ +
+
+recurring_lockup_duration_secs: u64 +
+
+ +
+
+allow_validator_set_change: bool +
+
+ +
+
+rewards_rate: u64 +
+
+ +
+
+rewards_rate_denominator: u64 +
+
+ +
+
+voting_power_increase_limit: u64 +
+
+ +
+
+ + +
+ + + +## Resource `StakingRewardsConfig` + +Staking reward configurations that will be stored with the @aptos_framework account. + + +
struct StakingRewardsConfig has copy, drop, key
+
+ + + +
+Fields + + +
+
+rewards_rate: fixed_point64::FixedPoint64 +
+
+ +
+
+min_rewards_rate: fixed_point64::FixedPoint64 +
+
+ +
+
+rewards_rate_period_in_secs: u64 +
+
+ +
+
+last_rewards_rate_period_start_in_secs: u64 +
+
+ +
+
+rewards_rate_decrease_rate: fixed_point64::FixedPoint64 +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const MAX_U64: u128 = 18446744073709551615;
+
+ + + + + +Denominator of number in basis points. 1 bps(basis points) = 0.01%. + + +
const BPS_DENOMINATOR: u64 = 10000;
+
+ + + + + +The function has been deprecated. + + +
const EDEPRECATED_FUNCTION: u64 = 10;
+
+ + + + + +The function is disabled or hasn't been enabled. + + +
const EDISABLED_FUNCTION: u64 = 11;
+
+ + + + + +Specified start time of last rewards rate period is invalid, which must be no later than the current timestamp. + + +
const EINVALID_LAST_REWARDS_RATE_PERIOD_START: u64 = 7;
+
+ + + + + +Specified min rewards rate is invalid, which must be within [0, rewards_rate]. + + +
const EINVALID_MIN_REWARDS_RATE: u64 = 6;
+
+ + + + + +Specified rewards rate is invalid, which must be within [0, MAX_REWARDS_RATE]. + + +
const EINVALID_REWARDS_RATE: u64 = 5;
+
+ + + + + +Specified rewards rate decrease rate is invalid, which must not be greater than BPS_DENOMINATOR. + + +
const EINVALID_REWARDS_RATE_DECREASE_RATE: u64 = 8;
+
+ + + + + +Specified rewards rate period is invalid. It must be larger than 0 and cannot be changed if configured. + + +
const EINVALID_REWARDS_RATE_PERIOD: u64 = 9;
+
+ + + + + +Specified stake range is invalid. Max must be greater than min. + + +
const EINVALID_STAKE_RANGE: u64 = 3;
+
+ + + + + +The voting power increase limit percentage must be within (0, 50]. + + +
const EINVALID_VOTING_POWER_INCREASE_LIMIT: u64 = 4;
+
+ + + + + +Stake lockup duration cannot be zero. + + +
const EZERO_LOCKUP_DURATION: u64 = 1;
+
+ + + + + +Reward rate denominator cannot be zero. + + +
const EZERO_REWARDS_RATE_DENOMINATOR: u64 = 2;
+
+ + + + + +Limit the maximum value of rewards_rate in order to avoid any arithmetic overflow. + + +
const MAX_REWARDS_RATE: u64 = 1000000;
+
+ + + + + +1 year => 365 * 24 * 60 * 60 + + +
const ONE_YEAR_IN_SECS: u64 = 31536000;
+
+ + + + + +## Function `initialize` + +Only called during genesis. + + +
public(friend) fun initialize(aptos_framework: &signer, minimum_stake: u64, maximum_stake: u64, recurring_lockup_duration_secs: u64, allow_validator_set_change: bool, rewards_rate: u64, rewards_rate_denominator: u64, voting_power_increase_limit: u64)
+
+ + + +
+Implementation + + +
public(friend) fun initialize(
+    aptos_framework: &signer,
+    minimum_stake: u64,
+    maximum_stake: u64,
+    recurring_lockup_duration_secs: u64,
+    allow_validator_set_change: bool,
+    rewards_rate: u64,
+    rewards_rate_denominator: u64,
+    voting_power_increase_limit: u64,
+) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+
+    // This can fail genesis but is necessary so that any misconfigurations can be corrected before genesis succeeds
+    validate_required_stake(minimum_stake, maximum_stake);
+
+    assert!(recurring_lockup_duration_secs > 0, error::invalid_argument(EZERO_LOCKUP_DURATION));
+    assert!(
+        rewards_rate_denominator > 0,
+        error::invalid_argument(EZERO_REWARDS_RATE_DENOMINATOR),
+    );
+    assert!(
+        voting_power_increase_limit > 0 && voting_power_increase_limit <= 50,
+        error::invalid_argument(EINVALID_VOTING_POWER_INCREASE_LIMIT),
+    );
+
+    // `rewards_rate` which is the numerator is limited to be `<= MAX_REWARDS_RATE` in order to avoid the arithmetic
+    // overflow in the rewards calculation. `rewards_rate_denominator` can be adjusted to get the desired rewards
+    // rate (i.e., rewards_rate / rewards_rate_denominator).
+    assert!(rewards_rate <= MAX_REWARDS_RATE, error::invalid_argument(EINVALID_REWARDS_RATE));
+
+    // We assert that (rewards_rate / rewards_rate_denominator <= 1).
+    assert!(rewards_rate <= rewards_rate_denominator, error::invalid_argument(EINVALID_REWARDS_RATE));
+
+    move_to(aptos_framework, StakingConfig {
+        minimum_stake,
+        maximum_stake,
+        recurring_lockup_duration_secs,
+        allow_validator_set_change,
+        rewards_rate,
+        rewards_rate_denominator,
+        voting_power_increase_limit,
+    });
+}
+
+ + + +
+ + + +## Function `reward_rate` + +Return the reward rate of this epoch as a tuple (numerator, denominator). + + +
#[view]
+public fun reward_rate(): (u64, u64)
+
+ + + +
+Implementation + + +
public fun reward_rate(): (u64, u64) acquires StakingRewardsConfig, StakingConfig {
+    get_reward_rate(borrow_global<StakingConfig>(@aptos_framework))
+}
+
+ + + +
+ + + +## Function `initialize_rewards` + +Initialize rewards configurations. +Can only be called as part of the Aptos governance proposal process established by the AptosGovernance module. + + +
public fun initialize_rewards(aptos_framework: &signer, rewards_rate: fixed_point64::FixedPoint64, min_rewards_rate: fixed_point64::FixedPoint64, rewards_rate_period_in_secs: u64, last_rewards_rate_period_start_in_secs: u64, rewards_rate_decrease_rate: fixed_point64::FixedPoint64)
+
+ + + +
+Implementation + + +
public fun initialize_rewards(
+    aptos_framework: &signer,
+    rewards_rate: FixedPoint64,
+    min_rewards_rate: FixedPoint64,
+    rewards_rate_period_in_secs: u64,
+    last_rewards_rate_period_start_in_secs: u64,
+    rewards_rate_decrease_rate: FixedPoint64,
+) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+
+    validate_rewards_config(
+        rewards_rate,
+        min_rewards_rate,
+        rewards_rate_period_in_secs,
+        rewards_rate_decrease_rate,
+    );
+    assert!(
+        timestamp::now_seconds() >= last_rewards_rate_period_start_in_secs,
+        error::invalid_argument(EINVALID_LAST_REWARDS_RATE_PERIOD_START)
+    );
+
+    move_to(aptos_framework, StakingRewardsConfig {
+        rewards_rate,
+        min_rewards_rate,
+        rewards_rate_period_in_secs,
+        last_rewards_rate_period_start_in_secs,
+        rewards_rate_decrease_rate,
+    });
+}
+
+ + + +
+ + + +## Function `get` + + + +
public fun get(): staking_config::StakingConfig
+
+ + + +
+Implementation + + +
public fun get(): StakingConfig acquires StakingConfig {
+    *borrow_global<StakingConfig>(@aptos_framework)
+}
+
+ + + +
+ + + +## Function `get_allow_validator_set_change` + +Return whether validator set changes are allowed + + +
public fun get_allow_validator_set_change(config: &staking_config::StakingConfig): bool
+
+ + + +
+Implementation + + +
public fun get_allow_validator_set_change(config: &StakingConfig): bool {
+    config.allow_validator_set_change
+}
+
+ + + +
+ + + +## Function `get_required_stake` + +Return the required min/max stake. + + +
public fun get_required_stake(config: &staking_config::StakingConfig): (u64, u64)
+
+ + + +
+Implementation + + +
public fun get_required_stake(config: &StakingConfig): (u64, u64) {
+    (config.minimum_stake, config.maximum_stake)
+}
+
+ + + +
+ + + +## Function `get_recurring_lockup_duration` + +Return the recurring lockup duration that every validator is automatically renewed for (unless they unlock and +withdraw all funds). + + +
public fun get_recurring_lockup_duration(config: &staking_config::StakingConfig): u64
+
+ + + +
+Implementation + + +
public fun get_recurring_lockup_duration(config: &StakingConfig): u64 {
+    config.recurring_lockup_duration_secs
+}
+
+ + + +
+ + + +## Function `get_reward_rate` + +Return the reward rate of this epoch. + + +
public fun get_reward_rate(config: &staking_config::StakingConfig): (u64, u64)
+
+ + + +
+Implementation + + +
public fun get_reward_rate(config: &StakingConfig): (u64, u64) acquires StakingRewardsConfig {
+    if (features::periodical_reward_rate_decrease_enabled()) {
+        let epoch_rewards_rate = borrow_global<StakingRewardsConfig>(@aptos_framework).rewards_rate;
+        if (fixed_point64::is_zero(epoch_rewards_rate)) {
+            (0u64, 1u64)
+        } else {
+            // Maximize denominator for higher precision.
+            // Restriction: nominator <= MAX_REWARDS_RATE && denominator <= MAX_U64
+            let denominator = fixed_point64::divide_u128((MAX_REWARDS_RATE as u128), epoch_rewards_rate);
+            if (denominator > MAX_U64) {
+                denominator = MAX_U64
+            };
+            let nominator = (fixed_point64::multiply_u128(denominator, epoch_rewards_rate) as u64);
+            (nominator, (denominator as u64))
+        }
+    } else {
+        (config.rewards_rate, config.rewards_rate_denominator)
+    }
+}
+
+ + + +
+ + + +## Function `get_voting_power_increase_limit` + +Return the joining limit %. + + +
public fun get_voting_power_increase_limit(config: &staking_config::StakingConfig): u64
+
+ + + +
+Implementation + + +
public fun get_voting_power_increase_limit(config: &StakingConfig): u64 {
+    config.voting_power_increase_limit
+}
+
+ + + +
+ + + +## Function `calculate_and_save_latest_epoch_rewards_rate` + +Calculate and save the latest rewards rate. + + +
public(friend) fun calculate_and_save_latest_epoch_rewards_rate(): fixed_point64::FixedPoint64
+
+ + + +
+Implementation + + +
public(friend) fun calculate_and_save_latest_epoch_rewards_rate(): FixedPoint64 acquires StakingRewardsConfig {
+    assert!(features::periodical_reward_rate_decrease_enabled(), error::invalid_state(EDISABLED_FUNCTION));
+    let staking_rewards_config = calculate_and_save_latest_rewards_config();
+    staking_rewards_config.rewards_rate
+}
+
+ + + +
+ + + +## Function `calculate_and_save_latest_rewards_config` + +Calculate and return the up-to-date StakingRewardsConfig. + + +
fun calculate_and_save_latest_rewards_config(): staking_config::StakingRewardsConfig
+
+ + + +
+Implementation + + +
fun calculate_and_save_latest_rewards_config(): StakingRewardsConfig acquires StakingRewardsConfig {
+    let staking_rewards_config = borrow_global_mut<StakingRewardsConfig>(@aptos_framework);
+    let current_time_in_secs = timestamp::now_seconds();
+    assert!(
+        current_time_in_secs >= staking_rewards_config.last_rewards_rate_period_start_in_secs,
+        error::invalid_argument(EINVALID_LAST_REWARDS_RATE_PERIOD_START)
+    );
+    if (current_time_in_secs - staking_rewards_config.last_rewards_rate_period_start_in_secs < staking_rewards_config.rewards_rate_period_in_secs) {
+        return *staking_rewards_config
+    };
+    // Rewards rate decrease rate cannot be greater than 100%. Otherwise rewards rate will be negative.
+    assert!(
+        fixed_point64::ceil(staking_rewards_config.rewards_rate_decrease_rate) <= 1,
+        error::invalid_argument(EINVALID_REWARDS_RATE_DECREASE_RATE)
+    );
+    let new_rate = math_fixed64::mul_div(
+        staking_rewards_config.rewards_rate,
+        fixed_point64::sub(
+            fixed_point64::create_from_u128(1),
+            staking_rewards_config.rewards_rate_decrease_rate,
+        ),
+        fixed_point64::create_from_u128(1),
+    );
+    new_rate = fixed_point64::max(new_rate, staking_rewards_config.min_rewards_rate);
+
+    staking_rewards_config.rewards_rate = new_rate;
+    staking_rewards_config.last_rewards_rate_period_start_in_secs =
+        staking_rewards_config.last_rewards_rate_period_start_in_secs +
+        staking_rewards_config.rewards_rate_period_in_secs;
+    return *staking_rewards_config
+}
+
+ + + +
+ + + +## Function `update_required_stake` + +Update the min and max stake amounts. +Can only be called as part of the Aptos governance proposal process established by the AptosGovernance module. + + +
public fun update_required_stake(aptos_framework: &signer, minimum_stake: u64, maximum_stake: u64)
+
+ + + +
+Implementation + + +
public fun update_required_stake(
+    aptos_framework: &signer,
+    minimum_stake: u64,
+    maximum_stake: u64,
+) acquires StakingConfig {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    validate_required_stake(minimum_stake, maximum_stake);
+
+    let staking_config = borrow_global_mut<StakingConfig>(@aptos_framework);
+    staking_config.minimum_stake = minimum_stake;
+    staking_config.maximum_stake = maximum_stake;
+}
+
+ + + +
+ + + +## Function `update_recurring_lockup_duration_secs` + +Update the recurring lockup duration. +Can only be called as part of the Aptos governance proposal process established by the AptosGovernance module. + + +
public fun update_recurring_lockup_duration_secs(aptos_framework: &signer, new_recurring_lockup_duration_secs: u64)
+
+ + + +
+Implementation + + +
public fun update_recurring_lockup_duration_secs(
+    aptos_framework: &signer,
+    new_recurring_lockup_duration_secs: u64,
+) acquires StakingConfig {
+    assert!(new_recurring_lockup_duration_secs > 0, error::invalid_argument(EZERO_LOCKUP_DURATION));
+    system_addresses::assert_aptos_framework(aptos_framework);
+
+    let staking_config = borrow_global_mut<StakingConfig>(@aptos_framework);
+    staking_config.recurring_lockup_duration_secs = new_recurring_lockup_duration_secs;
+}
+
+ + + +
+ + + +## Function `update_rewards_rate` + +DEPRECATING +Update the rewards rate. +Can only be called as part of the Aptos governance proposal process established by the AptosGovernance module. + + +
public fun update_rewards_rate(aptos_framework: &signer, new_rewards_rate: u64, new_rewards_rate_denominator: u64)
+
+ + + +
+Implementation + + +
public fun update_rewards_rate(
+    aptos_framework: &signer,
+    new_rewards_rate: u64,
+    new_rewards_rate_denominator: u64,
+) acquires StakingConfig {
+    assert!(!features::periodical_reward_rate_decrease_enabled(), error::invalid_state(EDEPRECATED_FUNCTION));
+    system_addresses::assert_aptos_framework(aptos_framework);
+    assert!(
+        new_rewards_rate_denominator > 0,
+        error::invalid_argument(EZERO_REWARDS_RATE_DENOMINATOR),
+    );
+    // `rewards_rate` which is the numerator is limited to be `<= MAX_REWARDS_RATE` in order to avoid the arithmetic
+    // overflow in the rewards calculation. `rewards_rate_denominator` can be adjusted to get the desired rewards
+    // rate (i.e., rewards_rate / rewards_rate_denominator).
+    assert!(new_rewards_rate <= MAX_REWARDS_RATE, error::invalid_argument(EINVALID_REWARDS_RATE));
+
+    // We assert that (rewards_rate / rewards_rate_denominator <= 1).
+    assert!(new_rewards_rate <= new_rewards_rate_denominator, error::invalid_argument(EINVALID_REWARDS_RATE));
+
+    let staking_config = borrow_global_mut<StakingConfig>(@aptos_framework);
+    staking_config.rewards_rate = new_rewards_rate;
+    staking_config.rewards_rate_denominator = new_rewards_rate_denominator;
+}
+
+ + + +
+ + + +## Function `update_rewards_config` + + + +
public fun update_rewards_config(aptos_framework: &signer, rewards_rate: fixed_point64::FixedPoint64, min_rewards_rate: fixed_point64::FixedPoint64, rewards_rate_period_in_secs: u64, rewards_rate_decrease_rate: fixed_point64::FixedPoint64)
+
+ + + +
+Implementation + + +
public fun update_rewards_config(
+    aptos_framework: &signer,
+    rewards_rate: FixedPoint64,
+    min_rewards_rate: FixedPoint64,
+    rewards_rate_period_in_secs: u64,
+    rewards_rate_decrease_rate: FixedPoint64,
+) acquires StakingRewardsConfig {
+    system_addresses::assert_aptos_framework(aptos_framework);
+
+    validate_rewards_config(
+        rewards_rate,
+        min_rewards_rate,
+        rewards_rate_period_in_secs,
+        rewards_rate_decrease_rate,
+    );
+
+    let staking_rewards_config = borrow_global_mut<StakingRewardsConfig>(@aptos_framework);
+    // Currently rewards_rate_period_in_secs is not allowed to be changed because this could bring complicated
+    // logics. At the moment the argument is just a placeholder for future use.
+    assert!(
+        rewards_rate_period_in_secs == staking_rewards_config.rewards_rate_period_in_secs,
+        error::invalid_argument(EINVALID_REWARDS_RATE_PERIOD),
+    );
+    staking_rewards_config.rewards_rate = rewards_rate;
+    staking_rewards_config.min_rewards_rate = min_rewards_rate;
+    staking_rewards_config.rewards_rate_period_in_secs = rewards_rate_period_in_secs;
+    staking_rewards_config.rewards_rate_decrease_rate = rewards_rate_decrease_rate;
+}
+
+ + + +
+ + + +## Function `update_voting_power_increase_limit` + +Update the joining limit %. +Can only be called as part of the Aptos governance proposal process established by the AptosGovernance module. + + +
public fun update_voting_power_increase_limit(aptos_framework: &signer, new_voting_power_increase_limit: u64)
+
+ + + +
+Implementation + + +
public fun update_voting_power_increase_limit(
+    aptos_framework: &signer,
+    new_voting_power_increase_limit: u64,
+) acquires StakingConfig {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    assert!(
+        new_voting_power_increase_limit > 0 && new_voting_power_increase_limit <= 50,
+        error::invalid_argument(EINVALID_VOTING_POWER_INCREASE_LIMIT),
+    );
+
+    let staking_config = borrow_global_mut<StakingConfig>(@aptos_framework);
+    staking_config.voting_power_increase_limit = new_voting_power_increase_limit;
+}
+
+ + + +
+ + + +## Function `validate_required_stake` + + + +
fun validate_required_stake(minimum_stake: u64, maximum_stake: u64)
+
+ + + +
+Implementation + + +
fun validate_required_stake(minimum_stake: u64, maximum_stake: u64) {
+    assert!(minimum_stake <= maximum_stake && maximum_stake > 0, error::invalid_argument(EINVALID_STAKE_RANGE));
+}
+
+ + + +
+ + + +## Function `validate_rewards_config` + + + +
fun validate_rewards_config(rewards_rate: fixed_point64::FixedPoint64, min_rewards_rate: fixed_point64::FixedPoint64, rewards_rate_period_in_secs: u64, rewards_rate_decrease_rate: fixed_point64::FixedPoint64)
+
+ + + +
+Implementation + + +
fun validate_rewards_config(
+    rewards_rate: FixedPoint64,
+    min_rewards_rate: FixedPoint64,
+    rewards_rate_period_in_secs: u64,
+    rewards_rate_decrease_rate: FixedPoint64,
+) {
+    // Bound rewards rate to avoid arithmetic overflow.
+    assert!(
+        less_or_equal(rewards_rate, fixed_point64::create_from_u128((1u128))),
+        error::invalid_argument(EINVALID_REWARDS_RATE)
+    );
+    assert!(
+        less_or_equal(min_rewards_rate, rewards_rate),
+        error::invalid_argument(EINVALID_MIN_REWARDS_RATE)
+    );
+    // Rewards rate decrease rate cannot be greater than 100%. Otherwise rewards rate will be negative.
+    assert!(
+        fixed_point64::ceil(rewards_rate_decrease_rate) <= 1,
+        error::invalid_argument(EINVALID_REWARDS_RATE_DECREASE_RATE)
+    );
+    // This field, rewards_rate_period_in_secs must be greater than 0.
+    // TODO: rewards_rate_period_in_secs should be longer than the epoch duration but reading epoch duration causes a circular dependency.
+    assert!(
+        rewards_rate_period_in_secs > 0,
+        error::invalid_argument(EINVALID_REWARDS_RATE_PERIOD),
+    );
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1The ability to initialize the staking config and staking rewards resources, as well as the ability to update the staking config and staking rewards should only be available to the Aptos framework account.MediumThe function initialize and initialize_rewards are used to initialize the StakingConfig and StakingRewardConfig resources. Updating the resources, can be done using the update_required_stake, update_recurring_lockup_duration_secs, update_rewards_rate, update_rewards_config, update_voting_power_increase_limit functions, which ensure that the signer is aptos_framework using the assert_aptos_framework function.Verified via initialize, initialize_rewards, update_required_stake, update_recurring_lockup_duration_secs, update_rewards_rate, update_rewards_config, and update_voting_power_increase_limit.
2The voting power increase, in a staking config resource, should always be greater than 0 and less or equal to 50.HighDuring the initialization and update of the staking config, the value of voting_power_increase_limit is ensured to be in the range of (0 to 50].Ensured via initialize and update_voting_power_increase_limit. Formally verified via StakingConfig.
3The recurring lockup duration, in a staking config resource, should always be greater than 0.MediumDuring the initialization and update of the staking config, the value of recurring_lockup_duration_secs is ensured to be greater than 0.Ensured via initialize and update_recurring_lockup_duration_secs. Formally verified via StakingConfig.
4The calculation of rewards should not be possible if the last reward rate period just started.HighThe function calculate_and_save_latest_rewards_config ensures that last_rewards_rate_period_start_in_secs is greater or equal to the current timestamp.Formally verified in StakingRewardsConfigEnabledRequirement.
5The rewards rate should always be less than or equal to 100%.HighWhen initializing and updating the rewards rate, it is ensured that the rewards_rate is less or equal to MAX_REWARDS_RATE, otherwise rewards rate will be negative.Verified via StakingConfig.
6The reward rate's denominator should never be 0.HighWhile initializing and updating the rewards rate, rewards_rate_denominator is ensured to be greater than 0.Verified via StakingConfig.
7The reward rate's numerator-to-denominator ratio should always be less than or equal to 1.HighWhen initializing and updating the rewards rate, it is ensured that rewards_rate is less or equal to rewards_rate_denominator.Verified via StakingConfig.
+ + + + + + +### Module-level Specification + + +
invariant [suspendable] chain_status::is_operating() ==> exists<StakingConfig>(@aptos_framework);
+pragma verify = true;
+pragma aborts_if_is_strict;
+
+ + + + + +### Resource `StakingConfig` + + +
struct StakingConfig has copy, drop, key
+
+ + + +
+
+minimum_stake: u64 +
+
+ +
+
+maximum_stake: u64 +
+
+ +
+
+recurring_lockup_duration_secs: u64 +
+
+ +
+
+allow_validator_set_change: bool +
+
+ +
+
+rewards_rate: u64 +
+
+ +
+
+rewards_rate_denominator: u64 +
+
+ +
+
+voting_power_increase_limit: u64 +
+
+ +
+
+ + + +
// This enforces high-level requirement 5:
+invariant rewards_rate <= MAX_REWARDS_RATE;
+// This enforces high-level requirement 6:
+invariant rewards_rate_denominator > 0;
+// This enforces high-level requirement 7:
+invariant rewards_rate <= rewards_rate_denominator;
+// This enforces high-level requirement 3:
+invariant recurring_lockup_duration_secs > 0;
+// This enforces high-level requirement 2:
+invariant voting_power_increase_limit > 0 && voting_power_increase_limit <= 50;
+
+ + + + + +### Resource `StakingRewardsConfig` + + +
struct StakingRewardsConfig has copy, drop, key
+
+ + + +
+
+rewards_rate: fixed_point64::FixedPoint64 +
+
+ +
+
+min_rewards_rate: fixed_point64::FixedPoint64 +
+
+ +
+
+rewards_rate_period_in_secs: u64 +
+
+ +
+
+last_rewards_rate_period_start_in_secs: u64 +
+
+ +
+
+rewards_rate_decrease_rate: fixed_point64::FixedPoint64 +
+
+ +
+
+ + + +
invariant fixed_point64::spec_less_or_equal(
+    rewards_rate,
+    fixed_point64::spec_create_from_u128((1u128)));
+invariant fixed_point64::spec_less_or_equal(min_rewards_rate, rewards_rate);
+invariant rewards_rate_period_in_secs > 0;
+invariant fixed_point64::spec_ceil(rewards_rate_decrease_rate) <= 1;
+
+ + + + + +### Function `initialize` + + +
public(friend) fun initialize(aptos_framework: &signer, minimum_stake: u64, maximum_stake: u64, recurring_lockup_duration_secs: u64, allow_validator_set_change: bool, rewards_rate: u64, rewards_rate_denominator: u64, voting_power_increase_limit: u64)
+
+ + +Caller must be @aptos_framework. +The minimum_stake must be less than or equal to the maximum_stake, and the maximum_stake must be greater than zero. +The rewards_rate_denominator must be greater than zero. +Only 0%-50% of the current total voting power is allowed to join the validator set in each epoch. +The rewards_rate which is the numerator is limited to be <= MAX_REWARDS_RATE in order to avoid the arithmetic overflow in the rewards calculation. +rewards_rate/rewards_rate_denominator <= 1. +StakingConfig does not exist under the aptos_framework before creating it. + + +
let addr = signer::address_of(aptos_framework);
+// This enforces high-level requirement 1:
+aborts_if addr != @aptos_framework;
+aborts_if minimum_stake > maximum_stake || maximum_stake == 0;
+// This enforces high-level requirement 3:
+aborts_if recurring_lockup_duration_secs == 0;
+aborts_if rewards_rate_denominator == 0;
+// This enforces high-level requirement 2:
+aborts_if voting_power_increase_limit == 0 || voting_power_increase_limit > 50;
+aborts_if rewards_rate > MAX_REWARDS_RATE;
+aborts_if rewards_rate > rewards_rate_denominator;
+aborts_if exists<StakingConfig>(addr);
+ensures exists<StakingConfig>(addr);
+
+ + + + + +### Function `reward_rate` + + +
#[view]
+public fun reward_rate(): (u64, u64)
+
+ + + + +
let config = global<StakingConfig>(@aptos_framework);
+aborts_if !exists<StakingConfig>(@aptos_framework);
+include StakingRewardsConfigRequirement;
+ensures (features::spec_periodical_reward_rate_decrease_enabled() &&
+    (global<StakingRewardsConfig>(@aptos_framework).rewards_rate.value as u64) != 0) ==>
+    result_1 <= MAX_REWARDS_RATE && result_2 <= MAX_U64;
+
+ + + + + +### Function `initialize_rewards` + + +
public fun initialize_rewards(aptos_framework: &signer, rewards_rate: fixed_point64::FixedPoint64, min_rewards_rate: fixed_point64::FixedPoint64, rewards_rate_period_in_secs: u64, last_rewards_rate_period_start_in_secs: u64, rewards_rate_decrease_rate: fixed_point64::FixedPoint64)
+
+ + +Caller must be @aptos_framework. +last_rewards_rate_period_start_in_secs cannot be later than now. +Abort at any condition in StakingRewardsConfigValidationAbortsIf. +StakingRewardsConfig does not exist under the aptos_framework before creating it. + + +
pragma verify_duration_estimate = 120;
+requires exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+let addr = signer::address_of(aptos_framework);
+// This enforces high-level requirement 1:
+aborts_if addr != @aptos_framework;
+aborts_if last_rewards_rate_period_start_in_secs > timestamp::spec_now_seconds();
+include StakingRewardsConfigValidationAbortsIf;
+aborts_if exists<StakingRewardsConfig>(addr);
+ensures exists<StakingRewardsConfig>(addr);
+
+ + + + + +### Function `get` + + +
public fun get(): staking_config::StakingConfig
+
+ + + + +
aborts_if !exists<StakingConfig>(@aptos_framework);
+
+ + + + + +### Function `get_reward_rate` + + +
public fun get_reward_rate(config: &staking_config::StakingConfig): (u64, u64)
+
+ + + + +
include StakingRewardsConfigRequirement;
+ensures (features::spec_periodical_reward_rate_decrease_enabled() &&
+    (global<StakingRewardsConfig>(@aptos_framework).rewards_rate.value as u64) != 0) ==>
+        result_1 <= MAX_REWARDS_RATE && result_2 <= MAX_U64;
+
+ + + + + +### Function `calculate_and_save_latest_epoch_rewards_rate` + + +
public(friend) fun calculate_and_save_latest_epoch_rewards_rate(): fixed_point64::FixedPoint64
+
+ + + + +
pragma verify_duration_estimate = 120;
+aborts_if !exists<StakingRewardsConfig>(@aptos_framework);
+aborts_if !features::spec_periodical_reward_rate_decrease_enabled();
+include StakingRewardsConfigRequirement;
+
+ + + + + +### Function `calculate_and_save_latest_rewards_config` + + +
fun calculate_and_save_latest_rewards_config(): staking_config::StakingRewardsConfig
+
+ + + + +
pragma verify_duration_estimate = 120;
+requires features::spec_periodical_reward_rate_decrease_enabled();
+include StakingRewardsConfigRequirement;
+aborts_if !exists<StakingRewardsConfig>(@aptos_framework);
+
+ + + + + +### Function `update_required_stake` + + +
public fun update_required_stake(aptos_framework: &signer, minimum_stake: u64, maximum_stake: u64)
+
+ + +Caller must be @aptos_framework. +The minimum_stake must be less than or equal to the maximum_stake, and the maximum_stake must be greater than zero. +The StakingConfig is under @aptos_framework. + + +
let addr = signer::address_of(aptos_framework);
+// This enforces high-level requirement 1:
+aborts_if addr != @aptos_framework;
+aborts_if minimum_stake > maximum_stake || maximum_stake == 0;
+aborts_if !exists<StakingConfig>(@aptos_framework);
+ensures global<StakingConfig>(@aptos_framework).minimum_stake == minimum_stake &&
+    global<StakingConfig>(@aptos_framework).maximum_stake == maximum_stake;
+
+ + + + + +### Function `update_recurring_lockup_duration_secs` + + +
public fun update_recurring_lockup_duration_secs(aptos_framework: &signer, new_recurring_lockup_duration_secs: u64)
+
+ + +Caller must be @aptos_framework. +The new_recurring_lockup_duration_secs must be greater than zero. +The StakingConfig is under @aptos_framework. + + +
let addr = signer::address_of(aptos_framework);
+// This enforces high-level requirement 1:
+aborts_if addr != @aptos_framework;
+// This enforces high-level requirement 3:
+aborts_if new_recurring_lockup_duration_secs == 0;
+aborts_if !exists<StakingConfig>(@aptos_framework);
+ensures global<StakingConfig>(@aptos_framework).recurring_lockup_duration_secs == new_recurring_lockup_duration_secs;
+
+ + + + + +### Function `update_rewards_rate` + + +
public fun update_rewards_rate(aptos_framework: &signer, new_rewards_rate: u64, new_rewards_rate_denominator: u64)
+
+ + +Caller must be @aptos_framework. +The new_rewards_rate_denominator must be greater than zero. +The StakingConfig is under @aptos_framework. +The rewards_rate which is the numerator is limited to be <= MAX_REWARDS_RATE in order to avoid the arithmetic overflow in the rewards calculation. +rewards_rate/rewards_rate_denominator <= 1. + + +
aborts_if features::spec_periodical_reward_rate_decrease_enabled();
+let addr = signer::address_of(aptos_framework);
+// This enforces high-level requirement 1:
+aborts_if addr != @aptos_framework;
+aborts_if new_rewards_rate_denominator == 0;
+aborts_if !exists<StakingConfig>(@aptos_framework);
+aborts_if new_rewards_rate > MAX_REWARDS_RATE;
+aborts_if new_rewards_rate > new_rewards_rate_denominator;
+let post staking_config = global<StakingConfig>(@aptos_framework);
+ensures staking_config.rewards_rate == new_rewards_rate;
+ensures staking_config.rewards_rate_denominator == new_rewards_rate_denominator;
+
+ + + + + +### Function `update_rewards_config` + + +
public fun update_rewards_config(aptos_framework: &signer, rewards_rate: fixed_point64::FixedPoint64, min_rewards_rate: fixed_point64::FixedPoint64, rewards_rate_period_in_secs: u64, rewards_rate_decrease_rate: fixed_point64::FixedPoint64)
+
+ + +Caller must be @aptos_framework. +StakingRewardsConfig is under the @aptos_framework. + + +
pragma verify_duration_estimate = 120;
+include StakingRewardsConfigRequirement;
+let addr = signer::address_of(aptos_framework);
+// This enforces high-level requirement 1:
+aborts_if addr != @aptos_framework;
+aborts_if global<StakingRewardsConfig>(@aptos_framework).rewards_rate_period_in_secs != rewards_rate_period_in_secs;
+include StakingRewardsConfigValidationAbortsIf;
+aborts_if !exists<StakingRewardsConfig>(addr);
+let post staking_rewards_config = global<StakingRewardsConfig>(@aptos_framework);
+ensures staking_rewards_config.rewards_rate == rewards_rate;
+ensures staking_rewards_config.min_rewards_rate == min_rewards_rate;
+ensures staking_rewards_config.rewards_rate_period_in_secs == rewards_rate_period_in_secs;
+ensures staking_rewards_config.rewards_rate_decrease_rate == rewards_rate_decrease_rate;
+
+ + + + + +### Function `update_voting_power_increase_limit` + + +
public fun update_voting_power_increase_limit(aptos_framework: &signer, new_voting_power_increase_limit: u64)
+
+ + +Caller must be @aptos_framework. +Only 0%-50% of the current total voting power is allowed to join the validator set in each epoch. +The StakingConfig is under @aptos_framework. + + +
let addr = signer::address_of(aptos_framework);
+// This enforces high-level requirement 1:
+aborts_if addr != @aptos_framework;
+// This enforces high-level requirement 2:
+aborts_if new_voting_power_increase_limit == 0 || new_voting_power_increase_limit > 50;
+aborts_if !exists<StakingConfig>(@aptos_framework);
+ensures global<StakingConfig>(@aptos_framework).voting_power_increase_limit == new_voting_power_increase_limit;
+
+ + + + + +### Function `validate_required_stake` + + +
fun validate_required_stake(minimum_stake: u64, maximum_stake: u64)
+
+ + +The minimum_stake must be less than or equal to the maximum_stake, and the maximum_stake must be greater than zero. + + +
aborts_if minimum_stake > maximum_stake || maximum_stake == 0;
+
+ + + + + +### Function `validate_rewards_config` + + +
fun validate_rewards_config(rewards_rate: fixed_point64::FixedPoint64, min_rewards_rate: fixed_point64::FixedPoint64, rewards_rate_period_in_secs: u64, rewards_rate_decrease_rate: fixed_point64::FixedPoint64)
+
+ + +Abort at any condition in StakingRewardsConfigValidationAbortsIf. + + +
include StakingRewardsConfigValidationAbortsIf;
+
+ + +rewards_rate must be within [0, 1]. +min_rewards_rate must be not greater than rewards_rate. +rewards_rate_period_in_secs must be greater than 0. +rewards_rate_decrease_rate must be within [0,1]. + + + + + +
schema StakingRewardsConfigValidationAbortsIf {
+    rewards_rate: FixedPoint64;
+    min_rewards_rate: FixedPoint64;
+    rewards_rate_period_in_secs: u64;
+    rewards_rate_decrease_rate: FixedPoint64;
+    aborts_if fixed_point64::spec_greater(
+        rewards_rate,
+        fixed_point64::spec_create_from_u128((1u128)));
+    aborts_if fixed_point64::spec_greater(min_rewards_rate, rewards_rate);
+    aborts_if rewards_rate_period_in_secs == 0;
+    aborts_if fixed_point64::spec_ceil(rewards_rate_decrease_rate) > 1;
+}
+
+ + + + + + + +
schema StakingRewardsConfigRequirement {
+    requires exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+    include features::spec_periodical_reward_rate_decrease_enabled() ==> StakingRewardsConfigEnabledRequirement;
+}
+
+ + + + + + + +
schema StakingRewardsConfigEnabledRequirement {
+    requires exists<StakingRewardsConfig>(@aptos_framework);
+    let staking_rewards_config = global<StakingRewardsConfig>(@aptos_framework);
+    let rewards_rate = staking_rewards_config.rewards_rate;
+    let min_rewards_rate = staking_rewards_config.min_rewards_rate;
+    let rewards_rate_period_in_secs = staking_rewards_config.rewards_rate_period_in_secs;
+    let last_rewards_rate_period_start_in_secs = staking_rewards_config.last_rewards_rate_period_start_in_secs;
+    let rewards_rate_decrease_rate = staking_rewards_config.rewards_rate_decrease_rate;
+    requires fixed_point64::spec_less_or_equal(
+        rewards_rate,
+        fixed_point64::spec_create_from_u128((1u128)));
+    requires fixed_point64::spec_less_or_equal(min_rewards_rate, rewards_rate);
+    requires rewards_rate_period_in_secs > 0;
+    // This enforces high-level requirement 4:
+    requires last_rewards_rate_period_start_in_secs <= timestamp::spec_now_seconds();
+    requires fixed_point64::spec_ceil(rewards_rate_decrease_rate) <= 1;
+}
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/staking_contract.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/staking_contract.md new file mode 100644 index 0000000000000..015101fcddbc0 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/staking_contract.md @@ -0,0 +1,3635 @@ + + + +# Module `0x1::staking_contract` + +Allow stakers and operators to enter a staking contract with reward sharing. +The main accounting logic in a staking contract consists of 2 parts: +1. Tracks how much commission needs to be paid out to the operator. This is tracked with an increasing principal +amount that's updated every time the operator requests commission, the staker withdraws funds, or the staker +switches operators. +2. Distributions of funds to operators (commissions) and stakers (stake withdrawals) use the shares model provided +by the pool_u64 to track shares that increase in price as the stake pool accumulates rewards. + +Example flow: +1. A staker creates a staking contract with an operator by calling create_staking_contract() with 100 coins of +initial stake and commission = 10%. This means the operator will receive 10% of any accumulated rewards. A new stake +pool will be created and hosted in a separate account that's controlled by the staking contract. +2. The operator sets up a validator node and, once ready, joins the validator set by calling stake::join_validator_set +3. After some time, the stake pool gains rewards and now has 150 coins. +4. Operator can now call request_commission. 10% of (150 - 100) = 5 coins will be unlocked from the stake pool. The +staker's principal is now updated from 100 to 145 (150 coins - 5 coins of commission). The pending distribution pool +has 5 coins total and the operator owns all 5 shares of it. +5. Some more time has passed. The pool now has 50 more coins in rewards and a total balance of 195. 
The operator +calls request_commission again. Since the previous 5 coins have now become withdrawable, it'll be deposited into the +operator's account first. Their new commission will be 10% of (195 coins - 145 principal) = 5 coins. Principal is +updated to be 190 (195 - 5). Pending distribution pool has 5 coins and operator owns all 5 shares. +6. Staker calls unlock_stake to unlock 50 coins of stake, which gets added to the pending distribution pool. Based +on shares math, staker will be owning 50 shares and operator still owns 5 shares of the 55-coin pending distribution +pool. +7. Some time passes and the 55 coins become fully withdrawable from the stake pool. Due to accumulated rewards, the +55 coins become 70 coins. Calling distribute() distributes 6 coins to the operator and 64 coins to the validator. + + +- [Struct `StakingGroupContainer`](#0x1_staking_contract_StakingGroupContainer) +- [Struct `StakingContract`](#0x1_staking_contract_StakingContract) +- [Resource `Store`](#0x1_staking_contract_Store) +- [Resource `BeneficiaryForOperator`](#0x1_staking_contract_BeneficiaryForOperator) +- [Struct `UpdateCommissionEvent`](#0x1_staking_contract_UpdateCommissionEvent) +- [Struct `UpdateCommission`](#0x1_staking_contract_UpdateCommission) +- [Resource `StakingGroupUpdateCommissionEvent`](#0x1_staking_contract_StakingGroupUpdateCommissionEvent) +- [Struct `CreateStakingContract`](#0x1_staking_contract_CreateStakingContract) +- [Struct `UpdateVoter`](#0x1_staking_contract_UpdateVoter) +- [Struct `ResetLockup`](#0x1_staking_contract_ResetLockup) +- [Struct `AddStake`](#0x1_staking_contract_AddStake) +- [Struct `RequestCommission`](#0x1_staking_contract_RequestCommission) +- [Struct `UnlockStake`](#0x1_staking_contract_UnlockStake) +- [Struct `SwitchOperator`](#0x1_staking_contract_SwitchOperator) +- [Struct `AddDistribution`](#0x1_staking_contract_AddDistribution) +- [Struct `Distribute`](#0x1_staking_contract_Distribute) +- [Struct 
`SetBeneficiaryForOperator`](#0x1_staking_contract_SetBeneficiaryForOperator) +- [Struct `CreateStakingContractEvent`](#0x1_staking_contract_CreateStakingContractEvent) +- [Struct `UpdateVoterEvent`](#0x1_staking_contract_UpdateVoterEvent) +- [Struct `ResetLockupEvent`](#0x1_staking_contract_ResetLockupEvent) +- [Struct `AddStakeEvent`](#0x1_staking_contract_AddStakeEvent) +- [Struct `RequestCommissionEvent`](#0x1_staking_contract_RequestCommissionEvent) +- [Struct `UnlockStakeEvent`](#0x1_staking_contract_UnlockStakeEvent) +- [Struct `SwitchOperatorEvent`](#0x1_staking_contract_SwitchOperatorEvent) +- [Struct `AddDistributionEvent`](#0x1_staking_contract_AddDistributionEvent) +- [Struct `DistributeEvent`](#0x1_staking_contract_DistributeEvent) +- [Constants](#@Constants_0) +- [Function `stake_pool_address`](#0x1_staking_contract_stake_pool_address) +- [Function `last_recorded_principal`](#0x1_staking_contract_last_recorded_principal) +- [Function `commission_percentage`](#0x1_staking_contract_commission_percentage) +- [Function `staking_contract_amounts`](#0x1_staking_contract_staking_contract_amounts) +- [Function `pending_distribution_counts`](#0x1_staking_contract_pending_distribution_counts) +- [Function `staking_contract_exists`](#0x1_staking_contract_staking_contract_exists) +- [Function `beneficiary_for_operator`](#0x1_staking_contract_beneficiary_for_operator) +- [Function `get_expected_stake_pool_address`](#0x1_staking_contract_get_expected_stake_pool_address) +- [Function `create_staking_contract`](#0x1_staking_contract_create_staking_contract) +- [Function `create_staking_contract_with_coins`](#0x1_staking_contract_create_staking_contract_with_coins) +- [Function `add_stake`](#0x1_staking_contract_add_stake) +- [Function `update_voter`](#0x1_staking_contract_update_voter) +- [Function `reset_lockup`](#0x1_staking_contract_reset_lockup) +- [Function `update_commision`](#0x1_staking_contract_update_commision) +- [Function 
`request_commission`](#0x1_staking_contract_request_commission) +- [Function `request_commission_internal`](#0x1_staking_contract_request_commission_internal) +- [Function `unlock_stake`](#0x1_staking_contract_unlock_stake) +- [Function `unlock_rewards`](#0x1_staking_contract_unlock_rewards) +- [Function `switch_operator_with_same_commission`](#0x1_staking_contract_switch_operator_with_same_commission) +- [Function `switch_operator`](#0x1_staking_contract_switch_operator) +- [Function `set_beneficiary_for_operator`](#0x1_staking_contract_set_beneficiary_for_operator) +- [Function `distribute`](#0x1_staking_contract_distribute) +- [Function `distribute_internal`](#0x1_staking_contract_distribute_internal) +- [Function `assert_staking_contract_exists`](#0x1_staking_contract_assert_staking_contract_exists) +- [Function `add_distribution`](#0x1_staking_contract_add_distribution) +- [Function `get_staking_contract_amounts_internal`](#0x1_staking_contract_get_staking_contract_amounts_internal) +- [Function `create_stake_pool`](#0x1_staking_contract_create_stake_pool) +- [Function `update_distribution_pool`](#0x1_staking_contract_update_distribution_pool) +- [Function `create_resource_account_seed`](#0x1_staking_contract_create_resource_account_seed) +- [Function `new_staking_contracts_holder`](#0x1_staking_contract_new_staking_contracts_holder) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Struct `StakingContract`](#@Specification_1_StakingContract) + - [Function `stake_pool_address`](#@Specification_1_stake_pool_address) + - [Function `last_recorded_principal`](#@Specification_1_last_recorded_principal) + - [Function `commission_percentage`](#@Specification_1_commission_percentage) + - [Function `staking_contract_amounts`](#@Specification_1_staking_contract_amounts) + - [Function `pending_distribution_counts`](#@Specification_1_pending_distribution_counts) + - [Function 
`staking_contract_exists`](#@Specification_1_staking_contract_exists) + - [Function `beneficiary_for_operator`](#@Specification_1_beneficiary_for_operator) + - [Function `create_staking_contract`](#@Specification_1_create_staking_contract) + - [Function `create_staking_contract_with_coins`](#@Specification_1_create_staking_contract_with_coins) + - [Function `add_stake`](#@Specification_1_add_stake) + - [Function `update_voter`](#@Specification_1_update_voter) + - [Function `reset_lockup`](#@Specification_1_reset_lockup) + - [Function `update_commision`](#@Specification_1_update_commision) + - [Function `request_commission`](#@Specification_1_request_commission) + - [Function `request_commission_internal`](#@Specification_1_request_commission_internal) + - [Function `unlock_stake`](#@Specification_1_unlock_stake) + - [Function `unlock_rewards`](#@Specification_1_unlock_rewards) + - [Function `switch_operator_with_same_commission`](#@Specification_1_switch_operator_with_same_commission) + - [Function `switch_operator`](#@Specification_1_switch_operator) + - [Function `set_beneficiary_for_operator`](#@Specification_1_set_beneficiary_for_operator) + - [Function `distribute`](#@Specification_1_distribute) + - [Function `distribute_internal`](#@Specification_1_distribute_internal) + - [Function `assert_staking_contract_exists`](#@Specification_1_assert_staking_contract_exists) + - [Function `add_distribution`](#@Specification_1_add_distribution) + - [Function `get_staking_contract_amounts_internal`](#@Specification_1_get_staking_contract_amounts_internal) + - [Function `create_stake_pool`](#@Specification_1_create_stake_pool) + - [Function `update_distribution_pool`](#@Specification_1_update_distribution_pool) + - [Function `new_staking_contracts_holder`](#@Specification_1_new_staking_contracts_holder) + + +
use 0x1::account;
+use 0x1::aptos_account;
+use 0x1::aptos_coin;
+use 0x1::bcs;
+use 0x1::coin;
+use 0x1::error;
+use 0x1::event;
+use 0x1::features;
+use 0x1::pool_u64;
+use 0x1::signer;
+use 0x1::simple_map;
+use 0x1::stake;
+use 0x1::staking_config;
+use 0x1::vector;
+
+ + + + + +## Struct `StakingGroupContainer` + + + +
#[resource_group(#[scope = module_])]
+struct StakingGroupContainer
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `StakingContract` + + + +
struct StakingContract has store
+
+ + + +
+Fields + + +
+
+principal: u64 +
+
+ +
+
+pool_address: address +
+
+ +
+
+owner_cap: stake::OwnerCapability +
+
+ +
+
+commission_percentage: u64 +
+
+ +
+
+distribution_pool: pool_u64::Pool +
+
+ +
+
+signer_cap: account::SignerCapability +
+
+ +
+
+ + +
+ + + +## Resource `Store` + + + +
struct Store has key
+
+ + + +
+Fields + + +
+
+staking_contracts: simple_map::SimpleMap<address, staking_contract::StakingContract> +
+
+ +
+
+create_staking_contract_events: event::EventHandle<staking_contract::CreateStakingContractEvent> +
+
+ +
+
+update_voter_events: event::EventHandle<staking_contract::UpdateVoterEvent> +
+
+ +
+
+reset_lockup_events: event::EventHandle<staking_contract::ResetLockupEvent> +
+
+ +
+
+add_stake_events: event::EventHandle<staking_contract::AddStakeEvent> +
+
+ +
+
+request_commission_events: event::EventHandle<staking_contract::RequestCommissionEvent> +
+
+ +
+
+unlock_stake_events: event::EventHandle<staking_contract::UnlockStakeEvent> +
+
+ +
+
+switch_operator_events: event::EventHandle<staking_contract::SwitchOperatorEvent> +
+
+ +
+
+add_distribution_events: event::EventHandle<staking_contract::AddDistributionEvent> +
+
+ +
+
+distribute_events: event::EventHandle<staking_contract::DistributeEvent> +
+
+ +
+
+ + +
+ + + +## Resource `BeneficiaryForOperator` + + + +
struct BeneficiaryForOperator has key
+
+ + + +
+Fields + + +
+
+beneficiary_for_operator: address +
+
+ +
+
+ + +
+ + + +## Struct `UpdateCommissionEvent` + + + +
struct UpdateCommissionEvent has drop, store
+
+ + + +
+Fields + + +
+
+staker: address +
+
+ +
+
+operator: address +
+
+ +
+
+old_commission_percentage: u64 +
+
+ +
+
+new_commission_percentage: u64 +
+
+ +
+
+ + +
+ + + +## Struct `UpdateCommission` + + + +
#[event]
+struct UpdateCommission has drop, store
+
+ + + +
+Fields + + +
+
+staker: address +
+
+ +
+
+operator: address +
+
+ +
+
+old_commission_percentage: u64 +
+
+ +
+
+new_commission_percentage: u64 +
+
+ +
+
+ + +
+ + + +## Resource `StakingGroupUpdateCommissionEvent` + + + +
#[resource_group_member(#[group = 0x1::staking_contract::StakingGroupContainer])]
+struct StakingGroupUpdateCommissionEvent has key
+
+ + + +
+Fields + + +
+
+update_commission_events: event::EventHandle<staking_contract::UpdateCommissionEvent> +
+
+ +
+
+ + +
+ + + +## Struct `CreateStakingContract` + + + +
#[event]
+struct CreateStakingContract has drop, store
+
+ + + +
+Fields + + +
+
+operator: address +
+
+ +
+
+voter: address +
+
+ +
+
+pool_address: address +
+
+ +
+
+principal: u64 +
+
+ +
+
+commission_percentage: u64 +
+
+ +
+
+ + +
+ + + +## Struct `UpdateVoter` + + + +
#[event]
+struct UpdateVoter has drop, store
+
+ + + +
+Fields + + +
+
+operator: address +
+
+ +
+
+pool_address: address +
+
+ +
+
+old_voter: address +
+
+ +
+
+new_voter: address +
+
+ +
+
+ + +
+ + + +## Struct `ResetLockup` + + + +
#[event]
+struct ResetLockup has drop, store
+
+ + + +
+Fields + + +
+
+operator: address +
+
+ +
+
+pool_address: address +
+
+ +
+
+ + +
+ + + +## Struct `AddStake` + + + +
#[event]
+struct AddStake has drop, store
+
+ + + +
+Fields + + +
+
+operator: address +
+
+ +
+
+pool_address: address +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `RequestCommission` + + + +
#[event]
+struct RequestCommission has drop, store
+
+ + + +
+Fields + + +
+
+operator: address +
+
+ +
+
+pool_address: address +
+
+ +
+
+accumulated_rewards: u64 +
+
+ +
+
+commission_amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `UnlockStake` + + + +
#[event]
+struct UnlockStake has drop, store
+
+ + + +
+Fields + + +
+
+operator: address +
+
+ +
+
+pool_address: address +
+
+ +
+
+amount: u64 +
+
+ +
+
+commission_paid: u64 +
+
+ +
+
+ + +
+ + + +## Struct `SwitchOperator` + + + +
#[event]
+struct SwitchOperator has drop, store
+
+ + + +
+Fields + + +
+
+old_operator: address +
+
+ +
+
+new_operator: address +
+
+ +
+
+pool_address: address +
+
+ +
+
+ + +
+ + + +## Struct `AddDistribution` + + + +
#[event]
+struct AddDistribution has drop, store
+
+ + + +
+Fields + + +
+
+operator: address +
+
+ +
+
+pool_address: address +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `Distribute` + + + +
#[event]
+struct Distribute has drop, store
+
+ + + +
+Fields + + +
+
+operator: address +
+
+ +
+
+pool_address: address +
+
+ +
+
+recipient: address +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `SetBeneficiaryForOperator` + + + +
#[event]
+struct SetBeneficiaryForOperator has drop, store
+
+ + + +
+Fields + + +
+
+operator: address +
+
+ +
+
+old_beneficiary: address +
+
+ +
+
+new_beneficiary: address +
+
+ +
+
+ + +
+ + + +## Struct `CreateStakingContractEvent` + + + +
struct CreateStakingContractEvent has drop, store
+
+ + + +
+Fields + + +
+
+operator: address +
+
+ +
+
+voter: address +
+
+ +
+
+pool_address: address +
+
+ +
+
+principal: u64 +
+
+ +
+
+commission_percentage: u64 +
+
+ +
+
+ + +
+ + + +## Struct `UpdateVoterEvent` + + + +
struct UpdateVoterEvent has drop, store
+
+ + + +
+Fields + + +
+
+operator: address +
+
+ +
+
+pool_address: address +
+
+ +
+
+old_voter: address +
+
+ +
+
+new_voter: address +
+
+ +
+
+ + +
+ + + +## Struct `ResetLockupEvent` + + + +
struct ResetLockupEvent has drop, store
+
+ + + +
+Fields + + +
+
+operator: address +
+
+ +
+
+pool_address: address +
+
+ +
+
+ + +
+ + + +## Struct `AddStakeEvent` + + + +
struct AddStakeEvent has drop, store
+
+ + + +
+Fields + + +
+
+operator: address +
+
+ +
+
+pool_address: address +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `RequestCommissionEvent` + + + +
struct RequestCommissionEvent has drop, store
+
+ + + +
+Fields + + +
+
+operator: address +
+
+ +
+
+pool_address: address +
+
+ +
+
+accumulated_rewards: u64 +
+
+ +
+
+commission_amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `UnlockStakeEvent` + + + +
struct UnlockStakeEvent has drop, store
+
+ + + +
+Fields + + +
+
+operator: address +
+
+ +
+
+pool_address: address +
+
+ +
+
+amount: u64 +
+
+ +
+
+commission_paid: u64 +
+
+ +
+
+ + +
+ + + +## Struct `SwitchOperatorEvent` + + + +
struct SwitchOperatorEvent has drop, store
+
+ + + +
+Fields + + +
+
+old_operator: address +
+
+ +
+
+new_operator: address +
+
+ +
+
+pool_address: address +
+
+ +
+
+ + +
+ + + +## Struct `AddDistributionEvent` + + + +
struct AddDistributionEvent has drop, store
+
+ + + +
+Fields + + +
+
+operator: address +
+
+ +
+
+pool_address: address +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `DistributeEvent` + + + +
struct DistributeEvent has drop, store
+
+ + + +
+Fields + + +
+
+operator: address +
+
+ +
+
+pool_address: address +
+
+ +
+
+recipient: address +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Commission percentage has to be between 0 and 100. + + +
const EINVALID_COMMISSION_PERCENTAGE: u64 = 2;
+
+ + + + + +Changing beneficiaries for operators is not supported. + + +
const EOPERATOR_BENEFICIARY_CHANGE_NOT_SUPPORTED: u64 = 9;
+
+ + + + + +Staking contracts can't be merged. + + +
const ECANT_MERGE_STAKING_CONTRACTS: u64 = 5;
+
+ + + + + +Not enough active stake to withdraw. Some stake might still be pending and will be active in the next epoch. + + 
const EINSUFFICIENT_ACTIVE_STAKE_TO_WITHDRAW: u64 = 7;
+
+ + + + + +Store amount must be at least the min stake required for a stake pool to join the validator set. + + +
const EINSUFFICIENT_STAKE_AMOUNT: u64 = 1;
+
+ + + + + +Caller must be either the staker, operator, or beneficiary. + + +
const ENOT_STAKER_OR_OPERATOR_OR_BENEFICIARY: u64 = 8;
+
+ + + + + +No staking contract between the staker and operator found. + + +
const ENO_STAKING_CONTRACT_FOUND_FOR_OPERATOR: u64 = 4;
+
+ + + + + +Staker has no staking contracts. + + +
const ENO_STAKING_CONTRACT_FOUND_FOR_STAKER: u64 = 3;
+
+ + + + + +The staking contract already exists and cannot be re-created. + + +
const ESTAKING_CONTRACT_ALREADY_EXISTS: u64 = 6;
+
+ + + + + +Maximum number of distributions a stake pool can support. + + +
const MAXIMUM_PENDING_DISTRIBUTIONS: u64 = 20;
+
+ + + + + + + +
const SALT: vector<u8> = [97, 112, 116, 111, 115, 95, 102, 114, 97, 109, 101, 119, 111, 114, 107, 58, 58, 115, 116, 97, 107, 105, 110, 103, 95, 99, 111, 110, 116, 114, 97, 99, 116];
+
+ + + + + +## Function `stake_pool_address` + +Return the address of the underlying stake pool for the staking contract between the provided staker and +operator. + +This errors out if the staking contract with the provided staker and operator doesn't exist. + + 
#[view]
+public fun stake_pool_address(staker: address, operator: address): address
+
+ + + +
+Implementation + + +
public fun stake_pool_address(staker: address, operator: address): address acquires Store {
+    assert_staking_contract_exists(staker, operator);
+    let staking_contracts = &borrow_global<Store>(staker).staking_contracts;
+    simple_map::borrow(staking_contracts, &operator).pool_address
+}
+
+ + + +
+ + + + + +## Function `last_recorded_principal` + +Return the last recorded principal (the amount that 100% belongs to the staker with commission already paid for) +for staking contract between the provided staker and operator. + +This errors out if the staking contract with the provided staker and operator doesn't exist. + + 
#[view]
+public fun last_recorded_principal(staker: address, operator: address): u64
+
+ + + +
+Implementation + + +
public fun last_recorded_principal(staker: address, operator: address): u64 acquires Store {
+    assert_staking_contract_exists(staker, operator);
+    let staking_contracts = &borrow_global<Store>(staker).staking_contracts;
+    simple_map::borrow(staking_contracts, &operator).principal
+}
+
+ + + +
+ + + + + +## Function `commission_percentage` + +Return percentage of accumulated rewards that will be paid to the operator as commission for staking contract +between the provided staker and operator. + +This errors out if the staking contract with the provided staker and operator doesn't exist. + + 
#[view]
+public fun commission_percentage(staker: address, operator: address): u64
+
+ + + +
+Implementation + + +
public fun commission_percentage(staker: address, operator: address): u64 acquires Store {
+    assert_staking_contract_exists(staker, operator);
+    let staking_contracts = &borrow_global<Store>(staker).staking_contracts;
+    simple_map::borrow(staking_contracts, &operator).commission_percentage
+}
+
+ + + +
+ + + + + +## Function `staking_contract_amounts` + +Return a tuple of three numbers: +1. The total active stake in the underlying stake pool +2. The total accumulated rewards that haven't had commission paid out +3. The commission amount owed from those accumulated rewards. + +This errors out if the staking contract with the provided staker and operator doesn't exist. + + 
#[view]
+public fun staking_contract_amounts(staker: address, operator: address): (u64, u64, u64)
+
+ + + +
+Implementation + + +
public fun staking_contract_amounts(staker: address, operator: address): (u64, u64, u64) acquires Store {
+    assert_staking_contract_exists(staker, operator);
+    let staking_contracts = &borrow_global<Store>(staker).staking_contracts;
+    let staking_contract = simple_map::borrow(staking_contracts, &operator);
+    get_staking_contract_amounts_internal(staking_contract)
+}
+
+ + + +
+ + + + + +## Function `pending_distribution_counts` + +Return the number of pending distributions (e.g. commission, withdrawals from stakers). + +This errors out if the staking contract with the provided staker and operator doesn't exist. + + 
#[view]
+public fun pending_distribution_counts(staker: address, operator: address): u64
+
+ + + +
+Implementation + + +
public fun pending_distribution_counts(staker: address, operator: address): u64 acquires Store {
+    assert_staking_contract_exists(staker, operator);
+    let staking_contracts = &borrow_global<Store>(staker).staking_contracts;
+    pool_u64::shareholders_count(&simple_map::borrow(staking_contracts, &operator).distribution_pool)
+}
+
+ + + +
+ + + +## Function `staking_contract_exists` + +Return true if the staking contract between the provided staker and operator exists. + + +
#[view]
+public fun staking_contract_exists(staker: address, operator: address): bool
+
+ + + +
+Implementation + + +
public fun staking_contract_exists(staker: address, operator: address): bool acquires Store {
+    if (!exists<Store>(staker)) {
+        return false
+    };
+
+    let store = borrow_global<Store>(staker);
+    simple_map::contains_key(&store.staking_contracts, &operator)
+}
+
+ + + +
+ + + +## Function `beneficiary_for_operator` + +Return the beneficiary address of the operator. + + +
#[view]
+public fun beneficiary_for_operator(operator: address): address
+
+ + + +
+Implementation + + +
public fun beneficiary_for_operator(operator: address): address acquires BeneficiaryForOperator {
+    if (exists<BeneficiaryForOperator>(operator)) {
+        return borrow_global<BeneficiaryForOperator>(operator).beneficiary_for_operator
+    } else {
+        operator
+    }
+}
+
+ + + +
+ + + +## Function `get_expected_stake_pool_address` + +Return the address of the stake pool to be created with the provided staker, operator and seed. + + +
#[view]
+public fun get_expected_stake_pool_address(staker: address, operator: address, contract_creation_seed: vector<u8>): address
+
+ + + +
+Implementation + + +
public fun get_expected_stake_pool_address(
+    staker: address,
+    operator: address,
+    contract_creation_seed: vector<u8>,
+): address {
+    let seed = create_resource_account_seed(staker, operator, contract_creation_seed);
+    account::create_resource_address(&staker, seed)
+}
+
+ + + +
+ + + +## Function `create_staking_contract` + +Staker can call this function to create a simple staking contract with a specified operator. + + +
public entry fun create_staking_contract(staker: &signer, operator: address, voter: address, amount: u64, commission_percentage: u64, contract_creation_seed: vector<u8>)
+
+ + + +
+Implementation + + +
public entry fun create_staking_contract(
+    staker: &signer,
+    operator: address,
+    voter: address,
+    amount: u64,
+    commission_percentage: u64,
+    // Optional seed used when creating the staking contract account.
+    contract_creation_seed: vector<u8>,
+) acquires Store {
+    let staked_coins = coin::withdraw<AptosCoin>(staker, amount);
+    create_staking_contract_with_coins(
+        staker, operator, voter, staked_coins, commission_percentage, contract_creation_seed);
+}
+
+ + + +
+ + + +## Function `create_staking_contract_with_coins` + +Staker can call this function to create a simple staking contract with a specified operator. + + +
public fun create_staking_contract_with_coins(staker: &signer, operator: address, voter: address, coins: coin::Coin<aptos_coin::AptosCoin>, commission_percentage: u64, contract_creation_seed: vector<u8>): address
+
+ + + +
+Implementation + + +
public fun create_staking_contract_with_coins(
+    staker: &signer,
+    operator: address,
+    voter: address,
+    coins: Coin<AptosCoin>,
+    commission_percentage: u64,
+    // Optional seed used when creating the staking contract account.
+    contract_creation_seed: vector<u8>,
+): address acquires Store {
+    assert!(
+        commission_percentage >= 0 && commission_percentage <= 100,
+        error::invalid_argument(EINVALID_COMMISSION_PERCENTAGE),
+    );
+    // The amount should be at least the min_stake_required, so the stake pool will be eligible to join the
+    // validator set.
+    let (min_stake_required, _) = staking_config::get_required_stake(&staking_config::get());
+    let principal = coin::value(&coins);
+    assert!(principal >= min_stake_required, error::invalid_argument(EINSUFFICIENT_STAKE_AMOUNT));
+
+    // Initialize Store resource if this is the first time the staker has delegated to anyone.
+    let staker_address = signer::address_of(staker);
+    if (!exists<Store>(staker_address)) {
+        move_to(staker, new_staking_contracts_holder(staker));
+    };
+
+    // Cannot create the staking contract if it already exists.
+    let store = borrow_global_mut<Store>(staker_address);
+    let staking_contracts = &mut store.staking_contracts;
+    assert!(
+        !simple_map::contains_key(staking_contracts, &operator),
+        error::already_exists(ESTAKING_CONTRACT_ALREADY_EXISTS)
+    );
+
+    // Initialize the stake pool in a new resource account. This allows the same staker to contract with multiple
+    // different operators.
+    let (stake_pool_signer, stake_pool_signer_cap, owner_cap) =
+        create_stake_pool(staker, operator, voter, contract_creation_seed);
+
+    // Add the stake to the stake pool.
+    stake::add_stake_with_cap(&owner_cap, coins);
+
+    // Create the contract record.
+    let pool_address = signer::address_of(&stake_pool_signer);
+    simple_map::add(staking_contracts, operator, StakingContract {
+        principal,
+        pool_address,
+        owner_cap,
+        commission_percentage,
+        // Make sure we don't have too many pending recipients in the distribution pool.
+        // Otherwise, a griefing attack is possible where the staker can keep switching operators and create too
+        // many pending distributions. This can lead to out-of-gas failure whenever distribute() is called.
+        distribution_pool: pool_u64::create(MAXIMUM_PENDING_DISTRIBUTIONS),
+        signer_cap: stake_pool_signer_cap,
+    });
+
+    if (std::features::module_event_migration_enabled()) {
+        emit(CreateStakingContract { operator, voter, pool_address, principal, commission_percentage });
+    };
+    emit_event(
+        &mut store.create_staking_contract_events,
+        CreateStakingContractEvent { operator, voter, pool_address, principal, commission_percentage },
+    );
+    pool_address
+}
+
+ + + +
+ + + +## Function `add_stake` + +Add more stake to an existing staking contract. + + +
public entry fun add_stake(staker: &signer, operator: address, amount: u64)
+
+ + + +
+Implementation + + +
public entry fun add_stake(staker: &signer, operator: address, amount: u64) acquires Store {
+    let staker_address = signer::address_of(staker);
+    assert_staking_contract_exists(staker_address, operator);
+
+    let store = borrow_global_mut<Store>(staker_address);
+    let staking_contract = simple_map::borrow_mut(&mut store.staking_contracts, &operator);
+
+    // Add the stake to the stake pool.
+    let staked_coins = coin::withdraw<AptosCoin>(staker, amount);
+    stake::add_stake_with_cap(&staking_contract.owner_cap, staked_coins);
+
+    staking_contract.principal = staking_contract.principal + amount;
+    let pool_address = staking_contract.pool_address;
+    if (std::features::module_event_migration_enabled()) {
+        emit(AddStake { operator, pool_address, amount });
+    };
+    emit_event(
+        &mut store.add_stake_events,
+        AddStakeEvent { operator, pool_address, amount },
+    );
+}
+
+ + + +
+ + + +## Function `update_voter` + +Convenient function to allow the staker to update the voter address in a staking contract they made. + + +
public entry fun update_voter(staker: &signer, operator: address, new_voter: address)
+
+ + + +
+Implementation + + +
public entry fun update_voter(staker: &signer, operator: address, new_voter: address) acquires Store {
+    let staker_address = signer::address_of(staker);
+    assert_staking_contract_exists(staker_address, operator);
+
+    let store = borrow_global_mut<Store>(staker_address);
+    let staking_contract = simple_map::borrow_mut(&mut store.staking_contracts, &operator);
+    let pool_address = staking_contract.pool_address;
+    let old_voter = stake::get_delegated_voter(pool_address);
+    stake::set_delegated_voter_with_cap(&staking_contract.owner_cap, new_voter);
+
+    if (std::features::module_event_migration_enabled()) {
+        emit(UpdateVoter { operator, pool_address, old_voter, new_voter });
+    };
+    emit_event(
+        &mut store.update_voter_events,
+        UpdateVoterEvent { operator, pool_address, old_voter, new_voter },
+    );
+
+}
+
+ + + +
+ + + +## Function `reset_lockup` + +Convenient function to allow the staker to reset their stake pool's lockup period to start now. + + +
public entry fun reset_lockup(staker: &signer, operator: address)
+
+ + + +
+Implementation + + +
public entry fun reset_lockup(staker: &signer, operator: address) acquires Store {
+    let staker_address = signer::address_of(staker);
+    assert_staking_contract_exists(staker_address, operator);
+
+    let store = borrow_global_mut<Store>(staker_address);
+    let staking_contract = simple_map::borrow_mut(&mut store.staking_contracts, &operator);
+    let pool_address = staking_contract.pool_address;
+    stake::increase_lockup_with_cap(&staking_contract.owner_cap);
+
+    if (std::features::module_event_migration_enabled()) {
+        emit(ResetLockup { operator, pool_address });
+    };
+    emit_event(&mut store.reset_lockup_events, ResetLockupEvent { operator, pool_address });
+}
+
+ + + +
+ + + +## Function `update_commision` + +Convenience function to allow a staker to update the commission percentage paid to the operator. +TODO: fix the typo in function name. commision -> commission + + +
public entry fun update_commision(staker: &signer, operator: address, new_commission_percentage: u64)
+
+ + + +
+Implementation + + +
public entry fun update_commision(
+    staker: &signer,
+    operator: address,
+    new_commission_percentage: u64
+) acquires Store, BeneficiaryForOperator, StakingGroupUpdateCommissionEvent {
+    assert!(
+        new_commission_percentage >= 0 && new_commission_percentage <= 100,
+        error::invalid_argument(EINVALID_COMMISSION_PERCENTAGE),
+    );
+
+    let staker_address = signer::address_of(staker);
+    assert!(exists<Store>(staker_address), error::not_found(ENO_STAKING_CONTRACT_FOUND_FOR_STAKER));
+
+    let store = borrow_global_mut<Store>(staker_address);
+    let staking_contract = simple_map::borrow_mut(&mut store.staking_contracts, &operator);
+    distribute_internal(staker_address, operator, staking_contract, &mut store.distribute_events);
+    request_commission_internal(
+        operator,
+        staking_contract,
+        &mut store.add_distribution_events,
+        &mut store.request_commission_events,
+    );
+    let old_commission_percentage = staking_contract.commission_percentage;
+    staking_contract.commission_percentage = new_commission_percentage;
+    if (!exists<StakingGroupUpdateCommissionEvent>(staker_address)) {
+        move_to(
+            staker,
+            StakingGroupUpdateCommissionEvent {
+                update_commission_events: account::new_event_handle<UpdateCommissionEvent>(
+                    staker
+                )
+            }
+        )
+    };
+    if (std::features::module_event_migration_enabled()) {
+        emit(
+            UpdateCommission { staker: staker_address, operator, old_commission_percentage, new_commission_percentage }
+        );
+    };
+    emit_event(
+        &mut borrow_global_mut<StakingGroupUpdateCommissionEvent>(staker_address).update_commission_events,
+        UpdateCommissionEvent { staker: staker_address, operator, old_commission_percentage, new_commission_percentage }
+    );
+}
+
+ + + +
+ + + +## Function `request_commission` + +Unlock commission amount from the stake pool. Operator needs to wait for the amount to become withdrawable +at the end of the stake pool's lockup period before they can actually can withdraw_commission. + +Only staker, operator or beneficiary can call this. + + +
public entry fun request_commission(account: &signer, staker: address, operator: address)
+
+ + + +
+Implementation + + +
public entry fun request_commission(
+    account: &signer,
+    staker: address,
+    operator: address
+) acquires Store, BeneficiaryForOperator {
+    let account_addr = signer::address_of(account);
+    assert!(
+        account_addr == staker || account_addr == operator || account_addr == beneficiary_for_operator(operator),
+        error::unauthenticated(ENOT_STAKER_OR_OPERATOR_OR_BENEFICIARY)
+    );
+    assert_staking_contract_exists(staker, operator);
+
+    let store = borrow_global_mut<Store>(staker);
+    let staking_contract = simple_map::borrow_mut(&mut store.staking_contracts, &operator);
+    // Short-circuit if zero commission.
+    if (staking_contract.commission_percentage == 0) {
+        return
+    };
+
+    // Force distribution of any already inactive stake.
+    distribute_internal(staker, operator, staking_contract, &mut store.distribute_events);
+
+    request_commission_internal(
+        operator,
+        staking_contract,
+        &mut store.add_distribution_events,
+        &mut store.request_commission_events,
+    );
+}
+
+ + + +
+ + + +## Function `request_commission_internal` + + + +
fun request_commission_internal(operator: address, staking_contract: &mut staking_contract::StakingContract, add_distribution_events: &mut event::EventHandle<staking_contract::AddDistributionEvent>, request_commission_events: &mut event::EventHandle<staking_contract::RequestCommissionEvent>): u64
+
+ + + +
+Implementation + + +
fun request_commission_internal(
+    operator: address,
+    staking_contract: &mut StakingContract,
+    add_distribution_events: &mut EventHandle<AddDistributionEvent>,
+    request_commission_events: &mut EventHandle<RequestCommissionEvent>,
+): u64 {
+    // Unlock just the commission portion from the stake pool.
+    let (total_active_stake, accumulated_rewards, commission_amount) =
+        get_staking_contract_amounts_internal(staking_contract);
+    staking_contract.principal = total_active_stake - commission_amount;
+
+    // Short-circuit if there's no commission to pay.
+    if (commission_amount == 0) {
+        return 0
+    };
+
+    // Add a distribution for the operator.
+    add_distribution(operator, staking_contract, operator, commission_amount, add_distribution_events);
+
+    // Request to unlock the commission from the stake pool.
+    // This won't become fully unlocked until the stake pool's lockup expires.
+    stake::unlock_with_cap(commission_amount, &staking_contract.owner_cap);
+
+    let pool_address = staking_contract.pool_address;
+    if (std::features::module_event_migration_enabled()) {
+        emit(RequestCommission { operator, pool_address, accumulated_rewards, commission_amount });
+    };
+    emit_event(
+        request_commission_events,
+        RequestCommissionEvent { operator, pool_address, accumulated_rewards, commission_amount },
+    );
+
+    commission_amount
+}
+
+ + + +
+ + + +## Function `unlock_stake` + +Staker can call this to request withdrawal of part or all of their staking_contract. +This also triggers paying commission to the operator for accounting simplicity. + + +
public entry fun unlock_stake(staker: &signer, operator: address, amount: u64)
+
+ + + +
+Implementation + + +
public entry fun unlock_stake(
+    staker: &signer,
+    operator: address,
+    amount: u64
+) acquires Store, BeneficiaryForOperator {
+    // Short-circuit if amount is 0.
+    if (amount == 0) return;
+
+    let staker_address = signer::address_of(staker);
+    assert_staking_contract_exists(staker_address, operator);
+
+    let store = borrow_global_mut<Store>(staker_address);
+    let staking_contract = simple_map::borrow_mut(&mut store.staking_contracts, &operator);
+
+    // Force distribution of any already inactive stake.
+    distribute_internal(staker_address, operator, staking_contract, &mut store.distribute_events);
+
+    // For simplicity, we request commission to be paid out first. This avoids having to ensure to staker doesn't
+    // withdraw into the commission portion.
+    let commission_paid = request_commission_internal(
+        operator,
+        staking_contract,
+        &mut store.add_distribution_events,
+        &mut store.request_commission_events,
+    );
+
+    // If there's less active stake remaining than the amount requested (potentially due to commission),
+    // only withdraw up to the active amount.
+    let (active, _, _, _) = stake::get_stake(staking_contract.pool_address);
+    if (active < amount) {
+        amount = active;
+    };
+    staking_contract.principal = staking_contract.principal - amount;
+
+    // Record a distribution for the staker.
+    add_distribution(operator, staking_contract, staker_address, amount, &mut store.add_distribution_events);
+
+    // Request to unlock the distribution amount from the stake pool.
+    // This won't become fully unlocked until the stake pool's lockup expires.
+    stake::unlock_with_cap(amount, &staking_contract.owner_cap);
+
+    let pool_address = staking_contract.pool_address;
+    if (std::features::module_event_migration_enabled()) {
+        emit(UnlockStake { pool_address, operator, amount, commission_paid });
+    };
+    emit_event(
+        &mut store.unlock_stake_events,
+        UnlockStakeEvent { pool_address, operator, amount, commission_paid },
+    );
+}
+
+ + + +
+ + + +## Function `unlock_rewards` + +Unlock all accumulated rewards since the last recorded principals. + + +
public entry fun unlock_rewards(staker: &signer, operator: address)
+
+ + + +
+Implementation + + +
public entry fun unlock_rewards(staker: &signer, operator: address) acquires Store, BeneficiaryForOperator {
+    let staker_address = signer::address_of(staker);
+    assert_staking_contract_exists(staker_address, operator);
+
+    // Calculate how much rewards belongs to the staker after commission is paid.
+    let (_, accumulated_rewards, unpaid_commission) = staking_contract_amounts(staker_address, operator);
+    let staker_rewards = accumulated_rewards - unpaid_commission;
+    unlock_stake(staker, operator, staker_rewards);
+}
+
+ + + +
+ + + + + +## Function `switch_operator_with_same_commission` + +Allows staker to switch operator without going through the lengthy process to unstake, without resetting commission. + + 
public entry fun switch_operator_with_same_commission(staker: &signer, old_operator: address, new_operator: address)
+
+ + + +
+Implementation + + +
public entry fun switch_operator_with_same_commission(
+    staker: &signer,
+    old_operator: address,
+    new_operator: address,
+) acquires Store, BeneficiaryForOperator {
+    let staker_address = signer::address_of(staker);
+    assert_staking_contract_exists(staker_address, old_operator);
+
+    let commission_percentage = commission_percentage(staker_address, old_operator);
+    switch_operator(staker, old_operator, new_operator, commission_percentage);
+}
+
+ + + +
+ + + + + +## Function `switch_operator` + +Allows staker to switch operator without going through the lengthy process to unstake. + + 
public entry fun switch_operator(staker: &signer, old_operator: address, new_operator: address, new_commission_percentage: u64)
+
+ + + +
+Implementation + + +
public entry fun switch_operator(
+    staker: &signer,
+    old_operator: address,
+    new_operator: address,
+    new_commission_percentage: u64,
+) acquires Store, BeneficiaryForOperator {
+    let staker_address = signer::address_of(staker);
+    assert_staking_contract_exists(staker_address, old_operator);
+
+    // Merging two existing staking contracts is too complex as we'd need to merge two separate stake pools.
+    let store = borrow_global_mut<Store>(staker_address);
+    let staking_contracts = &mut store.staking_contracts;
+    assert!(
+        !simple_map::contains_key(staking_contracts, &new_operator),
+        error::invalid_state(ECANT_MERGE_STAKING_CONTRACTS),
+    );
+
+    let (_, staking_contract) = simple_map::remove(staking_contracts, &old_operator);
+    // Force distribution of any already inactive stake.
+    distribute_internal(staker_address, old_operator, &mut staking_contract, &mut store.distribute_events);
+
+    // For simplicity, we request commission to be paid out first. This avoids having to ensure to staker doesn't
+    // withdraw into the commission portion.
+    request_commission_internal(
+        old_operator,
+        &mut staking_contract,
+        &mut store.add_distribution_events,
+        &mut store.request_commission_events,
+    );
+
+    // Update the staking contract's commission rate and stake pool's operator.
+    stake::set_operator_with_cap(&staking_contract.owner_cap, new_operator);
+    staking_contract.commission_percentage = new_commission_percentage;
+
+    let pool_address = staking_contract.pool_address;
+    simple_map::add(staking_contracts, new_operator, staking_contract);
+    if (std::features::module_event_migration_enabled()) {
+        emit(SwitchOperator { pool_address, old_operator, new_operator });
+    };
+    emit_event(
+        &mut store.switch_operator_events,
+        SwitchOperatorEvent { pool_address, old_operator, new_operator }
+    );
+}
+
+ + + +
+ + + + + +## Function `set_beneficiary_for_operator` + +Allows an operator to change its beneficiary. Any existing unpaid commission rewards will be paid to the new +beneficiary. To ensure payment to the current beneficiary, one should first call distribute before switching +the beneficiary. An operator can set one beneficiary for staking contract pools, not a separate one for each pool. + + 
public entry fun set_beneficiary_for_operator(operator: &signer, new_beneficiary: address)
+
+ + + +
+Implementation + + +
public entry fun set_beneficiary_for_operator(
+    operator: &signer,
+    new_beneficiary: address
+) acquires BeneficiaryForOperator {
+    assert!(features::operator_beneficiary_change_enabled(), std::error::invalid_state(
+        EOPERATOR_BENEFICIARY_CHANGE_NOT_SUPPORTED
+    ));
+    // The beneficiay address of an operator is stored under the operator's address.
+    // So, the operator does not need to be validated with respect to a staking pool.
+    let operator_addr = signer::address_of(operator);
+    let old_beneficiary = beneficiary_for_operator(operator_addr);
+    if (exists<BeneficiaryForOperator>(operator_addr)) {
+        borrow_global_mut<BeneficiaryForOperator>(operator_addr).beneficiary_for_operator = new_beneficiary;
+    } else {
+        move_to(operator, BeneficiaryForOperator { beneficiary_for_operator: new_beneficiary });
+    };
+
+    emit(SetBeneficiaryForOperator {
+        operator: operator_addr,
+        old_beneficiary,
+        new_beneficiary,
+    });
+}
+
+ + + +
+ + + +## Function `distribute` + +Allow anyone to distribute already unlocked funds. This does not affect reward compounding and therefore does +not need to be restricted to just the staker or operator. + + +
public entry fun distribute(staker: address, operator: address)
+
+ + + +
+Implementation + + +
public entry fun distribute(staker: address, operator: address) acquires Store, BeneficiaryForOperator {
+    assert_staking_contract_exists(staker, operator);
+    let store = borrow_global_mut<Store>(staker);
+    let staking_contract = simple_map::borrow_mut(&mut store.staking_contracts, &operator);
+    distribute_internal(staker, operator, staking_contract, &mut store.distribute_events);
+}
+
+ + + +
+ + + +## Function `distribute_internal` + +Distribute all unlocked (inactive) funds according to distribution shares. + + +
fun distribute_internal(staker: address, operator: address, staking_contract: &mut staking_contract::StakingContract, distribute_events: &mut event::EventHandle<staking_contract::DistributeEvent>)
+
+ + + +
+Implementation + + +
fun distribute_internal(
+    staker: address,
+    operator: address,
+    staking_contract: &mut StakingContract,
+    distribute_events: &mut EventHandle<DistributeEvent>,
+) acquires BeneficiaryForOperator {
+    let pool_address = staking_contract.pool_address;
+    let (_, inactive, _, pending_inactive) = stake::get_stake(pool_address);
+    let total_potential_withdrawable = inactive + pending_inactive;
+    let coins = stake::withdraw_with_cap(&staking_contract.owner_cap, total_potential_withdrawable);
+    let distribution_amount = coin::value(&coins);
+    if (distribution_amount == 0) {
+        coin::destroy_zero(coins);
+        return
+    };
+
+    let distribution_pool = &mut staking_contract.distribution_pool;
+    update_distribution_pool(
+        distribution_pool, distribution_amount, operator, staking_contract.commission_percentage);
+
+    // Buy all recipients out of the distribution pool.
+    while (pool_u64::shareholders_count(distribution_pool) > 0) {
+        let recipients = pool_u64::shareholders(distribution_pool);
+        let recipient = *vector::borrow(&mut recipients, 0);
+        let current_shares = pool_u64::shares(distribution_pool, recipient);
+        let amount_to_distribute = pool_u64::redeem_shares(distribution_pool, recipient, current_shares);
+        // If the recipient is the operator, send the commission to the beneficiary instead.
+        if (recipient == operator) {
+            recipient = beneficiary_for_operator(operator);
+        };
+        aptos_account::deposit_coins(recipient, coin::extract(&mut coins, amount_to_distribute));
+
+        if (std::features::module_event_migration_enabled()) {
+            emit(Distribute { operator, pool_address, recipient, amount: amount_to_distribute });
+        };
+        emit_event(
+            distribute_events,
+            DistributeEvent { operator, pool_address, recipient, amount: amount_to_distribute }
+        );
+    };
+
+    // In case there's any dust left, send them all to the staker.
+    if (coin::value(&coins) > 0) {
+        aptos_account::deposit_coins(staker, coins);
+        pool_u64::update_total_coins(distribution_pool, 0);
+    } else {
+        coin::destroy_zero(coins);
+    }
+}
+
+ + + +
+ + + +## Function `assert_staking_contract_exists` + +Assert that a staking_contract exists for the staker/operator pair. + + +
fun assert_staking_contract_exists(staker: address, operator: address)
+
+ + + +
+Implementation + + +
fun assert_staking_contract_exists(staker: address, operator: address) acquires Store {
+    assert!(exists<Store>(staker), error::not_found(ENO_STAKING_CONTRACT_FOUND_FOR_STAKER));
+    let staking_contracts = &mut borrow_global_mut<Store>(staker).staking_contracts;
+    assert!(
+        simple_map::contains_key(staking_contracts, &operator),
+        error::not_found(ENO_STAKING_CONTRACT_FOUND_FOR_OPERATOR),
+    );
+}
+
+ + + +
+ + + +## Function `add_distribution` + +Add a new distribution for recipient and amount to the staking contract's distributions list. + + +
fun add_distribution(operator: address, staking_contract: &mut staking_contract::StakingContract, recipient: address, coins_amount: u64, add_distribution_events: &mut event::EventHandle<staking_contract::AddDistributionEvent>)
+
+ + + +
+Implementation + + +
fun add_distribution(
+    operator: address,
+    staking_contract: &mut StakingContract,
+    recipient: address,
+    coins_amount: u64,
+    add_distribution_events: &mut EventHandle<AddDistributionEvent>
+) {
+    let distribution_pool = &mut staking_contract.distribution_pool;
+    let (_, _, _, total_distribution_amount) = stake::get_stake(staking_contract.pool_address);
+    update_distribution_pool(
+        distribution_pool, total_distribution_amount, operator, staking_contract.commission_percentage);
+
+    pool_u64::buy_in(distribution_pool, recipient, coins_amount);
+    let pool_address = staking_contract.pool_address;
+    if (std::features::module_event_migration_enabled()) {
+        emit(AddDistribution { operator, pool_address, amount: coins_amount });
+    };
+    emit_event(
+        add_distribution_events,
+        AddDistributionEvent { operator, pool_address, amount: coins_amount }
+    );
+}
+
+ + + +
+ + + +## Function `get_staking_contract_amounts_internal` + +Calculate accumulated rewards and commissions since last update. + + +
fun get_staking_contract_amounts_internal(staking_contract: &staking_contract::StakingContract): (u64, u64, u64)
+
+ + + +
+Implementation + + +
fun get_staking_contract_amounts_internal(staking_contract: &StakingContract): (u64, u64, u64) {
+    // Pending_inactive is not included in the calculation because pending_inactive can only come from:
+    // 1. Outgoing commissions. This means commission has already been extracted.
+    // 2. Stake withdrawals from stakers. This also means commission has already been extracted as
+    // request_commission_internal is called in unlock_stake
+    let (active, _, pending_active, _) = stake::get_stake(staking_contract.pool_address);
+    let total_active_stake = active + pending_active;
+    let accumulated_rewards = total_active_stake - staking_contract.principal;
+    let commission_amount = accumulated_rewards * staking_contract.commission_percentage / 100;
+
+    (total_active_stake, accumulated_rewards, commission_amount)
+}
+
+ + + +
+ + + +## Function `create_stake_pool` + + + +
fun create_stake_pool(staker: &signer, operator: address, voter: address, contract_creation_seed: vector<u8>): (signer, account::SignerCapability, stake::OwnerCapability)
+
+ + + +
+Implementation + + +
fun create_stake_pool(
+    staker: &signer,
+    operator: address,
+    voter: address,
+    contract_creation_seed: vector<u8>,
+): (signer, SignerCapability, OwnerCapability) {
+    // Generate a seed that will be used to create the resource account that hosts the staking contract.
+    let seed = create_resource_account_seed(
+        signer::address_of(staker), operator, contract_creation_seed);
+
+    let (stake_pool_signer, stake_pool_signer_cap) = account::create_resource_account(staker, seed);
+    stake::initialize_stake_owner(&stake_pool_signer, 0, operator, voter);
+
+    // Extract owner_cap from the StakePool, so we have control over it in the staking_contracts flow.
+    // This is stored as part of the staking_contract. Thus, the staker would not have direct control over it without
+    // going through well-defined functions in this module.
+    let owner_cap = stake::extract_owner_cap(&stake_pool_signer);
+
+    (stake_pool_signer, stake_pool_signer_cap, owner_cap)
+}
+
+ + + +
+ + + +## Function `update_distribution_pool` + + + +
fun update_distribution_pool(distribution_pool: &mut pool_u64::Pool, updated_total_coins: u64, operator: address, commission_percentage: u64)
+
+ + + +
+Implementation + + +
fun update_distribution_pool(
+    distribution_pool: &mut Pool,
+    updated_total_coins: u64,
+    operator: address,
+    commission_percentage: u64,
+) {
+    // Short-circuit and do nothing if the pool's total value has not changed.
+    if (pool_u64::total_coins(distribution_pool) == updated_total_coins) {
+        return
+    };
+
+    // Charge all stakeholders (except for the operator themselves) commission on any rewards earned relative to the
+    // previous value of the distribution pool.
+    let shareholders = &pool_u64::shareholders(distribution_pool);
+    vector::for_each_ref(shareholders, |shareholder| {
+        let shareholder: address = *shareholder;
+        if (shareholder != operator) {
+            let shares = pool_u64::shares(distribution_pool, shareholder);
+            let previous_worth = pool_u64::balance(distribution_pool, shareholder);
+            let current_worth = pool_u64::shares_to_amount_with_total_coins(
+                distribution_pool, shares, updated_total_coins);
+            let unpaid_commission = (current_worth - previous_worth) * commission_percentage / 100;
+            // Transfer shares from current shareholder to the operator as payment.
+            // The value of the shares should use the updated pool's total value.
+            let shares_to_transfer = pool_u64::amount_to_shares_with_total_coins(
+                distribution_pool, unpaid_commission, updated_total_coins);
+            pool_u64::transfer_shares(distribution_pool, shareholder, operator, shares_to_transfer);
+        };
+    });
+
+    pool_u64::update_total_coins(distribution_pool, updated_total_coins);
+}
+
+ + + +
+ + + +## Function `create_resource_account_seed` + +Create the seed to derive the resource account address. + + +
fun create_resource_account_seed(staker: address, operator: address, contract_creation_seed: vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
fun create_resource_account_seed(
+    staker: address,
+    operator: address,
+    contract_creation_seed: vector<u8>,
+): vector<u8> {
+    let seed = bcs::to_bytes(&staker);
+    vector::append(&mut seed, bcs::to_bytes(&operator));
+    // Include a salt to avoid conflicts with any other modules out there that might also generate
+    // deterministic resource accounts for the same staker + operator addresses.
+    vector::append(&mut seed, SALT);
+    // Add an extra salt given by the staker in case an account with the same address has already been created.
+    vector::append(&mut seed, contract_creation_seed);
+    seed
+}
+
+ + + +
+ + + +## Function `new_staking_contracts_holder` + +Create a new staking_contracts resource. + + +
fun new_staking_contracts_holder(staker: &signer): staking_contract::Store
+
+ + + +
+Implementation + + +
fun new_staking_contracts_holder(staker: &signer): Store {
+    Store {
+        staking_contracts: simple_map::create<address, StakingContract>(),
+        // Events.
+        create_staking_contract_events: account::new_event_handle<CreateStakingContractEvent>(staker),
+        update_voter_events: account::new_event_handle<UpdateVoterEvent>(staker),
+        reset_lockup_events: account::new_event_handle<ResetLockupEvent>(staker),
+        add_stake_events: account::new_event_handle<AddStakeEvent>(staker),
+        request_commission_events: account::new_event_handle<RequestCommissionEvent>(staker),
+        unlock_stake_events: account::new_event_handle<UnlockStakeEvent>(staker),
+        switch_operator_events: account::new_event_handle<SwitchOperatorEvent>(staker),
+        add_distribution_events: account::new_event_handle<AddDistributionEvent>(staker),
+        distribute_events: account::new_event_handle<DistributeEvent>(staker),
+    }
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1The Store structure for the staker exists after the staking contract is created.MediumThe create_staking_contract_with_coins function ensures that the staker account has a Store structure assigned.Formally verified via CreateStakingContractWithCoinsAbortsifAndEnsures.
2A staking contract is created and stored in a mapping within the Store resource.HighThe create_staking_contract_with_coins function adds the newly created StakingContract to the staking_contracts map with the operator as a key of the Store resource, effectively storing the staking contract.Formally verified via CreateStakingContractWithCoinsAbortsifAndEnsures.
3Adding stake to the stake pool increases the principal value of the pool, reflecting the additional stake amount.HighThe add_stake function transfers the specified amount of staked coins from the staker's account to the stake pool associated with the staking contract. It increases the principal value of the staking contract by the added stake amount.Formally verified via add_stake.
4The staker may update the voter of a staking contract, enabling them to modify the assigned voter address and ensure it accurately reflects their desired choice.HighThe update_voter function ensures that the voter address in a staking contract may be updated by the staker, resulting in the modification of the delegated voter address in the associated stake pool to reflect the new address provided.Formally verified via update_voter.
5Only the owner of the stake pool has the permission to reset the lockup period of the pool.CriticalThe reset_lockup function ensures that only the staker who owns the stake pool has the authority to reset the lockup period of the pool.Formally verified via reset_lockup.
6Unlocked funds are correctly distributed to recipients based on their distribution shares, taking into account the associated commission percentage.HighThe distribution process, implemented in the distribute_internal function, accurately allocates unlocked funds to their intended recipients based on their distribution shares. It guarantees that each recipient receives the correct amount of funds, considering the commission percentage associated with the staking contract.Audited that the correct amount of unlocked funds is distributed according to distribution shares.
7The stake pool ensures that the commission is correctly requested and paid out from the old operator's stake pool before allowing the switch to the new operator.HighThe switch_operator function initiates the commission payout from the stake pool associated with the old operator, ensuring a smooth transition. Paying out the commission before the switch guarantees that the staker receives the appropriate commission amount and maintains the integrity of the staking process.Audited that the commission is paid to the old operator.
8Stakers can withdraw their funds from the staking contract, ensuring the unlocked amount becomes available for withdrawal after the lockup period.HighThe unlock_stake function ensures that the requested amount is properly unlocked from the stake pool, considering the lockup period and that the funds become available for withdrawal when the lockup expires.Audited that funds are unlocked properly.
+ + + + + + +### Module-level Specification + + +
pragma verify = true;
+pragma aborts_if_is_strict;
+
+ + + + + +### Struct `StakingContract` + + +
struct StakingContract has store
+
+ + + +
+
+principal: u64 +
+
+ +
+
+pool_address: address +
+
+ +
+
+owner_cap: stake::OwnerCapability +
+
+ +
+
+commission_percentage: u64 +
+
+ +
+
+distribution_pool: pool_u64::Pool +
+
+ +
+
+signer_cap: account::SignerCapability +
+
+ +
+
+ + + +
invariant commission_percentage >= 0 && commission_percentage <= 100;
+
+ + + + + +### Function `stake_pool_address` + + +
#[view]
+public fun stake_pool_address(staker: address, operator: address): address
+
+ + + + +
include ContractExistsAbortsIf;
+let staking_contracts = global<Store>(staker).staking_contracts;
+ensures result == simple_map::spec_get(staking_contracts, operator).pool_address;
+
+ + + + + +### Function `last_recorded_principal` + + +
#[view]
+public fun last_recorded_principal(staker: address, operator: address): u64
+
+ + +The staking_contract exists for the staker/operator pair. + + +
include ContractExistsAbortsIf;
+let staking_contracts = global<Store>(staker).staking_contracts;
+ensures result == simple_map::spec_get(staking_contracts, operator).principal;
+
+ + + + + +### Function `commission_percentage` + + +
#[view]
+public fun commission_percentage(staker: address, operator: address): u64
+
+ + +The staking_contract exists for the staker/operator pair. + + +
include ContractExistsAbortsIf;
+let staking_contracts = global<Store>(staker).staking_contracts;
+ensures result == simple_map::spec_get(staking_contracts, operator).commission_percentage;
+
+ + + + + +### Function `staking_contract_amounts` + + +
#[view]
+public fun staking_contract_amounts(staker: address, operator: address): (u64, u64, u64)
+
+ + +The staking_contract exists for the staker/operator pair. + + +
pragma verify_duration_estimate = 120;
+let staking_contracts = global<Store>(staker).staking_contracts;
+let staking_contract = simple_map::spec_get(staking_contracts, operator);
+include ContractExistsAbortsIf;
+include GetStakingContractAmountsAbortsIf { staking_contract };
+let pool_address = staking_contract.pool_address;
+let stake_pool = global<stake::StakePool>(pool_address);
+let active = coin::value(stake_pool.active);
+let pending_active = coin::value(stake_pool.pending_active);
+let total_active_stake = active + pending_active;
+let accumulated_rewards = total_active_stake - staking_contract.principal;
+ensures result_1 == total_active_stake;
+ensures result_2 == accumulated_rewards;
+
+ + + + + +### Function `pending_distribution_counts` + + +
#[view]
+public fun pending_distribution_counts(staker: address, operator: address): u64
+
+ + +The staking_contract exists for the staker/operator pair. + + +
include ContractExistsAbortsIf;
+let staking_contracts = global<Store>(staker).staking_contracts;
+let staking_contract = simple_map::spec_get(staking_contracts, operator);
+let shareholders_count = len(staking_contract.distribution_pool.shareholders);
+ensures result == shareholders_count;
+
+ + + + + +### Function `staking_contract_exists` + + +
#[view]
+public fun staking_contract_exists(staker: address, operator: address): bool
+
+ + + + +
aborts_if false;
+ensures result == spec_staking_contract_exists(staker, operator);
+
+ + + + + + + +
fun spec_staking_contract_exists(staker: address, operator: address): bool {
+   if (!exists<Store>(staker)) {
+       false
+   } else {
+       let store = global<Store>(staker);
+       simple_map::spec_contains_key(store.staking_contracts, operator)
+   }
+}
+
+ + + + + +### Function `beneficiary_for_operator` + + +
#[view]
+public fun beneficiary_for_operator(operator: address): address
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `create_staking_contract` + + +
public entry fun create_staking_contract(staker: &signer, operator: address, voter: address, amount: u64, commission_percentage: u64, contract_creation_seed: vector<u8>)
+
+ + +The account is not frozen and has sufficient balance to withdraw. + + +
pragma aborts_if_is_partial;
+pragma verify_duration_estimate = 120;
+include PreconditionsInCreateContract;
+include WithdrawAbortsIf<AptosCoin> { account: staker };
+include CreateStakingContractWithCoinsAbortsIfAndEnsures;
+
+ + + + + +### Function `create_staking_contract_with_coins` + + +
public fun create_staking_contract_with_coins(staker: &signer, operator: address, voter: address, coins: coin::Coin<aptos_coin::AptosCoin>, commission_percentage: u64, contract_creation_seed: vector<u8>): address
+
+ + +The amount should be at least the min_stake_required, so the stake pool will be eligible to join the validator set. +Initialize Store resource if this is the first time the staker has delegated to anyone. +Cannot create the staking contract if it already exists. + + +
pragma verify_duration_estimate = 120;
+pragma aborts_if_is_partial;
+include PreconditionsInCreateContract;
+let amount = coins.value;
+include CreateStakingContractWithCoinsAbortsIfAndEnsures { amount };
+
+ + + + + +### Function `add_stake` + + +
public entry fun add_stake(staker: &signer, operator: address, amount: u64)
+
+ + +The account is not frozen and has sufficient balance to withdraw. +The staking_contract exists for the staker/operator pair. + + +
pragma verify_duration_estimate = 600;
+include stake::ResourceRequirement;
+aborts_if reconfiguration_state::spec_is_in_progress();
+let staker_address = signer::address_of(staker);
+include ContractExistsAbortsIf { staker: staker_address };
+let store = global<Store>(staker_address);
+let staking_contract = simple_map::spec_get(store.staking_contracts, operator);
+include WithdrawAbortsIf<AptosCoin> { account: staker };
+let balance = global<coin::CoinStore<AptosCoin>>(staker_address).coin.value;
+let post post_coin = global<coin::CoinStore<AptosCoin>>(staker_address).coin.value;
+ensures post_coin == balance - amount;
+let owner_cap = staking_contract.owner_cap;
+include stake::AddStakeWithCapAbortsIfAndEnsures { owner_cap };
+let post post_store = global<Store>(staker_address);
+let post post_staking_contract = simple_map::spec_get(post_store.staking_contracts, operator);
+aborts_if staking_contract.principal + amount > MAX_U64;
+// This enforces high-level requirement 3:
+ensures post_staking_contract.principal == staking_contract.principal + amount;
+
+ + + + + +### Function `update_voter` + + +
public entry fun update_voter(staker: &signer, operator: address, new_voter: address)
+
+ + +The staking_contract exists for the staker/operator pair. + + +
let staker_address = signer::address_of(staker);
+include UpdateVoterSchema { staker: staker_address };
+let post store = global<Store>(staker_address);
+let post staking_contract = simple_map::spec_get(store.staking_contracts, operator);
+let post pool_address = staking_contract.owner_cap.pool_address;
+let post new_delegated_voter = global<stake::StakePool>(pool_address).delegated_voter;
+// This enforces high-level requirement 4:
+ensures new_delegated_voter == new_voter;
+
+ + + + + +### Function `reset_lockup` + + +
public entry fun reset_lockup(staker: &signer, operator: address)
+
+ + +The staking_contract exists for the staker/operator pair. +Only an active validator can update locked_until_secs. + + +
let staker_address = signer::address_of(staker);
+// This enforces high-level requirement 5:
+include ContractExistsAbortsIf { staker: staker_address };
+include IncreaseLockupWithCapAbortsIf { staker: staker_address };
+
+ + + + + +### Function `update_commision` + + +
public entry fun update_commision(staker: &signer, operator: address, new_commission_percentage: u64)
+
+ + + + +
pragma verify = false;
+let staker_address = signer::address_of(staker);
+aborts_if new_commission_percentage > 100;
+include ContractExistsAbortsIf { staker: staker_address };
+
+ + + + + +### Function `request_commission` + + +
public entry fun request_commission(account: &signer, staker: address, operator: address)
+
+ + +Only the staker or operator can call this. + + +
pragma verify = false;
+let account_addr = signer::address_of(account);
+include ContractExistsAbortsIf { staker };
+aborts_if account_addr != staker && account_addr != operator;
+
+ + + + + +### Function `request_commission_internal` + + +
fun request_commission_internal(operator: address, staking_contract: &mut staking_contract::StakingContract, add_distribution_events: &mut event::EventHandle<staking_contract::AddDistributionEvent>, request_commission_events: &mut event::EventHandle<staking_contract::RequestCommissionEvent>): u64
+
+ + + + +
pragma verify = false;
+include GetStakingContractAmountsAbortsIf;
+
+ + + + + +### Function `unlock_stake` + + +
public entry fun unlock_stake(staker: &signer, operator: address, amount: u64)
+
+ + + + +
pragma verify = false;
+let staker_address = signer::address_of(staker);
+include ContractExistsAbortsIf { staker: staker_address };
+
+ + + + + +### Function `unlock_rewards` + + +
public entry fun unlock_rewards(staker: &signer, operator: address)
+
+ + +The staking_contract exists for the staker/operator pair. + + +
pragma verify = false;
+let staker_address = signer::address_of(staker);
+let staking_contracts = global<Store>(staker_address).staking_contracts;
+let staking_contract = simple_map::spec_get(staking_contracts, operator);
+include ContractExistsAbortsIf { staker: staker_address };
+
+ + + + + +### Function `switch_operator_with_same_commission` + + +
public entry fun switch_operator_with_same_commission(staker: &signer, old_operator: address, new_operator: address)
+
+ + +The staking_contract exists for the staker/operator pair. + + +
pragma verify_duration_estimate = 120;
+pragma aborts_if_is_partial;
+let staker_address = signer::address_of(staker);
+include ContractExistsAbortsIf { staker: staker_address, operator: old_operator };
+
+ + + + + +### Function `switch_operator` + + +
public entry fun switch_operator(staker: &signer, old_operator: address, new_operator: address, new_commission_percentage: u64)
+
+ + +The staking_contract exists for the staker/operator pair. + + +
pragma verify = false;
+let staker_address = signer::address_of(staker);
+include ContractExistsAbortsIf { staker: staker_address, operator: old_operator };
+let store = global<Store>(staker_address);
+let staking_contracts = store.staking_contracts;
+aborts_if simple_map::spec_contains_key(staking_contracts, new_operator);
+
+ + + + + +### Function `set_beneficiary_for_operator` + + +
public entry fun set_beneficiary_for_operator(operator: &signer, new_beneficiary: address)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `distribute` + + +
public entry fun distribute(staker: address, operator: address)
+
+ + +The staking_contract exists for the staker/operator pair. + + +
pragma verify_duration_estimate = 120;
+pragma aborts_if_is_partial;
+include ContractExistsAbortsIf;
+
+ + + + + +### Function `distribute_internal` + + +
fun distribute_internal(staker: address, operator: address, staking_contract: &mut staking_contract::StakingContract, distribute_events: &mut event::EventHandle<staking_contract::DistributeEvent>)
+
+ + +The StakePool exists under the pool_address of StakingContract. +The value of inactive and pending_inactive in the stake_pool is up to MAX_U64. + + +
pragma verify_duration_estimate = 120;
+pragma aborts_if_is_partial;
+let pool_address = staking_contract.pool_address;
+let stake_pool = borrow_global<stake::StakePool>(pool_address);
+aborts_if !exists<stake::StakePool>(pool_address);
+aborts_if stake_pool.inactive.value + stake_pool.pending_inactive.value > MAX_U64;
+aborts_if !exists<stake::StakePool>(staking_contract.owner_cap.pool_address);
+
+ + + + + +### Function `assert_staking_contract_exists` + + +
fun assert_staking_contract_exists(staker: address, operator: address)
+
+ + +The staking_contract exists for the staker/operator pair. + + +
include ContractExistsAbortsIf;
+
+ + + + + +### Function `add_distribution` + + +
fun add_distribution(operator: address, staking_contract: &mut staking_contract::StakingContract, recipient: address, coins_amount: u64, add_distribution_events: &mut event::EventHandle<staking_contract::AddDistributionEvent>)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `get_staking_contract_amounts_internal` + + +
fun get_staking_contract_amounts_internal(staking_contract: &staking_contract::StakingContract): (u64, u64, u64)
+
+ + +The StakePool exists under the pool_address of StakingContract. + + +
pragma verify_duration_estimate = 120;
+include GetStakingContractAmountsAbortsIf;
+let pool_address = staking_contract.pool_address;
+let stake_pool = global<stake::StakePool>(pool_address);
+let active = coin::value(stake_pool.active);
+let pending_active = coin::value(stake_pool.pending_active);
+let total_active_stake = active + pending_active;
+let accumulated_rewards = total_active_stake - staking_contract.principal;
+let commission_amount = accumulated_rewards * staking_contract.commission_percentage / 100;
+ensures result_1 == total_active_stake;
+ensures result_2 == accumulated_rewards;
+ensures result_3 == commission_amount;
+
+ + + + + +### Function `create_stake_pool` + + +
fun create_stake_pool(staker: &signer, operator: address, voter: address, contract_creation_seed: vector<u8>): (signer, account::SignerCapability, stake::OwnerCapability)
+
+ + + + +
pragma verify_duration_estimate = 120;
+include stake::ResourceRequirement;
+let staker_address = signer::address_of(staker);
+let seed_0 = bcs::to_bytes(staker_address);
+let seed_1 = concat(concat(concat(seed_0, bcs::to_bytes(operator)), SALT), contract_creation_seed);
+let resource_addr = account::spec_create_resource_address(staker_address, seed_1);
+include CreateStakePoolAbortsIf { resource_addr };
+ensures exists<account::Account>(resource_addr);
+let post post_account = global<account::Account>(resource_addr);
+ensures post_account.authentication_key == account::ZERO_AUTH_KEY;
+ensures post_account.signer_capability_offer.for == std::option::spec_some(resource_addr);
+ensures exists<stake::StakePool>(resource_addr);
+let post post_owner_cap = global<stake::OwnerCapability>(resource_addr);
+let post post_pool_address = post_owner_cap.pool_address;
+let post post_stake_pool = global<stake::StakePool>(post_pool_address);
+let post post_operator = post_stake_pool.operator_address;
+let post post_delegated_voter = post_stake_pool.delegated_voter;
+ensures resource_addr != operator ==> post_operator == operator;
+ensures resource_addr != voter ==> post_delegated_voter == voter;
+ensures signer::address_of(result_1) == resource_addr;
+ensures result_2 == SignerCapability { account: resource_addr };
+ensures result_3 == OwnerCapability { pool_address: resource_addr };
+
+ + + + + +### Function `update_distribution_pool` + + +
fun update_distribution_pool(distribution_pool: &mut pool_u64::Pool, updated_total_coins: u64, operator: address, commission_percentage: u64)
+
+ + + + +
pragma aborts_if_is_partial;
+
+ + + + + +### Function `new_staking_contracts_holder` + + +
fun new_staking_contracts_holder(staker: &signer): staking_contract::Store
+
+ + +The Account exists under the staker. +The guid_creation_num of the account resource is up to MAX_U64. + + +
include NewStakingContractsHolderAbortsIf;
+
+ + + + + + + +
schema NewStakingContractsHolderAbortsIf {
+    staker: signer;
+    let addr = signer::address_of(staker);
+    let account = global<account::Account>(addr);
+    aborts_if !exists<account::Account>(addr);
+    aborts_if account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM;
+    aborts_if account.guid_creation_num + 9 > MAX_U64;
+}
+
+ + +The Store exists under the staker. +A staking_contract exists for the staker/operator pair. + + + + + +
schema ContractExistsAbortsIf {
+    staker: address;
+    operator: address;
+    aborts_if !exists<Store>(staker);
+    let staking_contracts = global<Store>(staker).staking_contracts;
+    aborts_if !simple_map::spec_contains_key(staking_contracts, operator);
+}
+
+ + + + + + + +
schema UpdateVoterSchema {
+    staker: address;
+    operator: address;
+    let store = global<Store>(staker);
+    let staking_contract = simple_map::spec_get(store.staking_contracts, operator);
+    let pool_address = staking_contract.pool_address;
+    aborts_if !exists<stake::StakePool>(pool_address);
+    aborts_if !exists<stake::StakePool>(staking_contract.owner_cap.pool_address);
+    include ContractExistsAbortsIf;
+}
+
+ + + + + + + +
schema WithdrawAbortsIf<CoinType> {
+    account: signer;
+    amount: u64;
+    let account_addr = signer::address_of(account);
+    let coin_store = global<coin::CoinStore<CoinType>>(account_addr);
+    let balance = coin_store.coin.value;
+    aborts_if !exists<coin::CoinStore<CoinType>>(account_addr);
+    aborts_if coin_store.frozen;
+    aborts_if balance < amount;
+}
+
+ + + + + + + +
schema GetStakingContractAmountsAbortsIf {
+    staking_contract: StakingContract;
+    let pool_address = staking_contract.pool_address;
+    let stake_pool = global<stake::StakePool>(pool_address);
+    let active = coin::value(stake_pool.active);
+    let pending_active = coin::value(stake_pool.pending_active);
+    let total_active_stake = active + pending_active;
+    let accumulated_rewards = total_active_stake - staking_contract.principal;
+    aborts_if !exists<stake::StakePool>(pool_address);
+    aborts_if active + pending_active > MAX_U64;
+    aborts_if total_active_stake < staking_contract.principal;
+    aborts_if accumulated_rewards * staking_contract.commission_percentage > MAX_U64;
+}
+
+ + + + + + + +
schema IncreaseLockupWithCapAbortsIf {
+    staker: address;
+    operator: address;
+    let store = global<Store>(staker);
+    let staking_contract = simple_map::spec_get(store.staking_contracts, operator);
+    let pool_address = staking_contract.owner_cap.pool_address;
+    aborts_if !stake::stake_pool_exists(pool_address);
+    aborts_if !exists<staking_config::StakingConfig>(@aptos_framework);
+    let config = global<staking_config::StakingConfig>(@aptos_framework);
+    let stake_pool = global<stake::StakePool>(pool_address);
+    let old_locked_until_secs = stake_pool.locked_until_secs;
+    let seconds = global<timestamp::CurrentTimeMicroseconds>(
+        @aptos_framework
+    ).microseconds / timestamp::MICRO_CONVERSION_FACTOR;
+    let new_locked_until_secs = seconds + config.recurring_lockup_duration_secs;
+    aborts_if seconds + config.recurring_lockup_duration_secs > MAX_U64;
+    aborts_if old_locked_until_secs > new_locked_until_secs || old_locked_until_secs == new_locked_until_secs;
+    aborts_if !exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+    let post post_store = global<Store>(staker);
+    let post post_staking_contract = simple_map::spec_get(post_store.staking_contracts, operator);
+    let post post_stake_pool = global<stake::StakePool>(post_staking_contract.owner_cap.pool_address);
+    ensures post_stake_pool.locked_until_secs == new_locked_until_secs;
+}
+
+ + + + + + + +
schema CreateStakingContractWithCoinsAbortsIfAndEnsures {
+    staker: signer;
+    operator: address;
+    voter: address;
+    amount: u64;
+    commission_percentage: u64;
+    contract_creation_seed: vector<u8>;
+    aborts_if commission_percentage > 100;
+    aborts_if !exists<staking_config::StakingConfig>(@aptos_framework);
+    let config = global<staking_config::StakingConfig>(@aptos_framework);
+    let min_stake_required = config.minimum_stake;
+    aborts_if amount < min_stake_required;
+    let staker_address = signer::address_of(staker);
+    let account = global<account::Account>(staker_address);
+    aborts_if !exists<Store>(staker_address) && !exists<account::Account>(staker_address);
+    aborts_if !exists<Store>(staker_address) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM;
+    // This enforces high-level requirement 1:
+    ensures exists<Store>(staker_address);
+    let store = global<Store>(staker_address);
+    let staking_contracts = store.staking_contracts;
+    let owner_cap = simple_map::spec_get(store.staking_contracts, operator).owner_cap;
+    let post post_store = global<Store>(staker_address);
+    let post post_staking_contracts = post_store.staking_contracts;
+}
+
+ + + + + + + +
schema PreconditionsInCreateContract {
+    requires exists<stake::ValidatorPerformance>(@aptos_framework);
+    requires exists<stake::ValidatorSet>(@aptos_framework);
+    requires exists<staking_config::StakingRewardsConfig>(
+        @aptos_framework
+    ) || !std::features::spec_periodical_reward_rate_decrease_enabled();
+    requires exists<stake::ValidatorFees>(@aptos_framework);
+    requires exists<aptos_framework::timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+    requires exists<stake::AptosCoinCapabilities>(@aptos_framework);
+}
+
+ + + + + + + +
schema CreateStakePoolAbortsIf {
+    resource_addr: address;
+    operator: address;
+    voter: address;
+    contract_creation_seed: vector<u8>;
+    let acc = global<account::Account>(resource_addr);
+    aborts_if exists<account::Account>(resource_addr) && (len(
+        acc.signer_capability_offer.for.vec
+    ) != 0 || acc.sequence_number != 0);
+    aborts_if !exists<account::Account>(resource_addr) && len(bcs::to_bytes(resource_addr)) != 32;
+    aborts_if len(account::ZERO_AUTH_KEY) != 32;
+    aborts_if exists<stake::ValidatorConfig>(resource_addr);
+    let allowed = global<stake::AllowedValidators>(@aptos_framework);
+    aborts_if exists<stake::AllowedValidators>(@aptos_framework) && !contains(allowed.accounts, resource_addr);
+    aborts_if exists<stake::StakePool>(resource_addr);
+    aborts_if exists<stake::OwnerCapability>(resource_addr);
+    aborts_if exists<account::Account>(
+        resource_addr
+    ) && acc.guid_creation_num + 12 >= account::MAX_GUID_CREATION_NUM;
+}
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/staking_proxy.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/staking_proxy.md new file mode 100644 index 0000000000000..17cd7ab0c01ff --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/staking_proxy.md @@ -0,0 +1,566 @@ + + + +# Module `0x1::staking_proxy` + + + +- [Function `set_operator`](#0x1_staking_proxy_set_operator) +- [Function `set_voter`](#0x1_staking_proxy_set_voter) +- [Function `set_vesting_contract_operator`](#0x1_staking_proxy_set_vesting_contract_operator) +- [Function `set_staking_contract_operator`](#0x1_staking_proxy_set_staking_contract_operator) +- [Function `set_stake_pool_operator`](#0x1_staking_proxy_set_stake_pool_operator) +- [Function `set_vesting_contract_voter`](#0x1_staking_proxy_set_vesting_contract_voter) +- [Function `set_staking_contract_voter`](#0x1_staking_proxy_set_staking_contract_voter) +- [Function `set_stake_pool_voter`](#0x1_staking_proxy_set_stake_pool_voter) +- [Specification](#@Specification_0) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `set_operator`](#@Specification_0_set_operator) + - [Function `set_voter`](#@Specification_0_set_voter) + - [Function `set_vesting_contract_operator`](#@Specification_0_set_vesting_contract_operator) + - [Function `set_staking_contract_operator`](#@Specification_0_set_staking_contract_operator) + - [Function `set_stake_pool_operator`](#@Specification_0_set_stake_pool_operator) + - [Function `set_vesting_contract_voter`](#@Specification_0_set_vesting_contract_voter) + - [Function `set_staking_contract_voter`](#@Specification_0_set_staking_contract_voter) + - [Function `set_stake_pool_voter`](#@Specification_0_set_stake_pool_voter) + + +
use 0x1::signer;
+use 0x1::stake;
+use 0x1::staking_contract;
+use 0x1::vesting;
+
+ + + + + +## Function `set_operator` + + + +
public entry fun set_operator(owner: &signer, old_operator: address, new_operator: address)
+
+ + + +
+Implementation + + +
public entry fun set_operator(owner: &signer, old_operator: address, new_operator: address) {
+    set_vesting_contract_operator(owner, old_operator, new_operator);
+    set_staking_contract_operator(owner, old_operator, new_operator);
+    set_stake_pool_operator(owner, new_operator);
+}
+
+ + + +
+ + + +## Function `set_voter` + + + +
public entry fun set_voter(owner: &signer, operator: address, new_voter: address)
+
+ + + +
+Implementation + + +
public entry fun set_voter(owner: &signer, operator: address, new_voter: address) {
+    set_vesting_contract_voter(owner, operator, new_voter);
+    set_staking_contract_voter(owner, operator, new_voter);
+    set_stake_pool_voter(owner, new_voter);
+}
+
+ + + +
+ + + +## Function `set_vesting_contract_operator` + + + +
public entry fun set_vesting_contract_operator(owner: &signer, old_operator: address, new_operator: address)
+
+ + + +
+Implementation + + +
public entry fun set_vesting_contract_operator(owner: &signer, old_operator: address, new_operator: address) {
+    let owner_address = signer::address_of(owner);
+    let vesting_contracts = &vesting::vesting_contracts(owner_address);
+    vector::for_each_ref(vesting_contracts, |vesting_contract| {
+        let vesting_contract = *vesting_contract;
+        if (vesting::operator(vesting_contract) == old_operator) {
+            let current_commission_percentage = vesting::operator_commission_percentage(vesting_contract);
+            vesting::update_operator(owner, vesting_contract, new_operator, current_commission_percentage);
+        };
+    });
+}
+
+ + + +
+ + + +## Function `set_staking_contract_operator` + + + +
public entry fun set_staking_contract_operator(owner: &signer, old_operator: address, new_operator: address)
+
+ + + +
+Implementation + + +
public entry fun set_staking_contract_operator(owner: &signer, old_operator: address, new_operator: address) {
+    let owner_address = signer::address_of(owner);
+    if (staking_contract::staking_contract_exists(owner_address, old_operator)) {
+        let current_commission_percentage = staking_contract::commission_percentage(owner_address, old_operator);
+        staking_contract::switch_operator(owner, old_operator, new_operator, current_commission_percentage);
+    };
+}
+
+ + + +
+ + + +## Function `set_stake_pool_operator` + + + +
public entry fun set_stake_pool_operator(owner: &signer, new_operator: address)
+
+ + + +
+Implementation + + +
public entry fun set_stake_pool_operator(owner: &signer, new_operator: address) {
+    let owner_address = signer::address_of(owner);
+    if (stake::stake_pool_exists(owner_address)) {
+        stake::set_operator(owner, new_operator);
+    };
+}
+
+ + + +
+ + + +## Function `set_vesting_contract_voter` + + + +
public entry fun set_vesting_contract_voter(owner: &signer, operator: address, new_voter: address)
+
+ + + +
+Implementation + + +
public entry fun set_vesting_contract_voter(owner: &signer, operator: address, new_voter: address) {
+    let owner_address = signer::address_of(owner);
+    let vesting_contracts = &vesting::vesting_contracts(owner_address);
+    vector::for_each_ref(vesting_contracts, |vesting_contract| {
+        let vesting_contract = *vesting_contract;
+        if (vesting::operator(vesting_contract) == operator) {
+            vesting::update_voter(owner, vesting_contract, new_voter);
+        };
+    });
+}
+
+ + + +
+ + + +## Function `set_staking_contract_voter` + + + +
public entry fun set_staking_contract_voter(owner: &signer, operator: address, new_voter: address)
+
+ + + +
+Implementation + + +
public entry fun set_staking_contract_voter(owner: &signer, operator: address, new_voter: address) {
+    let owner_address = signer::address_of(owner);
+    if (staking_contract::staking_contract_exists(owner_address, operator)) {
+        staking_contract::update_voter(owner, operator, new_voter);
+    };
+}
+
+ + + +
+ + + +## Function `set_stake_pool_voter` + + + +
public entry fun set_stake_pool_voter(owner: &signer, new_voter: address)
+
+ + + +
+Implementation + + +
public entry fun set_stake_pool_voter(owner: &signer, new_voter: address) {
+    if (stake::stake_pool_exists(signer::address_of(owner))) {
+        stake::set_delegated_voter(owner, new_voter);
+    };
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1When updating the Vesting operator, it should be updated throughout all depending units.MediumThe VestingContract contains a StakingInfo object that has an operator field, and this operator is mapped to a StakingContract object that in turn encompasses a StakePool object where the operator matches.Audited that it ensures the two operator fields hold the new value after the update.
2When updating the Vesting voter, it should be updated throughout all depending units.MediumThe VestingContract contains a StakingInfo object that has a voter field, and this voter is mapped to a StakingContract object that in turn encompasses a StakePool object where the voter matches.Audited that it ensures the two voter fields hold the new value after the update.
3The operator and voter of a Vesting Contract should only be updated by the owner of the contract.HighThe owner-operator-voter model, as defined in the documentation, grants distinct abilities to each role. Therefore, it's crucial to ensure that only the owner has the authority to modify the operator or voter, to prevent the compromise of the StakePool.Audited that it ensures the signer owns the AdminStore resource and that the operator or voter intended for the update actually exists.
4The operator and voter of a Staking Contract should only be updated by the owner of the contract.HighThe owner-operator-voter model, as defined in the documentation, grants distinct abilities to each role. Therefore, it's crucial to ensure that only the owner has the authority to modify the operator or voter, to prevent the compromise of the StakePool.Audited the patterns of updating operators and voters in the staking contract.
5Staking Contract's operators should be unique inside a store.MediumDuplicates among operators could result in incorrectly updating the operator or voter associated with the incorrect StakingContract.Enforced via SimpleMap.
+ + + + + + +### Module-level Specification + + +
pragma verify = true;
+pragma aborts_if_is_strict;
+
+ + + + + +### Function `set_operator` + + +
public entry fun set_operator(owner: &signer, old_operator: address, new_operator: address)
+
+ + +Aborts if conditions of SetStakePoolOperator are not met + + +
pragma verify = false;
+pragma aborts_if_is_partial;
+include SetStakePoolOperator;
+include SetStakingContractOperator;
+
+ + + + + +### Function `set_voter` + + +
public entry fun set_voter(owner: &signer, operator: address, new_voter: address)
+
+ + +Aborts if conditions of SetStakingContractVoter and SetStakePoolVoterAbortsIf are not met + + +
pragma aborts_if_is_partial;
+include SetStakingContractVoter;
+include SetStakePoolVoterAbortsIf;
+
+ + + + + +### Function `set_vesting_contract_operator` + + +
public entry fun set_vesting_contract_operator(owner: &signer, old_operator: address, new_operator: address)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `set_staking_contract_operator` + + +
public entry fun set_staking_contract_operator(owner: &signer, old_operator: address, new_operator: address)
+
+ + + + +
pragma aborts_if_is_partial;
+pragma verify = false;
+include SetStakingContractOperator;
+
+ + + + + + + +
schema SetStakingContractOperator {
+    owner: signer;
+    old_operator: address;
+    new_operator: address;
+    let owner_address = signer::address_of(owner);
+    let store = global<Store>(owner_address);
+    let staking_contract_exists = exists<Store>(owner_address) && simple_map::spec_contains_key(store.staking_contracts, old_operator);
+    aborts_if staking_contract_exists && simple_map::spec_contains_key(store.staking_contracts, new_operator);
+    let post post_store = global<Store>(owner_address);
+    ensures staking_contract_exists ==> !simple_map::spec_contains_key(post_store.staking_contracts, old_operator);
+    let staking_contract = simple_map::spec_get(store.staking_contracts, old_operator);
+    let stake_pool = global<stake::StakePool>(staking_contract.pool_address);
+    let active = coin::value(stake_pool.active);
+    let pending_active = coin::value(stake_pool.pending_active);
+    let total_active_stake = active + pending_active;
+    let accumulated_rewards = total_active_stake - staking_contract.principal;
+    let commission_amount = accumulated_rewards * staking_contract.commission_percentage / 100;
+    aborts_if staking_contract_exists && !exists<stake::StakePool>(staking_contract.pool_address);
+    ensures staking_contract_exists ==>
+        simple_map::spec_get(post_store.staking_contracts, new_operator).principal == total_active_stake - commission_amount;
+    let pool_address = staking_contract.owner_cap.pool_address;
+    let current_commission_percentage = staking_contract.commission_percentage;
+    aborts_if staking_contract_exists && commission_amount != 0 && !exists<stake::StakePool>(pool_address);
+    ensures staking_contract_exists && commission_amount != 0 ==>
+        global<stake::StakePool>(pool_address).operator_address == new_operator
+        && simple_map::spec_get(post_store.staking_contracts, new_operator).commission_percentage == current_commission_percentage;
+    ensures staking_contract_exists ==> simple_map::spec_contains_key(post_store.staking_contracts, new_operator);
+}
+
+ + + + + +### Function `set_stake_pool_operator` + + +
public entry fun set_stake_pool_operator(owner: &signer, new_operator: address)
+
+ + +Aborts if the stake pool exists but either the OwnerCapability or the stake pool resource +at the pool address is missing + + +
include SetStakePoolOperator;
+
+ + + + + + + +
schema SetStakePoolOperator {
+    owner: &signer;
+    new_operator: address;
+    let owner_address = signer::address_of(owner);
+    let ownership_cap = borrow_global<stake::OwnerCapability>(owner_address);
+    let pool_address = ownership_cap.pool_address;
+    aborts_if stake::stake_pool_exists(owner_address) && !(exists<stake::OwnerCapability>(owner_address) && stake::stake_pool_exists(pool_address));
+    ensures stake::stake_pool_exists(owner_address) ==> global<stake::StakePool>(pool_address).operator_address == new_operator;
+}
+
+ + + + + +### Function `set_vesting_contract_voter` + + +
public entry fun set_vesting_contract_voter(owner: &signer, operator: address, new_voter: address)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `set_staking_contract_voter` + + +
public entry fun set_staking_contract_voter(owner: &signer, operator: address, new_voter: address)
+
+ + + + +
include SetStakingContractVoter;
+
+ + +Make sure staking_contract_exists holds first, +then abort if the required resource does not exist + + + + + +
schema SetStakingContractVoter {
+    owner: &signer;
+    operator: address;
+    new_voter: address;
+    let owner_address = signer::address_of(owner);
+    let staker = owner_address;
+    let store = global<Store>(staker);
+    let staking_contract_exists = exists<Store>(staker) && simple_map::spec_contains_key(store.staking_contracts, operator);
+    let staker_address = owner_address;
+    let staking_contract = simple_map::spec_get(store.staking_contracts, operator);
+    let pool_address = staking_contract.pool_address;
+    let pool_address1 = staking_contract.owner_cap.pool_address;
+    aborts_if staking_contract_exists && !exists<stake::StakePool>(pool_address);
+    aborts_if staking_contract_exists && !exists<stake::StakePool>(staking_contract.owner_cap.pool_address);
+    ensures staking_contract_exists ==> global<stake::StakePool>(pool_address1).delegated_voter == new_voter;
+}
+
+ + + + + +### Function `set_stake_pool_voter` + + +
public entry fun set_stake_pool_voter(owner: &signer, new_voter: address)
+
+ + + + +
include SetStakePoolVoterAbortsIf;
+
+ + + + + + + +
schema SetStakePoolVoterAbortsIf {
+    owner: &signer;
+    new_voter: address;
+    let owner_address = signer::address_of(owner);
+    let ownership_cap = global<stake::OwnerCapability>(owner_address);
+    let pool_address = ownership_cap.pool_address;
+    aborts_if stake::stake_pool_exists(owner_address) && !(exists<stake::OwnerCapability>(owner_address) && stake::stake_pool_exists(pool_address));
+    ensures stake::stake_pool_exists(owner_address) ==> global<stake::StakePool>(pool_address).delegated_voter == new_voter;
+}
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/state_storage.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/state_storage.md new file mode 100644 index 0000000000000..d73e08033129a --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/state_storage.md @@ -0,0 +1,456 @@ + + + +# Module `0x1::state_storage` + + + +- [Struct `Usage`](#0x1_state_storage_Usage) +- [Resource `StateStorageUsage`](#0x1_state_storage_StateStorageUsage) +- [Resource `GasParameter`](#0x1_state_storage_GasParameter) +- [Constants](#@Constants_0) +- [Function `initialize`](#0x1_state_storage_initialize) +- [Function `on_new_block`](#0x1_state_storage_on_new_block) +- [Function `current_items_and_bytes`](#0x1_state_storage_current_items_and_bytes) +- [Function `get_state_storage_usage_only_at_epoch_beginning`](#0x1_state_storage_get_state_storage_usage_only_at_epoch_beginning) +- [Function `on_reconfig`](#0x1_state_storage_on_reconfig) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `initialize`](#@Specification_1_initialize) + - [Function `on_new_block`](#@Specification_1_on_new_block) + - [Function `current_items_and_bytes`](#@Specification_1_current_items_and_bytes) + - [Function `get_state_storage_usage_only_at_epoch_beginning`](#@Specification_1_get_state_storage_usage_only_at_epoch_beginning) + - [Function `on_reconfig`](#@Specification_1_on_reconfig) + + +
use 0x1::error;
+use 0x1::system_addresses;
+
+ + + + + +## Struct `Usage` + + + +
struct Usage has copy, drop, store
+
+ + + +
+Fields + + +
+
+items: u64 +
+
+ +
+
+bytes: u64 +
+
+ +
+
+ + +
+ + + +## Resource `StateStorageUsage` + +This is updated at the beginning of each epoch, reflecting the storage +usage after the last txn of the previous epoch is committed. + + +
struct StateStorageUsage has store, key
+
+ + + +
+Fields + + +
+
+epoch: u64 +
+
+ +
+
+usage: state_storage::Usage +
+
+ +
+
+ + +
+ + + +## Resource `GasParameter` + + + +
struct GasParameter has store, key
+
+ + + +
+Fields + + +
+
+usage: state_storage::Usage +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const ESTATE_STORAGE_USAGE: u64 = 0;
+
+ + + + + +## Function `initialize` + + + +
public(friend) fun initialize(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
public(friend) fun initialize(aptos_framework: &signer) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    assert!(
+        !exists<StateStorageUsage>(@aptos_framework),
+        error::already_exists(ESTATE_STORAGE_USAGE)
+    );
+    move_to(aptos_framework, StateStorageUsage {
+        epoch: 0,
+        usage: Usage {
+            items: 0,
+            bytes: 0,
+        }
+    });
+}
+
+ + + +
+ + + +## Function `on_new_block` + + + +
public(friend) fun on_new_block(epoch: u64)
+
+ + + +
+Implementation + + +
public(friend) fun on_new_block(epoch: u64) acquires StateStorageUsage {
+    assert!(
+        exists<StateStorageUsage>(@aptos_framework),
+        error::not_found(ESTATE_STORAGE_USAGE)
+    );
+    let usage = borrow_global_mut<StateStorageUsage>(@aptos_framework);
+    if (epoch != usage.epoch) {
+        usage.epoch = epoch;
+        usage.usage = get_state_storage_usage_only_at_epoch_beginning();
+    }
+}
+
+ + + +
+ + + +## Function `current_items_and_bytes` + + + +
public(friend) fun current_items_and_bytes(): (u64, u64)
+
+ + + +
+Implementation + + +
public(friend) fun current_items_and_bytes(): (u64, u64) acquires StateStorageUsage {
+    assert!(
+        exists<StateStorageUsage>(@aptos_framework),
+        error::not_found(ESTATE_STORAGE_USAGE)
+    );
+    let usage = borrow_global<StateStorageUsage>(@aptos_framework);
+    (usage.usage.items, usage.usage.bytes)
+}
+
+ + + +
+ + + +## Function `get_state_storage_usage_only_at_epoch_beginning` + +Warning: the result returned is based on the base state view held by the +VM for the entire block or chunk of transactions, it's only deterministic +if called from the first transaction of the block because the execution layer +guarantees a fresh state view then. + + +
fun get_state_storage_usage_only_at_epoch_beginning(): state_storage::Usage
+
+ + + +
+Implementation + + +
native fun get_state_storage_usage_only_at_epoch_beginning(): Usage;
+
+ + + +
+ + + +## Function `on_reconfig` + + + +
public(friend) fun on_reconfig()
+
+ + + +
+Implementation + + +
public(friend) fun on_reconfig() {
+    abort 0
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1Given the blockchain is in an operating state, the resources for tracking state storage usage and gas parameters must exist for the Aptos framework address.CriticalThe initialize function ensures only the Aptos framework address can call it.Formally verified via module.
2During the initialization of the module, it is guaranteed that the resource for tracking state storage usage will be moved under the Aptos framework account with default initial values.MediumThe resource for tracking state storage usage may only be initialized with specific values and published under the aptos_framework account.Formally verified via initialize.
3The initialization function is only called once, during genesis.MediumThe initialize function ensures StateStorageUsage does not already exist.Formally verified via initialize.
4During the initialization of the module, it is guaranteed that the resource for tracking state storage usage will be moved under the Aptos framework account with default initial values.MediumThe resource for tracking state storage usage may only be initialized with specific values and published under the aptos_framework account.Formally verified via initialize.
5The structure for tracking state storage usage should exist for it to be updated at the beginning of each new block and for retrieving the values of structure members.MediumThe functions on_new_block and current_items_and_bytes verify that the StateStorageUsage structure exists before performing any further operations.Formally Verified via current_items_and_bytes, on_new_block, and the global invariant.
+ + + + + + +### Module-level Specification + + +
pragma verify = true;
+pragma aborts_if_is_strict;
+// This enforces high-level requirement 1 and high-level requirement 5:
+invariant [suspendable] chain_status::is_operating() ==> exists<StateStorageUsage>(@aptos_framework);
+invariant [suspendable] chain_status::is_operating() ==> exists<GasParameter>(@aptos_framework);
+
+ + + + + +### Function `initialize` + + +
public(friend) fun initialize(aptos_framework: &signer)
+
+ + +Ensures the caller is the Aptos framework address. +Aborts if StateStorageUsage already exists. + + +
let addr = signer::address_of(aptos_framework);
+// This enforces high-level requirement 4:
+aborts_if !system_addresses::is_aptos_framework_address(addr);
+// This enforces high-level requirement 3:
+aborts_if exists<StateStorageUsage>(@aptos_framework);
+ensures exists<StateStorageUsage>(@aptos_framework);
+let post state_usage = global<StateStorageUsage>(@aptos_framework);
+// This enforces high-level requirement 2:
+ensures state_usage.epoch == 0 && state_usage.usage.bytes == 0 && state_usage.usage.items == 0;
+
+ + + + + +### Function `on_new_block` + + +
public(friend) fun on_new_block(epoch: u64)
+
+ + + + +
// This enforces high-level requirement 5:
+requires chain_status::is_operating();
+aborts_if false;
+ensures epoch == global<StateStorageUsage>(@aptos_framework).epoch;
+
+ + + + + +### Function `current_items_and_bytes` + + +
public(friend) fun current_items_and_bytes(): (u64, u64)
+
+ + + + +
// This enforces high-level requirement 5:
+aborts_if !exists<StateStorageUsage>(@aptos_framework);
+
+ + + + + +### Function `get_state_storage_usage_only_at_epoch_beginning` + + +
fun get_state_storage_usage_only_at_epoch_beginning(): state_storage::Usage
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `on_reconfig` + + +
public(friend) fun on_reconfig()
+
+ + + + +
aborts_if true;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/storage_gas.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/storage_gas.md new file mode 100644 index 0000000000000..1e17497c79d00 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/storage_gas.md @@ -0,0 +1,1636 @@ + + + +# Module `0x1::storage_gas` + +Gas parameters for global storage. + + + + +## General overview sections + + +[Definitions](#definitions) + +* [Utilization dimensions](#utilization-dimensions) +* [Utilization ratios](#utilization-ratios) +* [Gas curve lookup](#gas-curve-lookup) +* [Item-wise operations](#item-wise-operations) +* [Byte-wise operations](#byte-wise-operations) + +[Function dependencies](#function-dependencies) + +* [Initialization](#initialization) +* [Reconfiguration](#reconfiguration) +* [Setting configurations](#setting-configurations) + + + + +## Definitions + + + + + +### Utilization dimensions + + +Global storage gas fluctuates each epoch based on total utilization, +which is defined across two dimensions: + +1. The number of "items" in global storage. +2. The number of bytes in global storage. + +"Items" include: + +1. Resources having the key attribute, which have been moved into +global storage via a move_to() operation. +2. Table entries. + + + + +### Utilization ratios + + +initialize() sets an arbitrary "target" utilization for both +item-wise and byte-wise storage, then each epoch, gas parameters are +reconfigured based on the "utilization ratio" for each of the two +utilization dimensions. The utilization ratio for a given dimension, +either item-wise or byte-wise, is taken as the quotient of actual +utilization and target utilization. For example, given a 500 GB +target and 250 GB actual utilization, the byte-wise utilization +ratio is 50%. + +See base_8192_exponential_curve() for mathematical definitions. 
+ + + + +### Gas curve lookup + + +The utilization ratio in a given epoch is used as a lookup value in +a Eulerian approximation to an exponential curve, known as a +GasCurve, which is defined in base_8192_exponential_curve(), +based on a minimum gas charge and a maximum gas charge. + +The minimum gas charge and maximum gas charge at the endpoints of +the curve are set in initialize(), and correspond to the following +operations defined in StorageGas: + +1. Per-item read +2. Per-item create +3. Per-item write +4. Per-byte read +5. Per-byte create +6. Per-byte write + +For example, if the byte-wise utilization ratio is 50%, then +per-byte reads will charge the minimum per-byte gas cost, plus +1.09% of the difference between the maximum and the minimum cost. +See base_8192_exponential_curve() for a supporting calculation. + + + + +### Item-wise operations + + +1. Per-item read gas is assessed whenever an item is read from +global storage via borrow_global<T>() or via a table entry read +operation. +2. Per-item create gas is assessed whenever an item is created in +global storage via move_to<T>() or via a table entry creation +operation. +3. Per-item write gas is assessed whenever an item is overwritten in +global storage via borrow_global_mut<T> or via a table entry +mutation operation. + + + + +### Byte-wise operations + + +Byte-wise operations are assessed in a manner similar to per-item +operations, but account for the number of bytes affected by the +given operation. Notably, this number denotes the total number of +bytes in an *entire item*. + +For example, if an operation mutates a u8 field in a resource that +has 5 other u128 fields, the per-byte gas write cost will account +for $(5 * 128) / 8 + 1 = 81$ bytes. Vectors are similarly treated +as fields. 
+ + + + +## Function dependencies + + +The below dependency chart uses mermaid.js syntax, which can be +automatically rendered into a diagram (depending on the browser) +when viewing the documentation file generated from source code. If +a browser renders the diagrams with coloring that makes it difficult +to read, try a different browser. + + + + +### Initialization + + +```mermaid + +flowchart LR + +initialize --> base_8192_exponential_curve +base_8192_exponential_curve --> new_gas_curve +base_8192_exponential_curve --> new_point +new_gas_curve --> validate_points + +``` + + + + +### Reconfiguration + + +```mermaid + +flowchart LR + +calculate_gas --> Interpolate %% capitalized +calculate_read_gas --> calculate_gas +calculate_create_gas --> calculate_gas +calculate_write_gas --> calculate_gas +on_reconfig --> calculate_read_gas +on_reconfig --> calculate_create_gas +on_reconfig --> calculate_write_gas +reconfiguration::reconfigure --> on_reconfig + +``` + +Here, the function interpolate() is spelled Interpolate because +interpolate is a reserved word in mermaid.js. 
+ + + + +### Setting configurations + + +```mermaid + +flowchart LR + +gas_schedule::set_storage_gas_config --> set_config + +``` + + + + +## Complete docgen index + + +The below index is automatically generated from source code: + + +- [General overview sections](#@General_overview_sections_0) +- [Definitions](#@Definitions_1) + - [Utilization dimensions](#@Utilization_dimensions_2) + - [Utilization ratios](#@Utilization_ratios_3) + - [Gas curve lookup](#@Gas_curve_lookup_4) + - [Item-wise operations](#@Item-wise_operations_5) + - [Byte-wise operations](#@Byte-wise_operations_6) +- [Function dependencies](#@Function_dependencies_7) + - [Initialization](#@Initialization_8) + - [Reconfiguration](#@Reconfiguration_9) + - [Setting configurations](#@Setting_configurations_10) +- [Complete docgen index](#@Complete_docgen_index_11) +- [Resource `StorageGas`](#0x1_storage_gas_StorageGas) +- [Struct `Point`](#0x1_storage_gas_Point) +- [Struct `UsageGasConfig`](#0x1_storage_gas_UsageGasConfig) +- [Struct `GasCurve`](#0x1_storage_gas_GasCurve) +- [Resource `StorageGasConfig`](#0x1_storage_gas_StorageGasConfig) +- [Constants](#@Constants_12) +- [Function `base_8192_exponential_curve`](#0x1_storage_gas_base_8192_exponential_curve) + - [Function definition](#@Function_definition_13) + - [Example](#@Example_14) + - [Utilization multipliers](#@Utilization_multipliers_15) +- [Function `new_point`](#0x1_storage_gas_new_point) +- [Function `new_gas_curve`](#0x1_storage_gas_new_gas_curve) +- [Function `new_usage_gas_config`](#0x1_storage_gas_new_usage_gas_config) +- [Function `new_storage_gas_config`](#0x1_storage_gas_new_storage_gas_config) +- [Function `set_config`](#0x1_storage_gas_set_config) +- [Function `initialize`](#0x1_storage_gas_initialize) +- [Function `validate_points`](#0x1_storage_gas_validate_points) +- [Function `calculate_gas`](#0x1_storage_gas_calculate_gas) +- [Function `interpolate`](#0x1_storage_gas_interpolate) +- [Function 
`calculate_read_gas`](#0x1_storage_gas_calculate_read_gas) +- [Function `calculate_create_gas`](#0x1_storage_gas_calculate_create_gas) +- [Function `calculate_write_gas`](#0x1_storage_gas_calculate_write_gas) +- [Function `on_reconfig`](#0x1_storage_gas_on_reconfig) +- [Specification](#@Specification_16) + - [Struct `Point`](#@Specification_16_Point) + - [Struct `UsageGasConfig`](#@Specification_16_UsageGasConfig) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Struct `GasCurve`](#@Specification_16_GasCurve) + - [Function `base_8192_exponential_curve`](#@Specification_16_base_8192_exponential_curve) + - [Function `new_point`](#@Specification_16_new_point) + - [Function `new_gas_curve`](#@Specification_16_new_gas_curve) + - [Function `new_usage_gas_config`](#@Specification_16_new_usage_gas_config) + - [Function `new_storage_gas_config`](#@Specification_16_new_storage_gas_config) + - [Function `set_config`](#@Specification_16_set_config) + - [Function `initialize`](#@Specification_16_initialize) + - [Function `validate_points`](#@Specification_16_validate_points) + - [Function `calculate_gas`](#@Specification_16_calculate_gas) + - [Function `interpolate`](#@Specification_16_interpolate) + - [Function `on_reconfig`](#@Specification_16_on_reconfig) + + +
use 0x1::error;
+use 0x1::state_storage;
+use 0x1::system_addresses;
+
+ + + + + +## Resource `StorageGas` + +Storage parameters, reconfigured each epoch. + +Parameters are updated during reconfiguration via +on_reconfig(), based on storage utilization at the beginning +of the epoch in which the reconfiguration transaction is +executed. The gas schedule derived from these parameters will +then be used to calculate gas for the entirety of the +following epoch, such that the data is one epoch older than +ideal. Notably, however, per this approach, the virtual machine +does not need to reload gas parameters after the +first transaction of an epoch. + + +
struct StorageGas has key
+
+ + + +
+Fields + + +
+
+per_item_read: u64 +
+
+ Cost to read an item from global storage. +
+
+per_item_create: u64 +
+
+ Cost to create an item in global storage. +
+
+per_item_write: u64 +
+
+ Cost to overwrite an item in global storage. +
+
+per_byte_read: u64 +
+
+ Cost to read a byte from global storage. +
+
+per_byte_create: u64 +
+
+ Cost to create a byte in global storage. +
+
+per_byte_write: u64 +
+
+ Cost to overwrite a byte in global storage. +
+
+ + +
+ + + +## Struct `Point` + +A point in a Eulerian curve approximation, with each coordinate +given in basis points: + +| Field value | Percentage | +|-------------|------------| +| 1 | 00.01 % | +| 10 | 00.10 % | +| 100 | 01.00 % | +| 1000 | 10.00 % | + + +
struct Point has copy, drop, store
+
+ + + +
+Fields + + +
+
+x: u64 +
+
+ x-coordinate basis points, corresponding to utilization + ratio in base_8192_exponential_curve(). +
+
+y: u64 +
+
+ y-coordinate basis points, corresponding to utilization + multiplier in base_8192_exponential_curve(). +
+
+ + +
+ + + +## Struct `UsageGasConfig` + +A gas configuration for either per-item or per-byte costs. + +Contains a target usage amount, as well as a Eulerian +approximation of an exponential curve for reads, creations, and +overwrites. See StorageGasConfig. + + +
struct UsageGasConfig has copy, drop, store
+
+ + + +
+Fields + + +
+
+target_usage: u64 +
+
+ +
+
+read_curve: storage_gas::GasCurve +
+
+ +
+
+create_curve: storage_gas::GasCurve +
+
+ +
+
+write_curve: storage_gas::GasCurve +
+
+ +
+
+ + +
+ + + +## Struct `GasCurve` + +Eulerian approximation of an exponential curve. + +Assumes the following endpoints: + +* $(x_0, y_0) = (0, 0)$ +* $(x_f, y_f) = (10000, 10000)$ + +Intermediate points must satisfy: + +1. $x_i > x_{i - 1}$ ( $x$ is strictly increasing). +2. $0 \leq x_i \leq 10000$ ( $x$ is between 0 and 10000). +3. $y_i \geq y_{i - 1}$ ( $y$ is non-decreasing). +4. $0 \leq y_i \leq 10000$ ( $y$ is between 0 and 10000). + +Lookup between two successive points is calculated via linear +interpolation, e.g., as if there were a straight line between +them. + +See base_8192_exponential_curve(). + + +
struct GasCurve has copy, drop, store
+
+ + + +
+Fields + + +
+
+min_gas: u64 +
+
+ +
+
+max_gas: u64 +
+
+ +
+
+points: vector<storage_gas::Point> +
+
+ +
+
+ + +
+ + + +## Resource `StorageGasConfig` + +Gas configurations for per-item and per-byte prices. + + +
struct StorageGasConfig has copy, drop, key
+
+ + + +
+Fields + + +
+
+item_config: storage_gas::UsageGasConfig +
+
+ Per-item gas configuration. +
+
+byte_config: storage_gas::UsageGasConfig +
+
+ Per-byte gas configuration. +
+
+ + +
+ + + +## Constants + + + + + + +
const MAX_U64: u64 = 18446744073709551615;
+
+ + + + + + + +
const BASIS_POINT_DENOMINATION: u64 = 10000;
+
+ + + + + + + +
const EINVALID_GAS_RANGE: u64 = 2;
+
+ + + + + + + +
const EINVALID_MONOTONICALLY_NON_DECREASING_CURVE: u64 = 5;
+
+ + + + + + + +
const EINVALID_POINT_RANGE: u64 = 6;
+
+ + + + + + + +
const ESTORAGE_GAS: u64 = 1;
+
+ + + + + + + +
const ESTORAGE_GAS_CONFIG: u64 = 0;
+
+ + + + + + + +
const ETARGET_USAGE_TOO_BIG: u64 = 4;
+
+ + + + + + + +
const EZERO_TARGET_USAGE: u64 = 3;
+
+ + + + + +## Function `base_8192_exponential_curve` + +Default exponential curve having base 8192. + + + + +### Function definition + + +Gas price as a function of utilization ratio is defined as: + +$$g(u_r) = g_{min} + \frac{(b^{u_r} - 1)}{b - 1} \Delta_g$$ + +$$g(u_r) = g_{min} + u_m \Delta_g$$ + +| Variable | Description | +|-------------------------------------|------------------------| +| $g_{min}$ | min_gas | +| $g_{max}$ | max_gas | +| $\Delta_{g} = g_{max} - g_{min}$ | Gas delta | +| $u$ | Utilization | +| $u_t$ | Target utilization | +| $u_r = u / u_t$ | Utilization ratio | +| $u_m = \frac{(b^{u_r} - 1)}{b - 1}$ | Utilization multiplier | +| $b = 8192$ | Exponent base | + + + + +### Example + + +Hence for a utilization ratio of 50% ( $u_r = 0.5$ ): + +$$g(0.5) = g_{min} + \frac{8192^{0.5} - 1}{8192 - 1} \Delta_g$$ + +$$g(0.5) \approx g_{min} + 0.0109 \Delta_g$$ + +Which means that the price above min_gas is approximately +1.09% of the difference between max_gas and min_gas. + + + + +### Utilization multipliers + + +| $u_r$ | $u_m$ (approximate) | +|-------|---------------------| +| 10% | 0.02% | +| 20% | 0.06% | +| 30% | 0.17% | +| 40% | 0.44% | +| 50% | 1.09% | +| 60% | 2.71% | +| 70% | 6.69% | +| 80% | 16.48% | +| 90% | 40.61% | +| 95% | 63.72% | +| 99% | 91.38% | + + +
public fun base_8192_exponential_curve(min_gas: u64, max_gas: u64): storage_gas::GasCurve
+
+ + + +
+Implementation + + +
public fun base_8192_exponential_curve(min_gas: u64, max_gas: u64): GasCurve {
+    new_gas_curve(min_gas, max_gas,
+        vector[
+            new_point(1000, 2),
+            new_point(2000, 6),
+            new_point(3000, 17),
+            new_point(4000, 44),
+            new_point(5000, 109),
+            new_point(6000, 271),
+            new_point(7000, 669),
+            new_point(8000, 1648),
+            new_point(9000, 4061),
+            new_point(9500, 6372),
+            new_point(9900, 9138),
+        ]
+    )
+}
+
+ + + +
+ + + +## Function `new_point` + + + +
public fun new_point(x: u64, y: u64): storage_gas::Point
+
+ + + +
+Implementation + + +
public fun new_point(x: u64, y: u64): Point {
+    assert!(
+        x <= BASIS_POINT_DENOMINATION && y <= BASIS_POINT_DENOMINATION,
+        error::invalid_argument(EINVALID_POINT_RANGE)
+    );
+    Point { x, y }
+}
+
+ + + +
+ + + +## Function `new_gas_curve` + + + +
public fun new_gas_curve(min_gas: u64, max_gas: u64, points: vector<storage_gas::Point>): storage_gas::GasCurve
+
+ + + +
+Implementation + + +
public fun new_gas_curve(min_gas: u64, max_gas: u64, points: vector<Point>): GasCurve {
+    assert!(max_gas >= min_gas, error::invalid_argument(EINVALID_GAS_RANGE));
+    assert!(max_gas <= MAX_U64 / BASIS_POINT_DENOMINATION, error::invalid_argument(EINVALID_GAS_RANGE));
+    validate_points(&points);
+    GasCurve {
+        min_gas,
+        max_gas,
+        points
+    }
+}
+
+ + + +
+ + + +## Function `new_usage_gas_config` + + + +
public fun new_usage_gas_config(target_usage: u64, read_curve: storage_gas::GasCurve, create_curve: storage_gas::GasCurve, write_curve: storage_gas::GasCurve): storage_gas::UsageGasConfig
+
+ + + +
+Implementation + + +
public fun new_usage_gas_config(target_usage: u64, read_curve: GasCurve, create_curve: GasCurve, write_curve: GasCurve): UsageGasConfig {
+    assert!(target_usage > 0, error::invalid_argument(EZERO_TARGET_USAGE));
+    assert!(target_usage <= MAX_U64 / BASIS_POINT_DENOMINATION, error::invalid_argument(ETARGET_USAGE_TOO_BIG));
+    UsageGasConfig {
+        target_usage,
+        read_curve,
+        create_curve,
+        write_curve,
+    }
+}
+
+ + + +
+ + + +## Function `new_storage_gas_config` + + + +
public fun new_storage_gas_config(item_config: storage_gas::UsageGasConfig, byte_config: storage_gas::UsageGasConfig): storage_gas::StorageGasConfig
+
+ + + +
+Implementation + + +
public fun new_storage_gas_config(item_config: UsageGasConfig, byte_config: UsageGasConfig): StorageGasConfig {
+    StorageGasConfig {
+        item_config,
+        byte_config
+    }
+}
+
+ + + +
+ + + +## Function `set_config` + + + +
public(friend) fun set_config(aptos_framework: &signer, config: storage_gas::StorageGasConfig)
+
+ + + +
+Implementation + + +
public(friend) fun set_config(aptos_framework: &signer, config: StorageGasConfig) acquires StorageGasConfig {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    *borrow_global_mut<StorageGasConfig>(@aptos_framework) = config;
+}
+
+ + + +
+ + + +## Function `initialize` + +Initialize per-item and per-byte gas prices. + +Target utilization is set to 2 billion items and 1 TB. + +GasCurve endpoints are initialized as follows: + +| Data style | Operation | Minimum gas | Maximum gas | +|------------|-----------|-------------|-------------| +| Per item | Read | 300K | 300K * 100 | +| Per item | Create | 300k | 300k * 100 | +| Per item | Write | 300K | 300K * 100 | +| Per byte | Read | 300 | 300 * 100 | +| Per byte | Create | 5K | 5K * 100 | +| Per byte | Write | 5K | 5K * 100 | + +StorageGas values are additionally initialized, but per +on_reconfig(), they will be reconfigured for each subsequent +epoch after initialization. + +See base_8192_exponential_curve() for more information on +target utilization. + + +
public fun initialize(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
public fun initialize(aptos_framework: &signer) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    assert!(
+        !exists<StorageGasConfig>(@aptos_framework),
+        error::already_exists(ESTORAGE_GAS_CONFIG)
+    );
+
+    let k: u64 = 1000;
+    let m: u64 = 1000 * 1000;
+
+    let item_config = UsageGasConfig {
+        target_usage: 2 * k * m, // 2 billion
+        read_curve: base_8192_exponential_curve(300 * k, 300 * k * 100),
+        create_curve: base_8192_exponential_curve(300 * k, 300 * k * 100),
+        write_curve: base_8192_exponential_curve(300 * k, 300 * k * 100),
+    };
+    let byte_config = UsageGasConfig {
+        target_usage: 1 * m * m, // 1TB
+        read_curve: base_8192_exponential_curve(300, 300 * 100),
+        create_curve: base_8192_exponential_curve(5 * k,  5 * k * 100),
+        write_curve: base_8192_exponential_curve(5 * k,  5 * k * 100),
+    };
+    move_to(aptos_framework, StorageGasConfig {
+        item_config,
+        byte_config,
+    });
+
+    assert!(
+        !exists<StorageGas>(@aptos_framework),
+        error::already_exists(ESTORAGE_GAS)
+    );
+    move_to(aptos_framework, StorageGas {
+        per_item_read: 300 * k,
+        per_item_create: 5 * m,
+        per_item_write: 300 * k,
+        per_byte_read: 300,
+        per_byte_create: 5 * k,
+        per_byte_write: 5 * k,
+    });
+}
+
+ + + +
+ + + +## Function `validate_points` + + + +
fun validate_points(points: &vector<storage_gas::Point>)
+
+ + + +
+Implementation + + +
fun validate_points(points: &vector<Point>) {
+    let len = vector::length(points);
+    spec {
+        assume len < MAX_U64;
+    };
+    let i = 0;
+    while ({
+        spec {
+            invariant forall j in 0..i: {
+                let cur = if (j == 0) { Point { x: 0, y: 0 } } else { points[j - 1] };
+                let next = if (j == len) { Point { x: BASIS_POINT_DENOMINATION, y: BASIS_POINT_DENOMINATION } } else { points[j] };
+                cur.x < next.x && cur.y <= next.y
+            };
+        };
+        i <= len
+    }) {
+        let cur = if (i == 0) { &Point { x: 0, y: 0 } } else { vector::borrow(points, i - 1) };
+        let next = if (i == len) { &Point { x: BASIS_POINT_DENOMINATION, y: BASIS_POINT_DENOMINATION } } else { vector::borrow(points, i) };
+        assert!(cur.x < next.x && cur.y <= next.y, error::invalid_argument(EINVALID_MONOTONICALLY_NON_DECREASING_CURVE));
+        i = i + 1;
+    }
+}
+
+ + + +
+ + + +## Function `calculate_gas` + + + +
fun calculate_gas(max_usage: u64, current_usage: u64, curve: &storage_gas::GasCurve): u64
+
+ + + +
+Implementation + + +
fun calculate_gas(max_usage: u64, current_usage: u64, curve: &GasCurve): u64 {
+    let capped_current_usage = if (current_usage > max_usage) max_usage else current_usage;
+    let points = &curve.points;
+    let num_points = vector::length(points);
+    let current_usage_bps = capped_current_usage * BASIS_POINT_DENOMINATION / max_usage;
+
+    // Check the corner case that current_usage_bps drops before the first point.
+    let (left, right) = if (num_points == 0) {
+        (&Point { x: 0, y: 0 }, &Point { x: BASIS_POINT_DENOMINATION, y: BASIS_POINT_DENOMINATION })
+    } else if (current_usage_bps < vector::borrow(points, 0).x) {
+        (&Point { x: 0, y: 0 }, vector::borrow(points, 0))
+    } else if (vector::borrow(points, num_points - 1).x <= current_usage_bps) {
+        (vector::borrow(points, num_points - 1), &Point { x: BASIS_POINT_DENOMINATION, y: BASIS_POINT_DENOMINATION })
+    } else {
+        let (i, j) = (0, num_points - 2);
+        while ({
+            spec {
+                invariant i <= j;
+                invariant j < num_points - 1;
+                invariant points[i].x <= current_usage_bps;
+                invariant current_usage_bps < points[j + 1].x;
+            };
+            i < j
+        }) {
+            let mid = j - (j - i) / 2;
+            if (current_usage_bps < vector::borrow(points, mid).x) {
+                spec {
+                    // j is strictly decreasing.
+                    assert mid - 1 < j;
+                };
+                j = mid - 1;
+            } else {
+                spec {
+                    // i is strictly increasing.
+                    assert i < mid;
+                };
+                i = mid;
+            };
+        };
+        (vector::borrow(points, i), vector::borrow(points, i + 1))
+    };
+    let y_interpolated = interpolate(left.x, right.x, left.y, right.y, current_usage_bps);
+    interpolate(0, BASIS_POINT_DENOMINATION, curve.min_gas, curve.max_gas, y_interpolated)
+}
+
+ + + +
+ + + +## Function `interpolate` + + + +
fun interpolate(x0: u64, x1: u64, y0: u64, y1: u64, x: u64): u64
+
+ + + +
+Implementation + + +
fun interpolate(x0: u64, x1: u64, y0: u64, y1: u64, x: u64): u64 {
+    y0 + (x - x0) * (y1 - y0) / (x1 - x0)
+}
+
+ + + +
+ + + +## Function `calculate_read_gas` + + + +
fun calculate_read_gas(config: &storage_gas::UsageGasConfig, usage: u64): u64
+
+ + + +
+Implementation + + +
fun calculate_read_gas(config: &UsageGasConfig, usage: u64): u64 {
+    calculate_gas(config.target_usage, usage, &config.read_curve)
+}
+
+ + + +
+ + + +## Function `calculate_create_gas` + + + +
fun calculate_create_gas(config: &storage_gas::UsageGasConfig, usage: u64): u64
+
+ + + +
+Implementation + + +
fun calculate_create_gas(config: &UsageGasConfig, usage: u64): u64 {
+    calculate_gas(config.target_usage, usage, &config.create_curve)
+}
+
+ + + +
+ + + +## Function `calculate_write_gas` + + + +
fun calculate_write_gas(config: &storage_gas::UsageGasConfig, usage: u64): u64
+
+ + + +
+Implementation + + +
fun calculate_write_gas(config: &UsageGasConfig, usage: u64): u64 {
+    calculate_gas(config.target_usage, usage, &config.write_curve)
+}
+
+ + + +
+ + + +## Function `on_reconfig` + + + +
public(friend) fun on_reconfig()
+
+ + + +
+Implementation + + +
public(friend) fun on_reconfig() acquires StorageGas, StorageGasConfig {
+    assert!(
+        exists<StorageGasConfig>(@aptos_framework),
+        error::not_found(ESTORAGE_GAS_CONFIG)
+    );
+    assert!(
+        exists<StorageGas>(@aptos_framework),
+        error::not_found(ESTORAGE_GAS)
+    );
+    let (items, bytes) = state_storage::current_items_and_bytes();
+    let gas_config = borrow_global<StorageGasConfig>(@aptos_framework);
+    let gas = borrow_global_mut<StorageGas>(@aptos_framework);
+    gas.per_item_read = calculate_read_gas(&gas_config.item_config, items);
+    gas.per_item_create = calculate_create_gas(&gas_config.item_config, items);
+    gas.per_item_write = calculate_write_gas(&gas_config.item_config, items);
+    gas.per_byte_read = calculate_read_gas(&gas_config.byte_config, bytes);
+    gas.per_byte_create = calculate_create_gas(&gas_config.byte_config, bytes);
+    gas.per_byte_write = calculate_write_gas(&gas_config.byte_config, bytes);
+}
+
+ + + +
+ + + +## Specification + + + + + + +
fun spec_calculate_gas(max_usage: u64, current_usage: u64, curve: GasCurve): u64;
+
+ + + + + + + +
schema NewGasCurveAbortsIf {
+    min_gas: u64;
+    max_gas: u64;
+    aborts_if max_gas < min_gas;
+    aborts_if max_gas > MAX_U64 / BASIS_POINT_DENOMINATION;
+}
+
+ + +A non decreasing curve must ensure that next is greater than cur. + + + + + +
schema ValidatePointsAbortsIf {
+    points: vector<Point>;
+    // This enforces high-level requirement 2:
+    aborts_if exists i in 0..len(points) - 1: (
+        points[i].x >= points[i + 1].x || points[i].y > points[i + 1].y
+    );
+    aborts_if len(points) > 0 && points[0].x == 0;
+    aborts_if len(points) > 0 && points[len(points) - 1].x == BASIS_POINT_DENOMINATION;
+}
+
+ + + + + +### Struct `Point` + + +
struct Point has copy, drop, store
+
+ + + +
+
+x: u64 +
+
+ x-coordinate basis points, corresponding to utilization + ratio in base_8192_exponential_curve(). +
+
+y: u64 +
+
+ y-coordinate basis points, corresponding to utilization + multiplier in base_8192_exponential_curve(). +
+
+ + + +
invariant x <= BASIS_POINT_DENOMINATION;
+invariant y <= BASIS_POINT_DENOMINATION;
+
+ + + + + +### Struct `UsageGasConfig` + + +
struct UsageGasConfig has copy, drop, store
+
+ + + +
+
+target_usage: u64 +
+
+ +
+
+read_curve: storage_gas::GasCurve +
+
+ +
+
+create_curve: storage_gas::GasCurve +
+
+ +
+
+write_curve: storage_gas::GasCurve +
+
+ +
+
+ + + +
invariant target_usage > 0;
+invariant target_usage <= MAX_U64 / BASIS_POINT_DENOMINATION;
+
+ + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1The module's initialization guarantees the creation of the StorageGasConfig resource with a precise configuration, including accurate gas curves for per-item and per-byte operations.MediumThe initialize function is responsible for setting up the initial state of the module, ensuring the fulfillment of the following conditions: (1) the creation of the StorageGasConfig resource, indicating its existence within the module's context, and (2) the configuration of the StorageGasConfig resource includes the precise gas curves that define the behavior of per-item and per-byte operations.Formally verified via initialize. Moreover, the native gas logic has been manually audited.
2The gas curve approximates an exponential curve based on a minimum and maximum gas charge.HighThe validate_points function ensures that the provided vector of points represents a monotonically non-decreasing curve.Formally verified via validate_points. Moreover, the configuration logic has been manually audited.
3The initialized gas curve structure has values set according to the provided parameters.LowThe new_gas_curve function initializes the GasCurve structure with values provided as parameters.Formally verified via new_gas_curve.
4The initialized usage gas configuration structure has values set according to the provided parameters.LowThe new_usage_gas_config function initializes the UsageGasConfig structure with values provided as parameters.Formally verified via new_usage_gas_config.
+ + + + + + +### Module-level Specification + + +
pragma verify = true;
+pragma aborts_if_is_strict;
+invariant [suspendable] chain_status::is_operating() ==> exists<StorageGasConfig>(@aptos_framework);
+invariant [suspendable] chain_status::is_operating() ==> exists<StorageGas>(@aptos_framework);
+
+ + + + + +### Struct `GasCurve` + + +
struct GasCurve has copy, drop, store
+
+ + + +
+
+min_gas: u64 +
+
+ +
+
+max_gas: u64 +
+
+ +
+
+points: vector<storage_gas::Point> +
+
+ +
+
+ + +Invariant 1: The minimum gas charge does not exceed the maximum gas charge. + + +
invariant min_gas <= max_gas;
+
+ + +Invariant 2: The maximum gas charge is capped by MAX_U64 scaled down by the basis point denomination. + + +
invariant max_gas <= MAX_U64 / BASIS_POINT_DENOMINATION;
+
+ + +Invariant 3: The x-coordinate increases strictly monotonically and the y-coordinate is monotonically non-decreasing, +that is, the gas-curve is a monotonically non-decreasing function. + + +
invariant (len(points) > 0 ==> points[0].x > 0)
+    && (len(points) > 0 ==> points[len(points) - 1].x < BASIS_POINT_DENOMINATION)
+    && (forall i in 0..len(points) - 1: (points[i].x < points[i + 1].x && points[i].y <= points[i + 1].y));
+
+ + + + + +### Function `base_8192_exponential_curve` + + +
public fun base_8192_exponential_curve(min_gas: u64, max_gas: u64): storage_gas::GasCurve
+
+ + + + +
include NewGasCurveAbortsIf;
+
+ + + + + +### Function `new_point` + + +
public fun new_point(x: u64, y: u64): storage_gas::Point
+
+ + + + +
aborts_if x > BASIS_POINT_DENOMINATION || y > BASIS_POINT_DENOMINATION;
+ensures result.x == x;
+ensures result.y == y;
+
+ + + + + +### Function `new_gas_curve` + + +
public fun new_gas_curve(min_gas: u64, max_gas: u64, points: vector<storage_gas::Point>): storage_gas::GasCurve
+
+ + +A non decreasing curve must ensure that next is greater than cur. + + +
pragma verify_duration_estimate = 120;
+include NewGasCurveAbortsIf;
+include ValidatePointsAbortsIf;
+// This enforces high-level requirement 3:
+ensures result == GasCurve {
+    min_gas,
+    max_gas,
+    points
+};
+
+ + + + + +### Function `new_usage_gas_config` + + +
public fun new_usage_gas_config(target_usage: u64, read_curve: storage_gas::GasCurve, create_curve: storage_gas::GasCurve, write_curve: storage_gas::GasCurve): storage_gas::UsageGasConfig
+
+ + + + +
aborts_if target_usage == 0;
+aborts_if target_usage > MAX_U64 / BASIS_POINT_DENOMINATION;
+// This enforces high-level requirement 4:
+ensures result == UsageGasConfig {
+    target_usage,
+    read_curve,
+    create_curve,
+    write_curve,
+};
+
+ + + + + +### Function `new_storage_gas_config` + + +
public fun new_storage_gas_config(item_config: storage_gas::UsageGasConfig, byte_config: storage_gas::UsageGasConfig): storage_gas::StorageGasConfig
+
+ + + + +
aborts_if false;
+ensures result.item_config == item_config;
+ensures result.byte_config == byte_config;
+
+ + + + + +### Function `set_config` + + +
public(friend) fun set_config(aptos_framework: &signer, config: storage_gas::StorageGasConfig)
+
+ + +Signer address must be @aptos_framework and the StorageGasConfig resource must exist. + + +
include system_addresses::AbortsIfNotAptosFramework{ account: aptos_framework };
+aborts_if !exists<StorageGasConfig>(@aptos_framework);
+
+ + + + + +### Function `initialize` + + +
public fun initialize(aptos_framework: &signer)
+
+ + +Signer address must be @aptos_framework. +The StorageGasConfig and StorageGas resources must not exist under @aptos_framework before the function call, +and both must exist after the function is executed. + + +
include system_addresses::AbortsIfNotAptosFramework{ account: aptos_framework };
+pragma verify_duration_estimate = 120;
+aborts_if exists<StorageGasConfig>(@aptos_framework);
+aborts_if exists<StorageGas>(@aptos_framework);
+// This enforces high-level requirement 1:
+ensures exists<StorageGasConfig>(@aptos_framework);
+ensures exists<StorageGas>(@aptos_framework);
+
+ + + + + +### Function `validate_points` + + +
fun validate_points(points: &vector<storage_gas::Point>)
+
+ + +A non decreasing curve must ensure that next is greater than cur. + + +
pragma aborts_if_is_strict = false;
+pragma verify = false;
+pragma opaque;
+include ValidatePointsAbortsIf;
+
+ + + + + +### Function `calculate_gas` + + +
fun calculate_gas(max_usage: u64, current_usage: u64, curve: &storage_gas::GasCurve): u64
+
+ + + + +
pragma opaque;
+pragma verify_duration_estimate = 120;
+requires max_usage > 0;
+requires max_usage <= MAX_U64 / BASIS_POINT_DENOMINATION;
+aborts_if false;
+ensures [abstract] result == spec_calculate_gas(max_usage, current_usage, curve);
+
+ + + + + +### Function `interpolate` + + +
fun interpolate(x0: u64, x1: u64, y0: u64, y1: u64, x: u64): u64
+
+ + + + +
pragma opaque;
+pragma intrinsic;
+aborts_if false;
+
+ + + + + +### Function `on_reconfig` + + +
public(friend) fun on_reconfig()
+
+ + +The StorageGasConfig, StorageGas and StateStorageUsage resources must exist under address @aptos_framework. + + +
requires chain_status::is_operating();
+aborts_if !exists<StorageGasConfig>(@aptos_framework);
+aborts_if !exists<StorageGas>(@aptos_framework);
+aborts_if !exists<state_storage::StateStorageUsage>(@aptos_framework);
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/system_addresses.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/system_addresses.md new file mode 100644 index 0000000000000..d17af72b2f98b --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/system_addresses.md @@ -0,0 +1,594 @@ + + + +# Module `0x1::system_addresses` + + + +- [Constants](#@Constants_0) +- [Function `assert_core_resource`](#0x1_system_addresses_assert_core_resource) +- [Function `assert_core_resource_address`](#0x1_system_addresses_assert_core_resource_address) +- [Function `is_core_resource_address`](#0x1_system_addresses_is_core_resource_address) +- [Function `assert_aptos_framework`](#0x1_system_addresses_assert_aptos_framework) +- [Function `assert_framework_reserved_address`](#0x1_system_addresses_assert_framework_reserved_address) +- [Function `assert_framework_reserved`](#0x1_system_addresses_assert_framework_reserved) +- [Function `is_framework_reserved_address`](#0x1_system_addresses_is_framework_reserved_address) +- [Function `is_aptos_framework_address`](#0x1_system_addresses_is_aptos_framework_address) +- [Function `assert_vm`](#0x1_system_addresses_assert_vm) +- [Function `is_vm`](#0x1_system_addresses_is_vm) +- [Function `is_vm_address`](#0x1_system_addresses_is_vm_address) +- [Function `is_reserved_address`](#0x1_system_addresses_is_reserved_address) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `assert_core_resource`](#@Specification_1_assert_core_resource) + - [Function `assert_core_resource_address`](#@Specification_1_assert_core_resource_address) + - [Function `is_core_resource_address`](#@Specification_1_is_core_resource_address) + - [Function `assert_aptos_framework`](#@Specification_1_assert_aptos_framework) + - [Function 
`assert_framework_reserved_address`](#@Specification_1_assert_framework_reserved_address) + - [Function `assert_framework_reserved`](#@Specification_1_assert_framework_reserved) + - [Function `assert_vm`](#@Specification_1_assert_vm) + + +
use 0x1::error;
+use 0x1::signer;
+
+ + + + + +## Constants + + + + +The address/account did not correspond to the core framework address + + +
const ENOT_APTOS_FRAMEWORK_ADDRESS: u64 = 3;
+
+ + + + + +The address/account did not correspond to the core resource address + + +
const ENOT_CORE_RESOURCE_ADDRESS: u64 = 1;
+
+ + + + + +The address is not framework reserved address + + +
const ENOT_FRAMEWORK_RESERVED_ADDRESS: u64 = 4;
+
+ + + + + +The operation can only be performed by the VM + + +
const EVM: u64 = 2;
+
+ + + + + +## Function `assert_core_resource` + + + +
public fun assert_core_resource(account: &signer)
+
+ + + +
+Implementation + + +
public fun assert_core_resource(account: &signer) {
+    assert_core_resource_address(signer::address_of(account))
+}
+
+ + + +
+ + + +## Function `assert_core_resource_address` + + + +
public fun assert_core_resource_address(addr: address)
+
+ + + +
+Implementation + + +
public fun assert_core_resource_address(addr: address) {
+    assert!(is_core_resource_address(addr), error::permission_denied(ENOT_CORE_RESOURCE_ADDRESS))
+}
+
+ + + +
+ + + +## Function `is_core_resource_address` + + + +
public fun is_core_resource_address(addr: address): bool
+
+ + + +
+Implementation + + +
public fun is_core_resource_address(addr: address): bool {
+    addr == @core_resources
+}
+
+ + + +
+ + + +## Function `assert_aptos_framework` + + + +
public fun assert_aptos_framework(account: &signer)
+
+ + + +
+Implementation + + +
public fun assert_aptos_framework(account: &signer) {
+    assert!(
+        is_aptos_framework_address(signer::address_of(account)),
+        error::permission_denied(ENOT_APTOS_FRAMEWORK_ADDRESS),
+    )
+}
+
+ + + +
+ + + +## Function `assert_framework_reserved_address` + + + +
public fun assert_framework_reserved_address(account: &signer)
+
+ + + +
+Implementation + + +
public fun assert_framework_reserved_address(account: &signer) {
+    assert_framework_reserved(signer::address_of(account));
+}
+
+ + + +
+ + + +## Function `assert_framework_reserved` + + + +
public fun assert_framework_reserved(addr: address)
+
+ + + +
+Implementation + + +
public fun assert_framework_reserved(addr: address) {
+    assert!(
+        is_framework_reserved_address(addr),
+        error::permission_denied(ENOT_FRAMEWORK_RESERVED_ADDRESS),
+    )
+}
+
+ + + +
+ + + +## Function `is_framework_reserved_address` + +Return true if addr is 0x1 or one of the framework reserved addresses 0x2 through 0xa, which are under the on-chain governance's control. + + +
public fun is_framework_reserved_address(addr: address): bool
+
+ + + +
+Implementation + + +
public fun is_framework_reserved_address(addr: address): bool {
+    is_aptos_framework_address(addr) ||
+        addr == @0x2 ||
+        addr == @0x3 ||
+        addr == @0x4 ||
+        addr == @0x5 ||
+        addr == @0x6 ||
+        addr == @0x7 ||
+        addr == @0x8 ||
+        addr == @0x9 ||
+        addr == @0xa
+}
+
+ + + +
+ + + +## Function `is_aptos_framework_address` + +Return true if addr is 0x1. + + +
public fun is_aptos_framework_address(addr: address): bool
+
+ + + +
+Implementation + + +
public fun is_aptos_framework_address(addr: address): bool {
+    addr == @aptos_framework
+}
+
+ + + +
+ + + +## Function `assert_vm` + +Assert that the signer has the VM reserved address. + + +
public fun assert_vm(account: &signer)
+
+ + + +
+Implementation + + +
public fun assert_vm(account: &signer) {
+    assert!(is_vm(account), error::permission_denied(EVM))
+}
+
+ + + +
+ + + +## Function `is_vm` + +Return true if addr is a reserved address for the VM to call system modules. + + +
public fun is_vm(account: &signer): bool
+
+ + + +
+Implementation + + +
public fun is_vm(account: &signer): bool {
+    is_vm_address(signer::address_of(account))
+}
+
+ + + +
+ + + +## Function `is_vm_address` + +Return true if addr is a reserved address for the VM to call system modules. + + +
public fun is_vm_address(addr: address): bool
+
+ + + +
+Implementation + + +
public fun is_vm_address(addr: address): bool {
+    addr == @vm_reserved
+}
+
+ + + +
+ + + +## Function `is_reserved_address` + +Return true if addr is either the VM address or an Aptos Framework address. + + +
public fun is_reserved_address(addr: address): bool
+
+ + + +
+Implementation + + +
public fun is_reserved_address(addr: address): bool {
+    is_aptos_framework_address(addr) || is_vm_address(addr)
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1Asserting that a provided address corresponds to the Core Resources address should always yield a true result when matched.LowThe assert_core_resource and assert_core_resource_address functions ensure that the provided signer or address belongs to the @core_resources account.Formally verified via AbortsIfNotCoreResource.
2Asserting that a provided address corresponds to the Aptos Framework Resources address should always yield a true result when matched.HighThe assert_aptos_framework function ensures that the provided signer belongs to the @aptos_framework account.Formally verified via AbortsIfNotAptosFramework.
3Asserting that a provided address corresponds to the VM address should always yield a true result when matched.HighThe assert_vm function ensures that the provided signer belongs to the @vm_reserved account.Formally verified via AbortsIfNotVM.
+ + + + + + +### Module-level Specification + + +
pragma verify = true;
+pragma aborts_if_is_strict;
+
+ + + + + +### Function `assert_core_resource` + + +
public fun assert_core_resource(account: &signer)
+
+ + + + +
pragma opaque;
+include AbortsIfNotCoreResource { addr: signer::address_of(account) };
+
+ + + + + +### Function `assert_core_resource_address` + + +
public fun assert_core_resource_address(addr: address)
+
+ + + + +
pragma opaque;
+include AbortsIfNotCoreResource;
+
+ + + + + +### Function `is_core_resource_address` + + +
public fun is_core_resource_address(addr: address): bool
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == (addr == @core_resources);
+
+ + + + + +### Function `assert_aptos_framework` + + +
public fun assert_aptos_framework(account: &signer)
+
+ + + + +
pragma opaque;
+include AbortsIfNotAptosFramework;
+
+ + + + + +### Function `assert_framework_reserved_address` + + +
public fun assert_framework_reserved_address(account: &signer)
+
+ + + + +
aborts_if !is_framework_reserved_address(signer::address_of(account));
+
+ + + + + +### Function `assert_framework_reserved` + + +
public fun assert_framework_reserved(addr: address)
+
+ + + + +
aborts_if !is_framework_reserved_address(addr);
+
+ + +Specifies that a function aborts if the account does not have the aptos framework address. + + + + + +
schema AbortsIfNotAptosFramework {
+    account: signer;
+    // This enforces high-level requirement 2:
+    aborts_if signer::address_of(account) != @aptos_framework with error::PERMISSION_DENIED;
+}
+
+ + + + + +### Function `assert_vm` + + +
public fun assert_vm(account: &signer)
+
+ + + + +
pragma opaque;
+include AbortsIfNotVM;
+
+ + +Specifies that a function aborts if the account does not have the VM reserved address. + + + + + +
schema AbortsIfNotVM {
+    account: signer;
+    // This enforces high-level requirement 3:
+    aborts_if signer::address_of(account) != @vm_reserved with error::PERMISSION_DENIED;
+}
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/timestamp.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/timestamp.md new file mode 100644 index 0000000000000..45db531cc6b00 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/timestamp.md @@ -0,0 +1,334 @@ + + + +# Module `0x1::timestamp` + +This module keeps a global wall clock that stores the current Unix time in microseconds. +It interacts with the other modules in the following ways: +* genesis: to initialize the timestamp +* block: to reach consensus on the global wall clock time + + +- [Resource `CurrentTimeMicroseconds`](#0x1_timestamp_CurrentTimeMicroseconds) +- [Constants](#@Constants_0) +- [Function `set_time_has_started`](#0x1_timestamp_set_time_has_started) +- [Function `update_global_time`](#0x1_timestamp_update_global_time) +- [Function `now_microseconds`](#0x1_timestamp_now_microseconds) +- [Function `now_seconds`](#0x1_timestamp_now_seconds) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `update_global_time`](#@Specification_1_update_global_time) + + +
use 0x1::error;
+use 0x1::system_addresses;
+
+ + + + + +## Resource `CurrentTimeMicroseconds` + +A singleton resource holding the current Unix time in microseconds + + +
struct CurrentTimeMicroseconds has key
+
+ + + +
+Fields + + +
+
+microseconds: u64 +
+
+ +
+
+ + +
+ + + +## Constants + + + + +The blockchain is not in an operating state yet + + +
const ENOT_OPERATING: u64 = 1;
+
+ + + + + +An invalid timestamp was provided + + +
const EINVALID_TIMESTAMP: u64 = 2;
+
+ + + + + +Conversion factor between seconds and microseconds + + +
const MICRO_CONVERSION_FACTOR: u64 = 1000000;
+
+ + + + + +## Function `set_time_has_started` + +Marks that time has started. This can only be called from genesis and with the aptos framework account. + + +
public(friend) fun set_time_has_started(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
public(friend) fun set_time_has_started(aptos_framework: &signer) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    let timer = CurrentTimeMicroseconds { microseconds: 0 };
+    move_to(aptos_framework, timer);
+}
+
+ + + +
+ + + +## Function `update_global_time` + +Updates the wall clock time by consensus. Requires VM privilege and will be invoked during block prologue. + + +
public fun update_global_time(account: &signer, proposer: address, timestamp: u64)
+
+ + + +
+Implementation + + +
public fun update_global_time(
+    account: &signer,
+    proposer: address,
+    timestamp: u64
+) acquires CurrentTimeMicroseconds {
+    // Can only be invoked by AptosVM signer.
+    system_addresses::assert_vm(account);
+
+    let global_timer = borrow_global_mut<CurrentTimeMicroseconds>(@aptos_framework);
+    let now = global_timer.microseconds;
+    if (proposer == @vm_reserved) {
+        // NIL block with null address as proposer. Timestamp must be equal.
+        assert!(now == timestamp, error::invalid_argument(EINVALID_TIMESTAMP));
+    } else {
+        // Normal block. Time must advance
+        assert!(now < timestamp, error::invalid_argument(EINVALID_TIMESTAMP));
+        global_timer.microseconds = timestamp;
+    };
+}
+
+ + + +
+ + + +## Function `now_microseconds` + +Gets the current time in microseconds. + + +
#[view]
+public fun now_microseconds(): u64
+
+ + + +
+Implementation + + +
public fun now_microseconds(): u64 acquires CurrentTimeMicroseconds {
+    borrow_global<CurrentTimeMicroseconds>(@aptos_framework).microseconds
+}
+
+ + + +
+ + + +## Function `now_seconds` + +Gets the current time in seconds. + + +
#[view]
+public fun now_seconds(): u64
+
+ + + +
+Implementation + + +
public fun now_seconds(): u64 acquires CurrentTimeMicroseconds {
+    now_microseconds() / MICRO_CONVERSION_FACTOR
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1There should only exist one global wall clock and it should be created during genesis.HighThe function set_time_has_started is only called by genesis::initialize and ensures that no other resources of this type exist by only assigning it to a predefined account.Formally verified via module.
2The global wall clock resource should only be owned by the Aptos framework.HighThe function set_time_has_started ensures that only the aptos_framework account can possess the CurrentTimeMicroseconds resource using the assert_aptos_framework function.Formally verified via module.
3The clock time should only be updated by the VM account.HighThe update_global_time function asserts that the transaction signer is the vm_reserved account.Formally verified via UpdateGlobalTimeAbortsIf.
4The clock time should increase with every update as agreed through consensus and proposed by the current epoch's validator.HighThe update_global_time function asserts that the new timestamp is greater than the current timestamp.Formally verified via UpdateGlobalTimeAbortsIf.
+ + + + + + +### Module-level Specification + + +
// This enforces high-level requirement 1 and high-level requirement 2:
+invariant [suspendable] chain_status::is_operating() ==> exists<CurrentTimeMicroseconds>(@aptos_framework);
+
+ + + + + +### Function `update_global_time` + + +
public fun update_global_time(account: &signer, proposer: address, timestamp: u64)
+
+ + + + +
requires chain_status::is_operating();
+include UpdateGlobalTimeAbortsIf;
+ensures (proposer != @vm_reserved) ==> (spec_now_microseconds() == timestamp);
+
+ + + + + + + +
schema UpdateGlobalTimeAbortsIf {
+    account: signer;
+    proposer: address;
+    timestamp: u64;
+    // This enforces high-level requirement 3:
+    aborts_if !system_addresses::is_vm(account);
+    // This enforces high-level requirement 4:
+    aborts_if (proposer == @vm_reserved) && (spec_now_microseconds() != timestamp);
+    aborts_if (proposer != @vm_reserved) && (spec_now_microseconds() >= timestamp);
+}
+
+ + + + + + + +
fun spec_now_microseconds(): u64 {
+   global<CurrentTimeMicroseconds>(@aptos_framework).microseconds
+}
+
+ + + + + + + +
fun spec_now_seconds(): u64 {
+   spec_now_microseconds() / MICRO_CONVERSION_FACTOR
+}
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/transaction_context.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/transaction_context.md new file mode 100644 index 0000000000000..2bdbe14562187 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/transaction_context.md @@ -0,0 +1,1288 @@ + + + +# Module `0x1::transaction_context` + + + +- [Struct `AUID`](#0x1_transaction_context_AUID) +- [Struct `EntryFunctionPayload`](#0x1_transaction_context_EntryFunctionPayload) +- [Struct `MultisigPayload`](#0x1_transaction_context_MultisigPayload) +- [Constants](#@Constants_0) +- [Function `get_txn_hash`](#0x1_transaction_context_get_txn_hash) +- [Function `get_transaction_hash`](#0x1_transaction_context_get_transaction_hash) +- [Function `generate_unique_address`](#0x1_transaction_context_generate_unique_address) +- [Function `generate_auid_address`](#0x1_transaction_context_generate_auid_address) +- [Function `get_script_hash`](#0x1_transaction_context_get_script_hash) +- [Function `generate_auid`](#0x1_transaction_context_generate_auid) +- [Function `auid_address`](#0x1_transaction_context_auid_address) +- [Function `sender`](#0x1_transaction_context_sender) +- [Function `sender_internal`](#0x1_transaction_context_sender_internal) +- [Function `secondary_signers`](#0x1_transaction_context_secondary_signers) +- [Function `secondary_signers_internal`](#0x1_transaction_context_secondary_signers_internal) +- [Function `gas_payer`](#0x1_transaction_context_gas_payer) +- [Function `gas_payer_internal`](#0x1_transaction_context_gas_payer_internal) +- [Function `max_gas_amount`](#0x1_transaction_context_max_gas_amount) +- [Function `max_gas_amount_internal`](#0x1_transaction_context_max_gas_amount_internal) +- [Function `gas_unit_price`](#0x1_transaction_context_gas_unit_price) +- [Function 
`gas_unit_price_internal`](#0x1_transaction_context_gas_unit_price_internal) +- [Function `chain_id`](#0x1_transaction_context_chain_id) +- [Function `chain_id_internal`](#0x1_transaction_context_chain_id_internal) +- [Function `entry_function_payload`](#0x1_transaction_context_entry_function_payload) +- [Function `entry_function_payload_internal`](#0x1_transaction_context_entry_function_payload_internal) +- [Function `account_address`](#0x1_transaction_context_account_address) +- [Function `module_name`](#0x1_transaction_context_module_name) +- [Function `function_name`](#0x1_transaction_context_function_name) +- [Function `type_arg_names`](#0x1_transaction_context_type_arg_names) +- [Function `args`](#0x1_transaction_context_args) +- [Function `multisig_payload`](#0x1_transaction_context_multisig_payload) +- [Function `multisig_payload_internal`](#0x1_transaction_context_multisig_payload_internal) +- [Function `multisig_address`](#0x1_transaction_context_multisig_address) +- [Function `inner_entry_function_payload`](#0x1_transaction_context_inner_entry_function_payload) +- [Specification](#@Specification_1) + - [Function `get_txn_hash`](#@Specification_1_get_txn_hash) + - [Function `get_transaction_hash`](#@Specification_1_get_transaction_hash) + - [Function `generate_unique_address`](#@Specification_1_generate_unique_address) + - [Function `generate_auid_address`](#@Specification_1_generate_auid_address) + - [Function `get_script_hash`](#@Specification_1_get_script_hash) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `auid_address`](#@Specification_1_auid_address) + - [Function `sender_internal`](#@Specification_1_sender_internal) + - [Function `secondary_signers_internal`](#@Specification_1_secondary_signers_internal) + - [Function `gas_payer_internal`](#@Specification_1_gas_payer_internal) + - [Function `max_gas_amount_internal`](#@Specification_1_max_gas_amount_internal) + - [Function 
`gas_unit_price_internal`](#@Specification_1_gas_unit_price_internal) + - [Function `chain_id_internal`](#@Specification_1_chain_id_internal) + - [Function `entry_function_payload_internal`](#@Specification_1_entry_function_payload_internal) + - [Function `multisig_payload_internal`](#@Specification_1_multisig_payload_internal) + + +
use 0x1::error;
+use 0x1::features;
+use 0x1::option;
+use 0x1::string;
+
+ + + + + +## Struct `AUID` + +A wrapper denoting aptos unique identifier (AUID) +for storing an address + + +
struct AUID has drop, store
+
+ + + +
+Fields + + +
+
+unique_address: address +
+
+ +
+
+ + +
+ + + +## Struct `EntryFunctionPayload` + +Represents the entry function payload. + + +
struct EntryFunctionPayload has copy, drop
+
+ + + +
+Fields + + +
+
+account_address: address +
+
+ +
+
+module_name: string::String +
+
+ +
+
+function_name: string::String +
+
+ +
+
+ty_args_names: vector<string::String> +
+
+ +
+
+args: vector<vector<u8>> +
+
+ +
+
+ + +
+ + + +## Struct `MultisigPayload` + +Represents the multisig payload. + + +
struct MultisigPayload has copy, drop
+
+ + + +
+Fields + + +
+
+multisig_address: address +
+
+ +
+
+entry_function_payload: option::Option<transaction_context::EntryFunctionPayload> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +The transaction context extension feature is not enabled. + + +
const ETRANSACTION_CONTEXT_EXTENSION_NOT_ENABLED: u64 = 2;
+
+ + + + + +Transaction context is only available in the user transaction prologue, execution, or epilogue phases. + + +
const ETRANSACTION_CONTEXT_NOT_AVAILABLE: u64 = 1;
+
+ + + + + +## Function `get_txn_hash` + +Returns the transaction hash of the current transaction. + + +
fun get_txn_hash(): vector<u8>
+
+ + + +
+Implementation + + +
native fun get_txn_hash(): vector<u8>;
+
+ + + +
+ + + +
+ + + +## Function `get_transaction_hash` + +Returns the transaction hash of the current transaction. +Internally calls the private function get_txn_hash. +This function is created to feature gate the get_txn_hash function. + + +
public fun get_transaction_hash(): vector<u8>
+
+ + + +
+Implementation + + +
public fun get_transaction_hash(): vector<u8> {
+    get_txn_hash()
+}
+
+ + + +
+ + + +## Function `generate_unique_address` + +Returns a universally unique identifier (of type address) generated +by hashing the transaction hash of this transaction and a sequence number +specific to this transaction. This function can be called any +number of times inside a single transaction. Each such call increments +the sequence number and generates a new unique address. +Uses Scheme in types/src/transaction/authenticator.rs for domain separation +from other ways of generating unique addresses. + + +
fun generate_unique_address(): address
+
+ + + +
+Implementation + + +
native fun generate_unique_address(): address;
+
+ + + +
+ + + +
+ + + +## Function `generate_auid_address` + +Returns an Aptos unique identifier. Internally calls +the private function generate_unique_address. This function is +created to feature gate the generate_unique_address function. + + +
public fun generate_auid_address(): address
+
+ + + +
+Implementation + + +
public fun generate_auid_address(): address {
+    generate_unique_address()
+}
+
+ + + +
+ + + +## Function `get_script_hash` + +Returns the script hash of the current entry function. + + +
public fun get_script_hash(): vector<u8>
+
+ + + +
+Implementation + + +
public native fun get_script_hash(): vector<u8>;
+
+ + + +
+ + + +## Function `generate_auid` + +This method runs generate_unique_address native function and returns +the generated unique address wrapped in the AUID class. + + +
public fun generate_auid(): transaction_context::AUID
+
+ + + +
+Implementation + + +
public fun generate_auid(): AUID {
+    return AUID {
+        unique_address: generate_unique_address()
+    }
+}
+
+ + + +
+ + + +## Function `auid_address` + +Returns the unique address wrapped in the given AUID struct. + + +
public fun auid_address(auid: &transaction_context::AUID): address
+
+ + + +
+Implementation + + +
public fun auid_address(auid: &AUID): address {
+    auid.unique_address
+}
+
+ + + +
+ + + +## Function `sender` + +Returns the sender's address for the current transaction. +This function aborts if called outside of the transaction prologue, execution, or epilogue phases. + + +
public fun sender(): address
+
+ + + +
+Implementation + + +
public fun sender(): address {
+    assert!(features::transaction_context_extension_enabled(), error::invalid_state(ETRANSACTION_CONTEXT_EXTENSION_NOT_ENABLED));
+    sender_internal()
+}
+
+ + + +
+ + + +## Function `sender_internal` + + + +
fun sender_internal(): address
+
+ + + +
+Implementation + + +
native fun sender_internal(): address;
+
+ + + +
+ + + +## Function `secondary_signers` + +Returns the list of the secondary signers for the current transaction. +If the current transaction has no secondary signers, this function returns an empty vector. +This function aborts if called outside of the transaction prologue, execution, or epilogue phases. + + +
public fun secondary_signers(): vector<address>
+
+ + + +
+Implementation + + +
public fun secondary_signers(): vector<address> {
+    assert!(features::transaction_context_extension_enabled(), error::invalid_state(ETRANSACTION_CONTEXT_EXTENSION_NOT_ENABLED));
+    secondary_signers_internal()
+}
+
+ + + +
+ + + +## Function `secondary_signers_internal` + + + +
fun secondary_signers_internal(): vector<address>
+
+ + + +
+Implementation + + +
native fun secondary_signers_internal(): vector<address>;
+
+ + + +
+ + + +## Function `gas_payer` + +Returns the gas payer address for the current transaction. +It is either the sender's address if no separate gas fee payer is specified for the current transaction, +or the address of the separate gas fee payer if one is specified. +This function aborts if called outside of the transaction prologue, execution, or epilogue phases. + + +
public fun gas_payer(): address
+
+ + + +
+Implementation + + +
public fun gas_payer(): address {
+    assert!(features::transaction_context_extension_enabled(), error::invalid_state(ETRANSACTION_CONTEXT_EXTENSION_NOT_ENABLED));
+    gas_payer_internal()
+}
+
+ + + +
+ + + +## Function `gas_payer_internal` + + + +
fun gas_payer_internal(): address
+
+ + + +
+Implementation + + +
native fun gas_payer_internal(): address;
+
+ + + +
+ + + +## Function `max_gas_amount` + +Returns the max gas amount in units which is specified for the current transaction. +This function aborts if called outside of the transaction prologue, execution, or epilogue phases. + + +
public fun max_gas_amount(): u64
+
+ + + +
+Implementation + + +
public fun max_gas_amount(): u64 {
+    assert!(features::transaction_context_extension_enabled(), error::invalid_state(ETRANSACTION_CONTEXT_EXTENSION_NOT_ENABLED));
+    max_gas_amount_internal()
+}
+
+ + + +
+ + + +## Function `max_gas_amount_internal` + + + +
fun max_gas_amount_internal(): u64
+
+ + + +
+Implementation + + +
native fun max_gas_amount_internal(): u64;
+
+ + + +
+ + + +## Function `gas_unit_price` + +Returns the gas unit price in Octas which is specified for the current transaction. +This function aborts if called outside of the transaction prologue, execution, or epilogue phases. + + +
public fun gas_unit_price(): u64
+
+ + + +
+Implementation + + +
public fun gas_unit_price(): u64 {
+    assert!(features::transaction_context_extension_enabled(), error::invalid_state(ETRANSACTION_CONTEXT_EXTENSION_NOT_ENABLED));
+    gas_unit_price_internal()
+}
+
+ + + +
+ + + +## Function `gas_unit_price_internal` + + + +
fun gas_unit_price_internal(): u64
+
+ + + +
+Implementation + + +
native fun gas_unit_price_internal(): u64;
+
+ + + +
+ + + +## Function `chain_id` + +Returns the chain ID specified for the current transaction. +This function aborts if called outside of the transaction prologue, execution, or epilogue phases. + + +
public fun chain_id(): u8
+
+ + + +
+Implementation + + +
public fun chain_id(): u8 {
+    assert!(features::transaction_context_extension_enabled(), error::invalid_state(ETRANSACTION_CONTEXT_EXTENSION_NOT_ENABLED));
+    chain_id_internal()
+}
+
+ + + +
+ + + +## Function `chain_id_internal` + + + +
fun chain_id_internal(): u8
+
+ + + +
+Implementation + + +
native fun chain_id_internal(): u8;
+
+ + + +
+ + + +## Function `entry_function_payload` + +Returns the entry function payload if the current transaction has such a payload. Otherwise, return None. +This function aborts if called outside of the transaction prologue, execution, or epilogue phases. + + +
public fun entry_function_payload(): option::Option<transaction_context::EntryFunctionPayload>
+
+ + + +
+Implementation + + +
public fun entry_function_payload(): Option<EntryFunctionPayload> {
+    assert!(features::transaction_context_extension_enabled(), error::invalid_state(ETRANSACTION_CONTEXT_EXTENSION_NOT_ENABLED));
+    entry_function_payload_internal()
+}
+
+ + + +
+ + + +## Function `entry_function_payload_internal` + + + +
fun entry_function_payload_internal(): option::Option<transaction_context::EntryFunctionPayload>
+
+ + + +
+Implementation + + +
native fun entry_function_payload_internal(): Option<EntryFunctionPayload>;
+
+ + + +
+ + + +## Function `account_address` + +Returns the account address of the entry function payload. + + +
public fun account_address(payload: &transaction_context::EntryFunctionPayload): address
+
+ + + +
+Implementation + + +
public fun account_address(payload: &EntryFunctionPayload): address {
+    assert!(features::transaction_context_extension_enabled(), error::invalid_state(ETRANSACTION_CONTEXT_EXTENSION_NOT_ENABLED));
+    payload.account_address
+}
+
+ + + +
+ + + +## Function `module_name` + +Returns the module name of the entry function payload. + + +
public fun module_name(payload: &transaction_context::EntryFunctionPayload): string::String
+
+ + + +
+Implementation + + +
public fun module_name(payload: &EntryFunctionPayload): String {
+    assert!(features::transaction_context_extension_enabled(), error::invalid_state(ETRANSACTION_CONTEXT_EXTENSION_NOT_ENABLED));
+    payload.module_name
+}
+
+ + + +
+ + + +## Function `function_name` + +Returns the function name of the entry function payload. + + +
public fun function_name(payload: &transaction_context::EntryFunctionPayload): string::String
+
+ + + +
+Implementation + + +
public fun function_name(payload: &EntryFunctionPayload): String {
+    assert!(features::transaction_context_extension_enabled(), error::invalid_state(ETRANSACTION_CONTEXT_EXTENSION_NOT_ENABLED));
+    payload.function_name
+}
+
+ + + +
+ + + +## Function `type_arg_names` + +Returns the type arguments names of the entry function payload. + + +
public fun type_arg_names(payload: &transaction_context::EntryFunctionPayload): vector<string::String>
+
+ + + +
+Implementation + + +
public fun type_arg_names(payload: &EntryFunctionPayload): vector<String> {
+    assert!(features::transaction_context_extension_enabled(), error::invalid_state(ETRANSACTION_CONTEXT_EXTENSION_NOT_ENABLED));
+    payload.ty_args_names
+}
+
+ + + +
+ + + +## Function `args` + +Returns the arguments of the entry function payload. + + +
public fun args(payload: &transaction_context::EntryFunctionPayload): vector<vector<u8>>
+
+ + + +
+Implementation + + +
public fun args(payload: &EntryFunctionPayload): vector<vector<u8>> {
+    assert!(features::transaction_context_extension_enabled(), error::invalid_state(ETRANSACTION_CONTEXT_EXTENSION_NOT_ENABLED));
+    payload.args
+}
+
+ + + +
+ + + +## Function `multisig_payload` + +Returns the multisig payload if the current transaction has such a payload. Otherwise, return None. +This function aborts if called outside of the transaction prologue, execution, or epilogue phases. + + +
public fun multisig_payload(): option::Option<transaction_context::MultisigPayload>
+
+ + + +
+Implementation + + +
public fun multisig_payload(): Option<MultisigPayload> {
+    assert!(features::transaction_context_extension_enabled(), error::invalid_state(ETRANSACTION_CONTEXT_EXTENSION_NOT_ENABLED));
+    multisig_payload_internal()
+}
+
+ + + +
+ + + +## Function `multisig_payload_internal` + + + +
fun multisig_payload_internal(): option::Option<transaction_context::MultisigPayload>
+
+ + + +
+Implementation + + +
native fun multisig_payload_internal(): Option<MultisigPayload>;
+
+ + + +
+ + + +## Function `multisig_address` + +Returns the multisig account address of the multisig payload. + + +
public fun multisig_address(payload: &transaction_context::MultisigPayload): address
+
+ + + +
+Implementation + + +
public fun multisig_address(payload: &MultisigPayload): address {
+    assert!(features::transaction_context_extension_enabled(), error::invalid_state(ETRANSACTION_CONTEXT_EXTENSION_NOT_ENABLED));
+    payload.multisig_address
+}
+
+ + + +
+ + + +## Function `inner_entry_function_payload` + +Returns the inner entry function payload of the multisig payload. + + +
public fun inner_entry_function_payload(payload: &transaction_context::MultisigPayload): option::Option<transaction_context::EntryFunctionPayload>
+
+ + + +
+Implementation + + +
public fun inner_entry_function_payload(payload: &MultisigPayload): Option<EntryFunctionPayload> {
+    assert!(features::transaction_context_extension_enabled(), error::invalid_state(ETRANSACTION_CONTEXT_EXTENSION_NOT_ENABLED));
+    payload.entry_function_payload
+}
+
+ + + +
+ + + +## Specification + + + + +### Function `get_txn_hash` + + +
fun get_txn_hash(): vector<u8>
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures result == spec_get_txn_hash();
+
+ + + + + + + +
fun spec_get_txn_hash(): vector<u8>;
+
+ + + + + +### Function `get_transaction_hash` + + +
public fun get_transaction_hash(): vector<u8>
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures result == spec_get_txn_hash();
+// This enforces high-level requirement 1:
+ensures [abstract] len(result) == 32;
+
+ + + + + +### Function `generate_unique_address` + + +
fun generate_unique_address(): address
+
+ + + + +
pragma opaque;
+ensures [abstract] result == spec_generate_unique_address();
+
+ + + + + + + +
fun spec_generate_unique_address(): address;
+
+ + + + + +### Function `generate_auid_address` + + +
public fun generate_auid_address(): address
+
+ + + + +
pragma opaque;
+// This enforces high-level requirement 3:
+ensures [abstract] result == spec_generate_unique_address();
+
+ + + + + +### Function `get_script_hash` + + +
public fun get_script_hash(): vector<u8>
+
+ + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1Fetching the transaction hash should return a vector with 32 bytes.MediumThe get_transaction_hash function calls the native function get_txn_hash, which fetches the NativeTransactionContext struct and returns the txn_hash field.Audited that the native function returns the txn hash, whose size is 32 bytes. This has been modeled as the abstract postcondition that the returned vector is of length 32. Formally verified via get_txn_hash.
2Fetching the unique address should never abort.LowThe function auid_address returns the unique address from a supplied AUID resource.Formally verified via auid_address.
3Generating the unique address should return a vector with 32 bytes.MediumThe generate_auid_address function calls the native function generate_unique_address which fetches the NativeTransactionContext struct, increments the auid_counter by one, and then creates a new authentication key from a preimage, which is then returned.Audited that the native function returns an address, and the length of an address is 32 bytes. This has been modeled as the abstract postcondition that the returned vector is of length 32. Formally verified via generate_auid_address.
4Fetching the script hash of the current entry function should never fail and should return a vector with 32 bytes if the transaction payload is a script, otherwise an empty vector.LowThe native function get_script_hash returns the NativeTransactionContext.script_hash field.Audited that the native function holds the required property. This has been modeled as the abstract spec. Formally verified via get_script_hash.
+ + + + + +### Module-level Specification + + +
pragma opaque;
+// This enforces high-level requirement 4:
+aborts_if [abstract] false;
+ensures [abstract] result == spec_get_script_hash();
+ensures [abstract] len(result) == 32;
+
+ + + + + + + +
fun spec_get_script_hash(): vector<u8>;
+
+ + + + + +### Function `auid_address` + + +
public fun auid_address(auid: &transaction_context::AUID): address
+
+ + + + +
// This enforces high-level requirement 2:
+aborts_if false;
+
+ + + + + +### Function `sender_internal` + + +
fun sender_internal(): address
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `secondary_signers_internal` + + +
fun secondary_signers_internal(): vector<address>
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `gas_payer_internal` + + +
fun gas_payer_internal(): address
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `max_gas_amount_internal` + + +
fun max_gas_amount_internal(): u64
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `gas_unit_price_internal` + + +
fun gas_unit_price_internal(): u64
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `chain_id_internal` + + +
fun chain_id_internal(): u8
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `entry_function_payload_internal` + + +
fun entry_function_payload_internal(): option::Option<transaction_context::EntryFunctionPayload>
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `multisig_payload_internal` + + +
fun multisig_payload_internal(): option::Option<transaction_context::MultisigPayload>
+
+ + + + +
pragma opaque;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/transaction_fee.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/transaction_fee.md new file mode 100644 index 0000000000000..8051db864cac9 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/transaction_fee.md @@ -0,0 +1,1245 @@ + + + +# Module `0x1::transaction_fee` + +This module provides an interface to burn or collect and redistribute transaction fees. + + +- [Resource `AptosCoinCapabilities`](#0x1_transaction_fee_AptosCoinCapabilities) +- [Resource `AptosFABurnCapabilities`](#0x1_transaction_fee_AptosFABurnCapabilities) +- [Resource `AptosCoinMintCapability`](#0x1_transaction_fee_AptosCoinMintCapability) +- [Resource `CollectedFeesPerBlock`](#0x1_transaction_fee_CollectedFeesPerBlock) +- [Struct `FeeStatement`](#0x1_transaction_fee_FeeStatement) +- [Constants](#@Constants_0) +- [Function `initialize_fee_collection_and_distribution`](#0x1_transaction_fee_initialize_fee_collection_and_distribution) +- [Function `is_fees_collection_enabled`](#0x1_transaction_fee_is_fees_collection_enabled) +- [Function `upgrade_burn_percentage`](#0x1_transaction_fee_upgrade_burn_percentage) +- [Function `register_proposer_for_fee_collection`](#0x1_transaction_fee_register_proposer_for_fee_collection) +- [Function `burn_coin_fraction`](#0x1_transaction_fee_burn_coin_fraction) +- [Function `process_collected_fees`](#0x1_transaction_fee_process_collected_fees) +- [Function `burn_fee`](#0x1_transaction_fee_burn_fee) +- [Function `mint_and_refund`](#0x1_transaction_fee_mint_and_refund) +- [Function `collect_fee`](#0x1_transaction_fee_collect_fee) +- [Function `store_aptos_coin_burn_cap`](#0x1_transaction_fee_store_aptos_coin_burn_cap) +- [Function `convert_to_aptos_fa_burn_ref`](#0x1_transaction_fee_convert_to_aptos_fa_burn_ref) +- [Function `store_aptos_coin_mint_cap`](#0x1_transaction_fee_store_aptos_coin_mint_cap) 
+- [Function `initialize_storage_refund`](#0x1_transaction_fee_initialize_storage_refund) +- [Function `emit_fee_statement`](#0x1_transaction_fee_emit_fee_statement) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Resource `CollectedFeesPerBlock`](#@Specification_1_CollectedFeesPerBlock) + - [Function `initialize_fee_collection_and_distribution`](#@Specification_1_initialize_fee_collection_and_distribution) + - [Function `upgrade_burn_percentage`](#@Specification_1_upgrade_burn_percentage) + - [Function `register_proposer_for_fee_collection`](#@Specification_1_register_proposer_for_fee_collection) + - [Function `burn_coin_fraction`](#@Specification_1_burn_coin_fraction) + - [Function `process_collected_fees`](#@Specification_1_process_collected_fees) + - [Function `burn_fee`](#@Specification_1_burn_fee) + - [Function `mint_and_refund`](#@Specification_1_mint_and_refund) + - [Function `collect_fee`](#@Specification_1_collect_fee) + - [Function `store_aptos_coin_burn_cap`](#@Specification_1_store_aptos_coin_burn_cap) + - [Function `store_aptos_coin_mint_cap`](#@Specification_1_store_aptos_coin_mint_cap) + - [Function `initialize_storage_refund`](#@Specification_1_initialize_storage_refund) + - [Function `emit_fee_statement`](#@Specification_1_emit_fee_statement) + + +
use 0x1::aptos_account;
+use 0x1::aptos_coin;
+use 0x1::coin;
+use 0x1::error;
+use 0x1::event;
+use 0x1::features;
+use 0x1::fungible_asset;
+use 0x1::option;
+use 0x1::signer;
+use 0x1::stake;
+use 0x1::system_addresses;
+
+ + + + + +## Resource `AptosCoinCapabilities` + +Stores burn capability to burn the gas fees. + + +
struct AptosCoinCapabilities has key
+
+ + + +
+Fields + + +
+
+burn_cap: coin::BurnCapability<aptos_coin::AptosCoin> +
+
+ +
+
+ + +
+ + + +## Resource `AptosFABurnCapabilities` + +Stores burn capability to burn the gas fees. + + +
struct AptosFABurnCapabilities has key
+
+ + + +
+Fields + + +
+
+burn_ref: fungible_asset::BurnRef +
+
+ +
+
+ + +
+ + + +## Resource `AptosCoinMintCapability` + +Stores mint capability to mint the refunds. + + +
struct AptosCoinMintCapability has key
+
+ + + +
+Fields + + +
+
+mint_cap: coin::MintCapability<aptos_coin::AptosCoin> +
+
+ +
+
+ + +
+ + + +## Resource `CollectedFeesPerBlock` + +Stores information about the block proposer and the amount of fees +collected when executing the block. + + +
struct CollectedFeesPerBlock has key
+
+ + + +
+Fields + + +
+
+amount: coin::AggregatableCoin<aptos_coin::AptosCoin> +
+
+ +
+
+proposer: option::Option<address> +
+
+ +
+
+burn_percentage: u8 +
+
+ +
+
+ + +
+ + + +## Struct `FeeStatement` + +Breakdown of fee charge and refund for a transaction. +The structure is: + +- Net charge or refund (not in the statement) +- total charge: total_charge_gas_units, matches gas_used in the on-chain TransactionInfo. +This is the sum of the sub-items below. Notice that there's potential precision loss when +the conversion between internal and external gas units and between native token and gas +units, so it's possible that the numbers don't add up exactly. -- This number is the final +charge, while the break down is merely informational. +- gas charge for execution (CPU time): execution_gas_units +- gas charge for IO (storage random access): io_gas_units +- storage fee charge (storage space): storage_fee_octas, to be included in +total_charge_gas_unit, this number is converted to gas units according to the user +specified gas_unit_price on the transaction. +- storage deletion refund: storage_fee_refund_octas, this is not included in gas_used or +total_charge_gas_units, the net charge / refund is calculated by +total_charge_gas_units * gas_unit_price - storage_fee_refund_octas. + +This is meant to emitted as a module event. + + +
#[event]
+struct FeeStatement has drop, store
+
+ + + +
+Fields + + +
+
+total_charge_gas_units: u64 +
+
+ Total gas charge. +
+
+execution_gas_units: u64 +
+
+ Execution gas charge. +
+
+io_gas_units: u64 +
+
+ IO gas charge. +
+
+storage_fee_octas: u64 +
+
+ Storage fee charge. +
+
+storage_fee_refund_octas: u64 +
+
+ Storage fee refund. +
+
+ + +
+ + + +## Constants + + + + +Gas fees are already being collected and the struct holding +information about collected amounts is already published. + + +
const EALREADY_COLLECTING_FEES: u64 = 1;
+
+ + + + + + + +
const EFA_GAS_CHARGING_NOT_ENABLED: u64 = 5;
+
+ + + + + +The burn percentage is out of range [0, 100]. + + +
const EINVALID_BURN_PERCENTAGE: u64 = 3;
+
+ + + + + +No longer supported. + + +
const ENO_LONGER_SUPPORTED: u64 = 4;
+
+ + + + + +## Function `initialize_fee_collection_and_distribution` + +Initializes the resource storing information about gas fees collection and +distribution. Should be called by on-chain governance. + + +
public fun initialize_fee_collection_and_distribution(aptos_framework: &signer, burn_percentage: u8)
+
+ + + +
+Implementation + + +
public fun initialize_fee_collection_and_distribution(aptos_framework: &signer, burn_percentage: u8) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    assert!(
+        !exists<CollectedFeesPerBlock>(@aptos_framework),
+        error::already_exists(EALREADY_COLLECTING_FEES)
+    );
+    assert!(burn_percentage <= 100, error::out_of_range(EINVALID_BURN_PERCENTAGE));
+
+    // Make sure staking module is aware of transaction fees collection.
+    stake::initialize_validator_fees(aptos_framework);
+
+    // Initially, no fees are collected and the block proposer is not set.
+    let collected_fees = CollectedFeesPerBlock {
+        amount: coin::initialize_aggregatable_coin(aptos_framework),
+        proposer: option::none(),
+        burn_percentage,
+    };
+    move_to(aptos_framework, collected_fees);
+}
+
+ + + +
+ + + +## Function `is_fees_collection_enabled` + + + +
fun is_fees_collection_enabled(): bool
+
+ + + +
+Implementation + + +
fun is_fees_collection_enabled(): bool {
+    exists<CollectedFeesPerBlock>(@aptos_framework)
+}
+
+ + + +
+ + + +## Function `upgrade_burn_percentage` + +Sets the burn percentage for collected fees to a new value. Should be called by on-chain governance. + + +
public fun upgrade_burn_percentage(aptos_framework: &signer, new_burn_percentage: u8)
+
+ + + +
+Implementation + + +
public fun upgrade_burn_percentage(
+    aptos_framework: &signer,
+    new_burn_percentage: u8
+) acquires AptosCoinCapabilities, CollectedFeesPerBlock {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    assert!(new_burn_percentage <= 100, error::out_of_range(EINVALID_BURN_PERCENTAGE));
+
+    // Prior to upgrading the burn percentage, make sure to process collected
+    // fees. Otherwise we would use the new (incorrect) burn_percentage when
+    // processing fees later!
+    process_collected_fees();
+
+    if (is_fees_collection_enabled()) {
+        // Upgrade has no effect unless fees are being collected.
+        let burn_percentage = &mut borrow_global_mut<CollectedFeesPerBlock>(@aptos_framework).burn_percentage;
+        *burn_percentage = new_burn_percentage
+    }
+}
+
+ + + +
+ + + +## Function `register_proposer_for_fee_collection` + +Registers the proposer of the block for gas fees collection. This function +can only be called at the beginning of the block. + + +
public(friend) fun register_proposer_for_fee_collection(proposer_addr: address)
+
+ + + +
+Implementation + + +
public(friend) fun register_proposer_for_fee_collection(proposer_addr: address) acquires CollectedFeesPerBlock {
+    if (is_fees_collection_enabled()) {
+        let collected_fees = borrow_global_mut<CollectedFeesPerBlock>(@aptos_framework);
+        let _ = option::swap_or_fill(&mut collected_fees.proposer, proposer_addr);
+    }
+}
+
+ + + +
+ + + +## Function `burn_coin_fraction` + +Burns a specified fraction of the coin. + + +
fun burn_coin_fraction(coin: &mut coin::Coin<aptos_coin::AptosCoin>, burn_percentage: u8)
+
+ + + +
+Implementation + + +
fun burn_coin_fraction(coin: &mut Coin<AptosCoin>, burn_percentage: u8) acquires AptosCoinCapabilities {
+    assert!(burn_percentage <= 100, error::out_of_range(EINVALID_BURN_PERCENTAGE));
+
+    let collected_amount = coin::value(coin);
+    spec {
+        // We assume that `burn_percentage * collected_amount` does not overflow.
+        assume burn_percentage * collected_amount <= MAX_U64;
+    };
+    let amount_to_burn = (burn_percentage as u64) * collected_amount / 100;
+    if (amount_to_burn > 0) {
+        let coin_to_burn = coin::extract(coin, amount_to_burn);
+        coin::burn(
+            coin_to_burn,
+            &borrow_global<AptosCoinCapabilities>(@aptos_framework).burn_cap,
+        );
+    }
+}
+
+ + + +
+ + + +## Function `process_collected_fees` + +Calculates the fee which should be distributed to the block proposer at the +end of an epoch, and records it in the system. This function can only be called +at the beginning of the block or during reconfiguration. + + +
public(friend) fun process_collected_fees()
+
+ + + +
+Implementation + + +
public(friend) fun process_collected_fees() acquires AptosCoinCapabilities, CollectedFeesPerBlock {
+    if (!is_fees_collection_enabled()) {
+        return
+    };
+    let collected_fees = borrow_global_mut<CollectedFeesPerBlock>(@aptos_framework);
+
+    // If there are no collected fees, only unset the proposer. See the rationale for
+    // setting proposer to option::none() below.
+    if (coin::is_aggregatable_coin_zero(&collected_fees.amount)) {
+        if (option::is_some(&collected_fees.proposer)) {
+            let _ = option::extract(&mut collected_fees.proposer);
+        };
+        return
+    };
+
+    // Otherwise get the collected fee, and check if it can be distributed later.
+    let coin = coin::drain_aggregatable_coin(&mut collected_fees.amount);
+    if (option::is_some(&collected_fees.proposer)) {
+        // Extract the address of proposer here and reset it to option::none(). This
+        // is particularly useful to avoid any undesired side-effects where coins are
+        // collected but never distributed or distributed to the wrong account.
+        // With this design, processing collected fees enforces that all fees will be burnt
+        // unless the proposer is specified in the block prologue. When we have a governance
+        // proposal that triggers reconfiguration, we distribute pending fees and burn the
+        // fee for the proposal. Otherwise, that fee would be leaked to the next block.
+        let proposer = option::extract(&mut collected_fees.proposer);
+
+        // Since the block can be produced by the VM itself, we have to make sure we catch
+        // this case.
+        if (proposer == @vm_reserved) {
+            burn_coin_fraction(&mut coin, 100);
+            coin::destroy_zero(coin);
+            return
+        };
+
+        burn_coin_fraction(&mut coin, collected_fees.burn_percentage);
+        stake::add_transaction_fee(proposer, coin);
+        return
+    };
+
+    // If checks did not pass, simply burn all collected coins and return none.
+    burn_coin_fraction(&mut coin, 100);
+    coin::destroy_zero(coin)
+}
+
+ + + +
+ + + +## Function `burn_fee` + +Burn transaction fees in epilogue. + + +
public(friend) fun burn_fee(account: address, fee: u64)
+
+ + + +
+Implementation + + +
public(friend) fun burn_fee(account: address, fee: u64) acquires AptosFABurnCapabilities, AptosCoinCapabilities {
+    if (exists<AptosFABurnCapabilities>(@aptos_framework)) {
+        let burn_ref = &borrow_global<AptosFABurnCapabilities>(@aptos_framework).burn_ref;
+        aptos_account::burn_from_fungible_store(burn_ref, account, fee);
+    } else {
+        let burn_cap = &borrow_global<AptosCoinCapabilities>(@aptos_framework).burn_cap;
+        if (features::operations_default_to_fa_apt_store_enabled()) {
+            let (burn_ref, burn_receipt) = coin::get_paired_burn_ref(burn_cap);
+            aptos_account::burn_from_fungible_store(&burn_ref, account, fee);
+            coin::return_paired_burn_ref(burn_ref, burn_receipt);
+        } else {
+            coin::burn_from<AptosCoin>(
+                account,
+                fee,
+                burn_cap,
+            );
+        };
+    };
+}
+
+ + + +
+ + + +## Function `mint_and_refund` + +Mint refund in epilogue. + + +
public(friend) fun mint_and_refund(account: address, refund: u64)
+
+ + + +
+Implementation + + +
public(friend) fun mint_and_refund(account: address, refund: u64) acquires AptosCoinMintCapability {
+    let mint_cap = &borrow_global<AptosCoinMintCapability>(@aptos_framework).mint_cap;
+    let refund_coin = coin::mint(refund, mint_cap);
+    coin::force_deposit(account, refund_coin);
+}
+
+ + + +
+ + + +## Function `collect_fee` + +Collect transaction fees in epilogue. + + +
public(friend) fun collect_fee(account: address, fee: u64)
+
+ + + +
+Implementation + + +
public(friend) fun collect_fee(account: address, fee: u64) acquires CollectedFeesPerBlock {
+    let collected_fees = borrow_global_mut<CollectedFeesPerBlock>(@aptos_framework);
+
+    // Here, we are always optimistic and always collect fees. If the proposer is not set,
+    // or we cannot redistribute fees later for some reason (e.g. account cannot receive AptosCoin)
+    // we burn them all at once. This way we avoid having a check for every transaction epilogue.
+    let collected_amount = &mut collected_fees.amount;
+    coin::collect_into_aggregatable_coin<AptosCoin>(account, fee, collected_amount);
+}
+
+ + + +
+ + + +## Function `store_aptos_coin_burn_cap` + +Only called during genesis. + + +
public(friend) fun store_aptos_coin_burn_cap(aptos_framework: &signer, burn_cap: coin::BurnCapability<aptos_coin::AptosCoin>)
+
+ + + +
+Implementation + + +
public(friend) fun store_aptos_coin_burn_cap(aptos_framework: &signer, burn_cap: BurnCapability<AptosCoin>) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+
+    if (features::operations_default_to_fa_apt_store_enabled()) {
+        let burn_ref = coin::convert_and_take_paired_burn_ref(burn_cap);
+        move_to(aptos_framework, AptosFABurnCapabilities { burn_ref });
+    } else {
+        move_to(aptos_framework, AptosCoinCapabilities { burn_cap })
+    }
+}
+
+ + + +
+ + + +## Function `convert_to_aptos_fa_burn_ref` + + + +
public entry fun convert_to_aptos_fa_burn_ref(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
public entry fun convert_to_aptos_fa_burn_ref(aptos_framework: &signer) acquires AptosCoinCapabilities {
+    assert!(features::operations_default_to_fa_apt_store_enabled(), EFA_GAS_CHARGING_NOT_ENABLED);
+    system_addresses::assert_aptos_framework(aptos_framework);
+    let AptosCoinCapabilities {
+        burn_cap,
+    } = move_from<AptosCoinCapabilities>(signer::address_of(aptos_framework));
+    let burn_ref = coin::convert_and_take_paired_burn_ref(burn_cap);
+    move_to(aptos_framework, AptosFABurnCapabilities { burn_ref });
+}
+
+ + + +
+ + + +## Function `store_aptos_coin_mint_cap` + +Only called during genesis. + + +
public(friend) fun store_aptos_coin_mint_cap(aptos_framework: &signer, mint_cap: coin::MintCapability<aptos_coin::AptosCoin>)
+
+ + + +
+Implementation + + +
public(friend) fun store_aptos_coin_mint_cap(aptos_framework: &signer, mint_cap: MintCapability<AptosCoin>) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    move_to(aptos_framework, AptosCoinMintCapability { mint_cap })
+}
+
+ + + +
+ + + +## Function `initialize_storage_refund` + + + +
#[deprecated]
+public fun initialize_storage_refund(_: &signer)
+
+ + + +
+Implementation + + +
public fun initialize_storage_refund(_: &signer) {
+    abort error::not_implemented(ENO_LONGER_SUPPORTED)
+}
+
+ + + +
+ + + +## Function `emit_fee_statement` + + + +
fun emit_fee_statement(fee_statement: transaction_fee::FeeStatement)
+
+ + + +
+Implementation + + +
fun emit_fee_statement(fee_statement: FeeStatement) {
+    event::emit(fee_statement)
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1Given the blockchain is in an operating state, it guarantees that the Aptos framework signer may burn Aptos coins.CriticalThe AptosCoinCapabilities structure is defined in this module and it stores burn capability to burn the gas fees.Formally Verified via module.
2The initialization function may only be called once.MediumThe initialize_fee_collection_and_distribution function ensures CollectedFeesPerBlock does not already exist.Formally verified via initialize_fee_collection_and_distribution.
3Only the admin address is authorized to call the initialization function.CriticalThe initialize_fee_collection_and_distribution function ensures only the Aptos framework address calls it.Formally verified via initialize_fee_collection_and_distribution.
4The percentage of the burnt collected fee is always a value from 0 to 100.MediumDuring the initialization of CollectedFeesPerBlock in initialize_fee_collection_and_distribution, and while upgrading burn percentage, it asserts that burn_percentage is within the specified limits.Formally verified via CollectedFeesPerBlock.
5Prior to upgrading the burn percentage, it must process all the fees collected up to that point.CriticalThe upgrade_burn_percentage function ensures process_collected_fees function is called before updating the burn percentage.Formally verified in ProcessCollectedFeesRequiresAndEnsures.
6The presence of the resource, indicating collected fees per block under the Aptos framework account, is a prerequisite for the successful execution of the following functionalities: Upgrading burn percentage. Registering a block proposer. Processing collected fees.LowThe functions: upgrade_burn_percentage, register_proposer_for_fee_collection, and process_collected_fees all ensure that the CollectedFeesPerBlock resource exists under aptos_framework by calling the is_fees_collection_enabled method, which returns a boolean value confirming if the resource exists or not.Formally verified via register_proposer_for_fee_collection, process_collected_fees, and upgrade_burn_percentage.
+ + + + + + +### Module-level Specification + + +
pragma verify = false;
+pragma aborts_if_is_strict;
+// This enforces high-level requirement 1:
+invariant [suspendable] chain_status::is_operating() ==> exists<AptosCoinCapabilities>(@aptos_framework) || exists<AptosFABurnCapabilities>(@aptos_framework);
+
+ + + + + +### Resource `CollectedFeesPerBlock` + + +
struct CollectedFeesPerBlock has key
+
+ + + +
+
+amount: coin::AggregatableCoin<aptos_coin::AptosCoin> +
+
+ +
+
+proposer: option::Option<address> +
+
+ +
+
+burn_percentage: u8 +
+
+ +
+
+ + + +
// This enforces high-level requirement 4:
+invariant burn_percentage <= 100;
+
+ + + + + +### Function `initialize_fee_collection_and_distribution` + + +
public fun initialize_fee_collection_and_distribution(aptos_framework: &signer, burn_percentage: u8)
+
+ + + + +
// This enforces high-level requirement 2:
+aborts_if exists<CollectedFeesPerBlock>(@aptos_framework);
+aborts_if burn_percentage > 100;
+let aptos_addr = signer::address_of(aptos_framework);
+// This enforces high-level requirement 3:
+aborts_if !system_addresses::is_aptos_framework_address(aptos_addr);
+aborts_if exists<ValidatorFees>(aptos_addr);
+include system_addresses::AbortsIfNotAptosFramework { account: aptos_framework };
+include aggregator_factory::CreateAggregatorInternalAbortsIf;
+aborts_if exists<CollectedFeesPerBlock>(aptos_addr);
+ensures exists<ValidatorFees>(aptos_addr);
+ensures exists<CollectedFeesPerBlock>(aptos_addr);
+
+ + + + + +### Function `upgrade_burn_percentage` + + +
public fun upgrade_burn_percentage(aptos_framework: &signer, new_burn_percentage: u8)
+
+ + + + +
aborts_if new_burn_percentage > 100;
+let aptos_addr = signer::address_of(aptos_framework);
+aborts_if !system_addresses::is_aptos_framework_address(aptos_addr);
+// This enforces high-level requirement 5 and high-level requirement 6:
+include ProcessCollectedFeesRequiresAndEnsures;
+ensures exists<CollectedFeesPerBlock>(@aptos_framework) ==>
+    global<CollectedFeesPerBlock>(@aptos_framework).burn_percentage == new_burn_percentage;
+
+ + + + + +### Function `register_proposer_for_fee_collection` + + +
public(friend) fun register_proposer_for_fee_collection(proposer_addr: address)
+
+ + + + +
aborts_if false;
+// This enforces high-level requirement 6:
+ensures is_fees_collection_enabled() ==>
+    option::spec_borrow(global<CollectedFeesPerBlock>(@aptos_framework).proposer) == proposer_addr;
+
+ + + + + +### Function `burn_coin_fraction` + + +
fun burn_coin_fraction(coin: &mut coin::Coin<aptos_coin::AptosCoin>, burn_percentage: u8)
+
+ + + + +
requires burn_percentage <= 100;
+requires exists<AptosCoinCapabilities>(@aptos_framework);
+requires exists<CoinInfo<AptosCoin>>(@aptos_framework);
+let amount_to_burn = (burn_percentage * coin::value(coin)) / 100;
+include amount_to_burn > 0 ==> coin::CoinSubAbortsIf<AptosCoin> { amount: amount_to_burn };
+ensures coin.value == old(coin).value - amount_to_burn;
+
+ + + + + + + +
fun collectedFeesAggregator(): AggregatableCoin<AptosCoin> {
+   global<CollectedFeesPerBlock>(@aptos_framework).amount
+}
+
+ + + + + + + +
schema RequiresCollectedFeesPerValueLeqBlockAptosSupply {
+    let maybe_supply = coin::get_coin_supply_opt<AptosCoin>();
+    requires
+        (is_fees_collection_enabled() && option::is_some(maybe_supply)) ==>
+            (aggregator::spec_aggregator_get_val(global<CollectedFeesPerBlock>(@aptos_framework).amount.value) <=
+                optional_aggregator::optional_aggregator_value(
+                    option::spec_borrow(coin::get_coin_supply_opt<AptosCoin>())
+                ));
+}
+
+ + + + + + + +
schema ProcessCollectedFeesRequiresAndEnsures {
+    requires exists<AptosCoinCapabilities>(@aptos_framework);
+    requires exists<stake::ValidatorFees>(@aptos_framework);
+    requires exists<CoinInfo<AptosCoin>>(@aptos_framework);
+    include RequiresCollectedFeesPerValueLeqBlockAptosSupply;
+    aborts_if false;
+    let collected_fees = global<CollectedFeesPerBlock>(@aptos_framework);
+    let post post_collected_fees = global<CollectedFeesPerBlock>(@aptos_framework);
+    let pre_amount = aggregator::spec_aggregator_get_val(collected_fees.amount.value);
+    let post post_amount = aggregator::spec_aggregator_get_val(post_collected_fees.amount.value);
+    let fees_table = global<stake::ValidatorFees>(@aptos_framework).fees_table;
+    let post post_fees_table = global<stake::ValidatorFees>(@aptos_framework).fees_table;
+    let proposer = option::spec_borrow(collected_fees.proposer);
+    let fee_to_add = pre_amount - pre_amount * collected_fees.burn_percentage / 100;
+    ensures is_fees_collection_enabled() ==> option::spec_is_none(post_collected_fees.proposer) && post_amount == 0;
+    ensures is_fees_collection_enabled() && aggregator::spec_read(collected_fees.amount.value) > 0 &&
+        option::spec_is_some(collected_fees.proposer) ==>
+        if (proposer != @vm_reserved) {
+            if (table::spec_contains(fees_table, proposer)) {
+                table::spec_get(post_fees_table, proposer).value == table::spec_get(
+                    fees_table,
+                    proposer
+                ).value + fee_to_add
+            } else {
+                table::spec_get(post_fees_table, proposer).value == fee_to_add
+            }
+        } else {
+            option::spec_is_none(post_collected_fees.proposer) && post_amount == 0
+        };
+}
+
+ + + + + +### Function `process_collected_fees` + + +
public(friend) fun process_collected_fees()
+
+ + + + +
// This enforces high-level requirement 6:
+include ProcessCollectedFeesRequiresAndEnsures;
+
+ + + + + +### Function `burn_fee` + + +
public(friend) fun burn_fee(account: address, fee: u64)
+
+ + +AptosCoinCapabilities should exist. + + +
pragma verify = false;
+aborts_if !exists<AptosCoinCapabilities>(@aptos_framework);
+let account_addr = account;
+let amount = fee;
+let aptos_addr = type_info::type_of<AptosCoin>().account_address;
+let coin_store = global<CoinStore<AptosCoin>>(account_addr);
+let post post_coin_store = global<CoinStore<AptosCoin>>(account_addr);
+aborts_if amount != 0 && !(exists<CoinInfo<AptosCoin>>(aptos_addr)
+    && exists<CoinStore<AptosCoin>>(account_addr));
+aborts_if coin_store.coin.value < amount;
+let maybe_supply = global<CoinInfo<AptosCoin>>(aptos_addr).supply;
+let supply_aggr = option::spec_borrow(maybe_supply);
+let value = optional_aggregator::optional_aggregator_value(supply_aggr);
+let post post_maybe_supply = global<CoinInfo<AptosCoin>>(aptos_addr).supply;
+let post post_supply = option::spec_borrow(post_maybe_supply);
+let post post_value = optional_aggregator::optional_aggregator_value(post_supply);
+aborts_if option::spec_is_some(maybe_supply) && value < amount;
+ensures post_coin_store.coin.value == coin_store.coin.value - amount;
+ensures if (option::spec_is_some(maybe_supply)) {
+    post_value == value - amount
+} else {
+    option::spec_is_none(post_maybe_supply)
+};
+ensures coin::supply<AptosCoin> == old(coin::supply<AptosCoin>) - amount;
+
+ + + + + +### Function `mint_and_refund` + + +
public(friend) fun mint_and_refund(account: address, refund: u64)
+
+ + + + +
pragma verify = false;
+let aptos_addr = type_info::type_of<AptosCoin>().account_address;
+aborts_if (refund != 0) && !exists<CoinInfo<AptosCoin>>(aptos_addr);
+include coin::CoinAddAbortsIf<AptosCoin> { amount: refund };
+aborts_if !exists<CoinStore<AptosCoin>>(account);
+aborts_if !exists<AptosCoinMintCapability>(@aptos_framework);
+let supply = coin::supply<AptosCoin>;
+let post post_supply = coin::supply<AptosCoin>;
+aborts_if [abstract] supply + refund > MAX_U128;
+ensures post_supply == supply + refund;
+
+ + + + + +### Function `collect_fee` + + +
public(friend) fun collect_fee(account: address, fee: u64)
+
+ + + + +
pragma verify = false;
+let collected_fees = global<CollectedFeesPerBlock>(@aptos_framework).amount;
+let aggr = collected_fees.value;
+let coin_store = global<coin::CoinStore<AptosCoin>>(account);
+aborts_if !exists<CollectedFeesPerBlock>(@aptos_framework);
+aborts_if fee > 0 && !exists<coin::CoinStore<AptosCoin>>(account);
+aborts_if fee > 0 && coin_store.coin.value < fee;
+aborts_if fee > 0 && aggregator::spec_aggregator_get_val(aggr)
+    + fee > aggregator::spec_get_limit(aggr);
+aborts_if fee > 0 && aggregator::spec_aggregator_get_val(aggr)
+    + fee > MAX_U128;
+let post post_coin_store = global<coin::CoinStore<AptosCoin>>(account);
+let post post_collected_fees = global<CollectedFeesPerBlock>(@aptos_framework).amount;
+ensures post_coin_store.coin.value == coin_store.coin.value - fee;
+ensures aggregator::spec_aggregator_get_val(post_collected_fees.value) == aggregator::spec_aggregator_get_val(
+    aggr
+) + fee;
+
+ + + + + +### Function `store_aptos_coin_burn_cap` + + +
public(friend) fun store_aptos_coin_burn_cap(aptos_framework: &signer, burn_cap: coin::BurnCapability<aptos_coin::AptosCoin>)
+
+ + +Ensure caller is admin. +Aborts if AptosCoinCapabilities already exists. + + +
pragma verify = false;
+let addr = signer::address_of(aptos_framework);
+aborts_if !system_addresses::is_aptos_framework_address(addr);
+aborts_if exists<AptosFABurnCapabilities>(addr);
+aborts_if exists<AptosCoinCapabilities>(addr);
+ensures exists<AptosFABurnCapabilities>(addr) || exists<AptosCoinCapabilities>(addr);
+
+ + + + + +### Function `store_aptos_coin_mint_cap` + + +
public(friend) fun store_aptos_coin_mint_cap(aptos_framework: &signer, mint_cap: coin::MintCapability<aptos_coin::AptosCoin>)
+
+ + +Ensure caller is admin. +Aborts if AptosCoinMintCapability already exists. + + +
let addr = signer::address_of(aptos_framework);
+aborts_if !system_addresses::is_aptos_framework_address(addr);
+aborts_if exists<AptosCoinMintCapability>(addr);
+ensures exists<AptosCoinMintCapability>(addr);
+
+ + + + + +### Function `initialize_storage_refund` + + +
#[deprecated]
+public fun initialize_storage_refund(_: &signer)
+
+ + +Historical. Aborts. + + +
aborts_if true;
+
+ + + + + +### Function `emit_fee_statement` + + +
fun emit_fee_statement(fee_statement: transaction_fee::FeeStatement)
+
+ + +Aborts if module event feature is not enabled. + + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/transaction_validation.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/transaction_validation.md new file mode 100644 index 0000000000000..db66b1d46926d --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/transaction_validation.md @@ -0,0 +1,1438 @@ + + + +# Module `0x1::transaction_validation` + + + +- [Resource `TransactionValidation`](#0x1_transaction_validation_TransactionValidation) +- [Constants](#@Constants_0) +- [Function `initialize`](#0x1_transaction_validation_initialize) +- [Function `prologue_common`](#0x1_transaction_validation_prologue_common) +- [Function `script_prologue`](#0x1_transaction_validation_script_prologue) +- [Function `script_prologue_extended`](#0x1_transaction_validation_script_prologue_extended) +- [Function `multi_agent_script_prologue`](#0x1_transaction_validation_multi_agent_script_prologue) +- [Function `multi_agent_script_prologue_extended`](#0x1_transaction_validation_multi_agent_script_prologue_extended) +- [Function `multi_agent_common_prologue`](#0x1_transaction_validation_multi_agent_common_prologue) +- [Function `fee_payer_script_prologue`](#0x1_transaction_validation_fee_payer_script_prologue) +- [Function `fee_payer_script_prologue_extended`](#0x1_transaction_validation_fee_payer_script_prologue_extended) +- [Function `epilogue`](#0x1_transaction_validation_epilogue) +- [Function `epilogue_extended`](#0x1_transaction_validation_epilogue_extended) +- [Function `epilogue_gas_payer`](#0x1_transaction_validation_epilogue_gas_payer) +- [Function `epilogue_gas_payer_extended`](#0x1_transaction_validation_epilogue_gas_payer_extended) +- [Function `skip_auth_key_check`](#0x1_transaction_validation_skip_auth_key_check) +- [Function `skip_gas_payment`](#0x1_transaction_validation_skip_gas_payment) +- 
[Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `initialize`](#@Specification_1_initialize) + - [Function `prologue_common`](#@Specification_1_prologue_common) + - [Function `script_prologue`](#@Specification_1_script_prologue) + - [Function `script_prologue_extended`](#@Specification_1_script_prologue_extended) + - [Function `multi_agent_script_prologue`](#@Specification_1_multi_agent_script_prologue) + - [Function `multi_agent_script_prologue_extended`](#@Specification_1_multi_agent_script_prologue_extended) + - [Function `multi_agent_common_prologue`](#@Specification_1_multi_agent_common_prologue) + - [Function `fee_payer_script_prologue`](#@Specification_1_fee_payer_script_prologue) + - [Function `fee_payer_script_prologue_extended`](#@Specification_1_fee_payer_script_prologue_extended) + - [Function `epilogue`](#@Specification_1_epilogue) + - [Function `epilogue_extended`](#@Specification_1_epilogue_extended) + - [Function `epilogue_gas_payer`](#@Specification_1_epilogue_gas_payer) + - [Function `epilogue_gas_payer_extended`](#@Specification_1_epilogue_gas_payer_extended) + + +
use 0x1::account;
+use 0x1::aptos_account;
+use 0x1::aptos_coin;
+use 0x1::bcs;
+use 0x1::chain_id;
+use 0x1::coin;
+use 0x1::error;
+use 0x1::features;
+use 0x1::signer;
+use 0x1::system_addresses;
+use 0x1::timestamp;
+use 0x1::transaction_fee;
+use 0x1::vector;
+
+ + + + + +## Resource `TransactionValidation` + +This holds information that will be picked up by the VM to call the +correct chain-specific prologue and epilogue functions + + +
struct TransactionValidation has key
+
+ + + +
+Fields + + +
+
+module_addr: address +
+
+ +
+
+module_name: vector<u8> +
+
+ +
+
+script_prologue_name: vector<u8> +
+
+ +
+
+module_prologue_name: vector<u8> +
+
+ +
+
+multi_agent_prologue_name: vector<u8> +
+
+ +
+
+user_epilogue_name: vector<u8> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +MSB is used to indicate a gas payer tx + + +
const MAX_U64: u128 = 18446744073709551615;
+
+ + + + + +Transaction exceeded its allocated max gas + + +
const EOUT_OF_GAS: u64 = 6;
+
+ + + + + + + +
const PROLOGUE_EACCOUNT_DOES_NOT_EXIST: u64 = 1004;
+
+ + + + + + + +
const PROLOGUE_EBAD_CHAIN_ID: u64 = 1007;
+
+ + + + + + + +
const PROLOGUE_ECANT_PAY_GAS_DEPOSIT: u64 = 1005;
+
+ + + + + + + +
const PROLOGUE_EFEE_PAYER_NOT_ENABLED: u64 = 1010;
+
+ + + + + +Prologue errors. These are separated out from the other errors in this +module since they are mapped separately to major VM statuses, and are +important to the semantics of the system. + + +
const PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY: u64 = 1001;
+
+ + + + + + + +
const PROLOGUE_ESECONDARY_KEYS_ADDRESSES_COUNT_MISMATCH: u64 = 1009;
+
+ + + + + + + +
const PROLOGUE_ESEQUENCE_NUMBER_TOO_BIG: u64 = 1008;
+
+ + + + + + + +
const PROLOGUE_ESEQUENCE_NUMBER_TOO_NEW: u64 = 1003;
+
+ + + + + + + +
const PROLOGUE_ESEQUENCE_NUMBER_TOO_OLD: u64 = 1002;
+
+ + + + + + + +
const PROLOGUE_ETRANSACTION_EXPIRED: u64 = 1006;
+
+ + + + + +## Function `initialize` + +Only called during genesis to initialize system resources for this module. + + +
public(friend) fun initialize(aptos_framework: &signer, script_prologue_name: vector<u8>, module_prologue_name: vector<u8>, multi_agent_prologue_name: vector<u8>, user_epilogue_name: vector<u8>)
+
+ + + +
+Implementation + + +
public(friend) fun initialize(
+    aptos_framework: &signer,
+    script_prologue_name: vector<u8>,
+    // module_prologue_name is deprecated and not used.
+    module_prologue_name: vector<u8>,
+    multi_agent_prologue_name: vector<u8>,
+    user_epilogue_name: vector<u8>,
+) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+
+    move_to(aptos_framework, TransactionValidation {
+        module_addr: @aptos_framework,
+        module_name: b"transaction_validation",
+        script_prologue_name,
+        // module_prologue_name is deprecated and not used.
+        module_prologue_name,
+        multi_agent_prologue_name,
+        user_epilogue_name,
+    });
+}
+
+ + + +
+ + + +## Function `prologue_common` + + + +
fun prologue_common(sender: signer, gas_payer: address, txn_sequence_number: u64, txn_authentication_key: vector<u8>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, is_simulation: bool)
+
+ + + +
+Implementation + + +
fun prologue_common(
+    sender: signer,
+    gas_payer: address,
+    txn_sequence_number: u64,
+    txn_authentication_key: vector<u8>,
+    txn_gas_price: u64,
+    txn_max_gas_units: u64,
+    txn_expiration_time: u64,
+    chain_id: u8,
+    is_simulation: bool,
+) {
+    assert!(
+        timestamp::now_seconds() < txn_expiration_time,
+        error::invalid_argument(PROLOGUE_ETRANSACTION_EXPIRED),
+    );
+    assert!(chain_id::get() == chain_id, error::invalid_argument(PROLOGUE_EBAD_CHAIN_ID));
+
+    let transaction_sender = signer::address_of(&sender);
+
+    if (
+        transaction_sender == gas_payer
+            || account::exists_at(transaction_sender)
+            || !features::sponsored_automatic_account_creation_enabled()
+            || txn_sequence_number > 0
+    ) {
+        assert!(account::exists_at(transaction_sender), error::invalid_argument(PROLOGUE_EACCOUNT_DOES_NOT_EXIST));
+        if (!features::transaction_simulation_enhancement_enabled() ||
+                !skip_auth_key_check(is_simulation, &txn_authentication_key)) {
+            assert!(
+                txn_authentication_key == account::get_authentication_key(transaction_sender),
+                error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY),
+            )
+        };
+
+        let account_sequence_number = account::get_sequence_number(transaction_sender);
+        assert!(
+            txn_sequence_number < (1u64 << 63),
+            error::out_of_range(PROLOGUE_ESEQUENCE_NUMBER_TOO_BIG)
+        );
+
+        assert!(
+            txn_sequence_number >= account_sequence_number,
+            error::invalid_argument(PROLOGUE_ESEQUENCE_NUMBER_TOO_OLD)
+        );
+
+        assert!(
+            txn_sequence_number == account_sequence_number,
+            error::invalid_argument(PROLOGUE_ESEQUENCE_NUMBER_TOO_NEW)
+        );
+    } else {
+        // In this case, the transaction is sponsored and the account does not exist, so ensure
+        // the default values match.
+        assert!(
+            txn_sequence_number == 0,
+            error::invalid_argument(PROLOGUE_ESEQUENCE_NUMBER_TOO_NEW)
+        );
+
+        if (!features::transaction_simulation_enhancement_enabled() ||
+                !skip_auth_key_check(is_simulation, &txn_authentication_key)) {
+            assert!(
+                txn_authentication_key == bcs::to_bytes(&transaction_sender),
+                error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY),
+            );
+        }
+    };
+
+    let max_transaction_fee = txn_gas_price * txn_max_gas_units;
+
+    if (!features::transaction_simulation_enhancement_enabled() || !skip_gas_payment(is_simulation, gas_payer)) {
+        if (features::operations_default_to_fa_apt_store_enabled()) {
+            assert!(
+                aptos_account::is_fungible_balance_at_least(gas_payer, max_transaction_fee),
+                error::invalid_argument(PROLOGUE_ECANT_PAY_GAS_DEPOSIT)
+            );
+        } else {
+            assert!(
+                coin::is_balance_at_least<AptosCoin>(gas_payer, max_transaction_fee),
+                error::invalid_argument(PROLOGUE_ECANT_PAY_GAS_DEPOSIT)
+            );
+        }
+    }
+}
+
+ + + +
+ + + +## Function `script_prologue` + + + +
fun script_prologue(sender: signer, txn_sequence_number: u64, txn_public_key: vector<u8>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, _script_hash: vector<u8>)
+
+ + + +
+Implementation + + +
fun script_prologue(
+    sender: signer,
+    txn_sequence_number: u64,
+    txn_public_key: vector<u8>,
+    txn_gas_price: u64,
+    txn_max_gas_units: u64,
+    txn_expiration_time: u64,
+    chain_id: u8,
+    _script_hash: vector<u8>,
+) {
+    let gas_payer = signer::address_of(&sender);
+    // prologue_common with is_simulation set to false behaves identically to the original script_prologue function.
+    prologue_common(
+        sender,
+        gas_payer,
+        txn_sequence_number,
+        txn_public_key,
+        txn_gas_price,
+        txn_max_gas_units,
+        txn_expiration_time,
+        chain_id,
+        false,
+    )
+}
+
+ + + +
+ + + +## Function `script_prologue_extended` + + + +
fun script_prologue_extended(sender: signer, txn_sequence_number: u64, txn_public_key: vector<u8>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, _script_hash: vector<u8>, is_simulation: bool)
+
+ + + +
+Implementation + + +
fun script_prologue_extended(
+    sender: signer,
+    txn_sequence_number: u64,
+    txn_public_key: vector<u8>,
+    txn_gas_price: u64,
+    txn_max_gas_units: u64,
+    txn_expiration_time: u64,
+    chain_id: u8,
+    _script_hash: vector<u8>,
+    is_simulation: bool,
+) {
+    let gas_payer = signer::address_of(&sender);
+    prologue_common(
+        sender,
+        gas_payer,
+        txn_sequence_number,
+        txn_public_key,
+        txn_gas_price,
+        txn_max_gas_units,
+        txn_expiration_time,
+        chain_id,
+        is_simulation,
+    )
+}
+
+ + + +
+ + + +## Function `multi_agent_script_prologue` + + + +
fun multi_agent_script_prologue(sender: signer, txn_sequence_number: u64, txn_sender_public_key: vector<u8>, secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<vector<u8>>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8)
+
+ + + +
+Implementation + + +
fun multi_agent_script_prologue(
+    sender: signer,
+    txn_sequence_number: u64,
+    txn_sender_public_key: vector<u8>,
+    secondary_signer_addresses: vector<address>,
+    secondary_signer_public_key_hashes: vector<vector<u8>>,
+    txn_gas_price: u64,
+    txn_max_gas_units: u64,
+    txn_expiration_time: u64,
+    chain_id: u8,
+) {
+    let sender_addr = signer::address_of(&sender);
+    // prologue_common and multi_agent_common_prologue with is_simulation set to false behaves identically to the
+    // original multi_agent_script_prologue function.
+    prologue_common(
+        sender,
+        sender_addr,
+        txn_sequence_number,
+        txn_sender_public_key,
+        txn_gas_price,
+        txn_max_gas_units,
+        txn_expiration_time,
+        chain_id,
+        false,
+    );
+    multi_agent_common_prologue(secondary_signer_addresses, secondary_signer_public_key_hashes, false);
+}
+
+ + + +
+ + + +## Function `multi_agent_script_prologue_extended` + + + +
fun multi_agent_script_prologue_extended(sender: signer, txn_sequence_number: u64, txn_sender_public_key: vector<u8>, secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<vector<u8>>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, is_simulation: bool)
+
+ + + +
+Implementation + + +
fun multi_agent_script_prologue_extended(
+    sender: signer,
+    txn_sequence_number: u64,
+    txn_sender_public_key: vector<u8>,
+    secondary_signer_addresses: vector<address>,
+    secondary_signer_public_key_hashes: vector<vector<u8>>,
+    txn_gas_price: u64,
+    txn_max_gas_units: u64,
+    txn_expiration_time: u64,
+    chain_id: u8,
+    is_simulation: bool,
+) {
+    let sender_addr = signer::address_of(&sender);
+    prologue_common(
+        sender,
+        sender_addr,
+        txn_sequence_number,
+        txn_sender_public_key,
+        txn_gas_price,
+        txn_max_gas_units,
+        txn_expiration_time,
+        chain_id,
+        is_simulation,
+    );
+    multi_agent_common_prologue(secondary_signer_addresses, secondary_signer_public_key_hashes, is_simulation);
+}
+
+ + + +
+ + + +## Function `multi_agent_common_prologue` + + + +
fun multi_agent_common_prologue(secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<vector<u8>>, is_simulation: bool)
+
+ + + +
+Implementation + + +
fun multi_agent_common_prologue(
+    secondary_signer_addresses: vector<address>,
+    secondary_signer_public_key_hashes: vector<vector<u8>>,
+    is_simulation: bool,
+) {
+    let num_secondary_signers = vector::length(&secondary_signer_addresses);
+    assert!(
+        vector::length(&secondary_signer_public_key_hashes) == num_secondary_signers,
+        error::invalid_argument(PROLOGUE_ESECONDARY_KEYS_ADDRESSES_COUNT_MISMATCH),
+    );
+
+    let i = 0;
+    while ({
+        spec {
+            invariant i <= num_secondary_signers;
+            invariant forall j in 0..i:
+                account::exists_at(secondary_signer_addresses[j]);
+            invariant forall j in 0..i:
+                secondary_signer_public_key_hashes[j] == account::get_authentication_key(secondary_signer_addresses[j]) ||
+                    (features::spec_simulation_enhancement_enabled() && is_simulation && vector::is_empty(secondary_signer_public_key_hashes[j]));
+        };
+        (i < num_secondary_signers)
+    }) {
+        let secondary_address = *vector::borrow(&secondary_signer_addresses, i);
+        assert!(account::exists_at(secondary_address), error::invalid_argument(PROLOGUE_EACCOUNT_DOES_NOT_EXIST));
+
+        let signer_public_key_hash = *vector::borrow(&secondary_signer_public_key_hashes, i);
+        if (!features::transaction_simulation_enhancement_enabled() ||
+                !skip_auth_key_check(is_simulation, &signer_public_key_hash)) {
+            assert!(
+                signer_public_key_hash == account::get_authentication_key(secondary_address),
+                error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY),
+            )
+        };
+        i = i + 1;
+    }
+}
+
+ + + +
+ + + +## Function `fee_payer_script_prologue` + + + +
fun fee_payer_script_prologue(sender: signer, txn_sequence_number: u64, txn_sender_public_key: vector<u8>, secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<vector<u8>>, fee_payer_address: address, fee_payer_public_key_hash: vector<u8>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8)
+
+ + + +
+Implementation + + +
fun fee_payer_script_prologue(
+    sender: signer,
+    txn_sequence_number: u64,
+    txn_sender_public_key: vector<u8>,
+    secondary_signer_addresses: vector<address>,
+    secondary_signer_public_key_hashes: vector<vector<u8>>,
+    fee_payer_address: address,
+    fee_payer_public_key_hash: vector<u8>,
+    txn_gas_price: u64,
+    txn_max_gas_units: u64,
+    txn_expiration_time: u64,
+    chain_id: u8,
+) {
+    assert!(features::fee_payer_enabled(), error::invalid_state(PROLOGUE_EFEE_PAYER_NOT_ENABLED));
+    // prologue_common and multi_agent_common_prologue with is_simulation set to false behaves identically to the
+    // original fee_payer_script_prologue function.
+    prologue_common(
+        sender,
+        fee_payer_address,
+        txn_sequence_number,
+        txn_sender_public_key,
+        txn_gas_price,
+        txn_max_gas_units,
+        txn_expiration_time,
+        chain_id,
+        false,
+    );
+    multi_agent_common_prologue(secondary_signer_addresses, secondary_signer_public_key_hashes, false);
+    assert!(
+        fee_payer_public_key_hash == account::get_authentication_key(fee_payer_address),
+        error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY),
+    );
+}
+
+ + + +
+ + + +## Function `fee_payer_script_prologue_extended` + + + +
fun fee_payer_script_prologue_extended(sender: signer, txn_sequence_number: u64, txn_sender_public_key: vector<u8>, secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<vector<u8>>, fee_payer_address: address, fee_payer_public_key_hash: vector<u8>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, is_simulation: bool)
+
+ + + +
+Implementation + + +
fun fee_payer_script_prologue_extended(
+    sender: signer,
+    txn_sequence_number: u64,
+    txn_sender_public_key: vector<u8>,
+    secondary_signer_addresses: vector<address>,
+    secondary_signer_public_key_hashes: vector<vector<u8>>,
+    fee_payer_address: address,
+    fee_payer_public_key_hash: vector<u8>,
+    txn_gas_price: u64,
+    txn_max_gas_units: u64,
+    txn_expiration_time: u64,
+    chain_id: u8,
+    is_simulation: bool,
+) {
+    assert!(features::fee_payer_enabled(), error::invalid_state(PROLOGUE_EFEE_PAYER_NOT_ENABLED));
+    prologue_common(
+        sender,
+        fee_payer_address,
+        txn_sequence_number,
+        txn_sender_public_key,
+        txn_gas_price,
+        txn_max_gas_units,
+        txn_expiration_time,
+        chain_id,
+        is_simulation,
+    );
+    multi_agent_common_prologue(secondary_signer_addresses, secondary_signer_public_key_hashes, is_simulation);
+    if (!features::transaction_simulation_enhancement_enabled() ||
+        !skip_auth_key_check(is_simulation, &fee_payer_public_key_hash)) {
+        assert!(
+            fee_payer_public_key_hash == account::get_authentication_key(fee_payer_address),
+            error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY),
+        )
+    }
+}
+
+ + + +
+ + + +## Function `epilogue` + +Epilogue function is run after a transaction is successfully executed. +Called by the Adapter + + +
fun epilogue(account: signer, storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, gas_units_remaining: u64)
+
+ + + +
+Implementation + + +
fun epilogue(
+    account: signer,
+    storage_fee_refunded: u64,
+    txn_gas_price: u64,
+    txn_max_gas_units: u64,
+    gas_units_remaining: u64,
+) {
+    let addr = signer::address_of(&account);
+    epilogue_gas_payer(account, addr, storage_fee_refunded, txn_gas_price, txn_max_gas_units, gas_units_remaining);
+}
+
+ + + +
+ + + +## Function `epilogue_extended` + + + +
fun epilogue_extended(account: signer, storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, gas_units_remaining: u64, is_simulation: bool)
+
+ + + +
+Implementation + + +
fun epilogue_extended(
+    account: signer,
+    storage_fee_refunded: u64,
+    txn_gas_price: u64,
+    txn_max_gas_units: u64,
+    gas_units_remaining: u64,
+    is_simulation: bool,
+) {
+    let addr = signer::address_of(&account);
+    epilogue_gas_payer_extended(account, addr, storage_fee_refunded, txn_gas_price, txn_max_gas_units, gas_units_remaining, is_simulation);
+}
+
+ + + +
+ + + +## Function `epilogue_gas_payer` + +Epilogue function with explicit gas payer specified, is run after a transaction is successfully executed. +Called by the Adapter + + +
fun epilogue_gas_payer(account: signer, gas_payer: address, storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, gas_units_remaining: u64)
+
+ + + +
+Implementation + + +
fun epilogue_gas_payer(
+    account: signer,
+    gas_payer: address,
+    storage_fee_refunded: u64,
+    txn_gas_price: u64,
+    txn_max_gas_units: u64,
+    gas_units_remaining: u64,
+) {
+    // epilogue_gas_payer_extended with is_simulation set to false behaves identically to the original
+    // epilogue_gas_payer function.
+    epilogue_gas_payer_extended(
+        account,
+        gas_payer,
+        storage_fee_refunded,
+        txn_gas_price,
+        txn_max_gas_units,
+        gas_units_remaining,
+        false,
+    );
+}
+
+ + + +
+ + + +## Function `epilogue_gas_payer_extended` + + + +
fun epilogue_gas_payer_extended(account: signer, gas_payer: address, storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, gas_units_remaining: u64, is_simulation: bool)
+
+ + + +
+Implementation + + +
fun epilogue_gas_payer_extended(
+    account: signer,
+    gas_payer: address,
+    storage_fee_refunded: u64,
+    txn_gas_price: u64,
+    txn_max_gas_units: u64,
+    gas_units_remaining: u64,
+    is_simulation: bool,
+) {
+    assert!(txn_max_gas_units >= gas_units_remaining, error::invalid_argument(EOUT_OF_GAS));
+    let gas_used = txn_max_gas_units - gas_units_remaining;
+
+    assert!(
+        (txn_gas_price as u128) * (gas_used as u128) <= MAX_U64,
+        error::out_of_range(EOUT_OF_GAS)
+    );
+    let transaction_fee_amount = txn_gas_price * gas_used;
+
+    // it's important to maintain the error code consistent with vm
+    // to do failed transaction cleanup.
+    if (!features::transaction_simulation_enhancement_enabled() || !skip_gas_payment(is_simulation, gas_payer)) {
+        if (features::operations_default_to_fa_apt_store_enabled()) {
+            assert!(
+                aptos_account::is_fungible_balance_at_least(gas_payer, transaction_fee_amount),
+                error::out_of_range(PROLOGUE_ECANT_PAY_GAS_DEPOSIT),
+            );
+        } else {
+            assert!(
+                coin::is_balance_at_least<AptosCoin>(gas_payer, transaction_fee_amount),
+                error::out_of_range(PROLOGUE_ECANT_PAY_GAS_DEPOSIT),
+            );
+        };
+
+        let amount_to_burn = if (features::collect_and_distribute_gas_fees()) {
+            // TODO(gas): We might want to distinguish the refundable part of the charge and burn it or track
+            // it separately, so that we don't increase the total supply by refunding.
+
+            // If transaction fees are redistributed to validators, collect them here for
+            // later redistribution.
+            transaction_fee::collect_fee(gas_payer, transaction_fee_amount);
+            0
+        } else {
+            // Otherwise, just burn the fee.
+            // TODO: this branch should be removed completely when transaction fee collection
+            // is tested and is fully proven to work well.
+            transaction_fee_amount
+        };
+
+        if (amount_to_burn > storage_fee_refunded) {
+            let burn_amount = amount_to_burn - storage_fee_refunded;
+            transaction_fee::burn_fee(gas_payer, burn_amount);
+        } else if (amount_to_burn < storage_fee_refunded) {
+            let mint_amount = storage_fee_refunded - amount_to_burn;
+            transaction_fee::mint_and_refund(gas_payer, mint_amount)
+        };
+    };
+
+    // Increment sequence number
+    let addr = signer::address_of(&account);
+    account::increment_sequence_number(addr);
+}
+
+ + + +
+ + + +## Function `skip_auth_key_check` + + + +
fun skip_auth_key_check(is_simulation: bool, auth_key: &vector<u8>): bool
+
+ + + +
+Implementation + + +
inline fun skip_auth_key_check(is_simulation: bool, auth_key: &vector<u8>): bool {
+    is_simulation && vector::is_empty(auth_key)
+}
+
+ + + +
+ + + +## Function `skip_gas_payment` + + + +
fun skip_gas_payment(is_simulation: bool, gas_payer: address): bool
+
+ + + +
+Implementation + + +
inline fun skip_gas_payment(is_simulation: bool, gas_payer: address): bool {
+    is_simulation && gas_payer == @0x0
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1The sender of a transaction should have sufficient coin balance to pay the transaction fee.HighThe prologue_common function asserts that the transaction sender has enough coin balance to be paid as the max_transaction_fee.Formally verified via PrologueCommonAbortsIf. Moreover, the native transaction validation patterns have been manually audited.
2All secondary signer addresses are verified to be authentic through a validation process.CriticalThe function multi_agent_script_prologue ensures that each secondary signer address undergoes authentication validation, including verification of account existence and authentication key matching, confirming their authenticity.Formally verified via multi_agent_script_prologue. Moreover, the native transaction validation patterns have been manually audited.
3After successful execution, base the transaction fee on the configuration set by the features library.HighThe epilogue function collects the transaction fee for either redistribution or burning based on the feature::collect_and_distribute_gas_fees result.Formally Verified via epilogue. Moreover, the native transaction validation patterns have been manually audited.
+ + + + + + +### Module-level Specification + + +
pragma verify = true;
+pragma aborts_if_is_strict;
+
+ + + + + +### Function `initialize` + + +
public(friend) fun initialize(aptos_framework: &signer, script_prologue_name: vector<u8>, module_prologue_name: vector<u8>, multi_agent_prologue_name: vector<u8>, user_epilogue_name: vector<u8>)
+
+ + +Ensure caller is aptos_framework. +Aborts if TransactionValidation already exists. + + +
let addr = signer::address_of(aptos_framework);
+aborts_if !system_addresses::is_aptos_framework_address(addr);
+aborts_if exists<TransactionValidation>(addr);
+ensures exists<TransactionValidation>(addr);
+
+ + +Create a schema to reuse some code. +These constraints describe the conditions under which the common prologue may abort. + + + + + +
schema PrologueCommonAbortsIf {
+    sender: signer;
+    gas_payer: address;
+    txn_sequence_number: u64;
+    txn_authentication_key: vector<u8>;
+    txn_gas_price: u64;
+    txn_max_gas_units: u64;
+    txn_expiration_time: u64;
+    chain_id: u8;
+    aborts_if !exists<CurrentTimeMicroseconds>(@aptos_framework);
+    aborts_if !(timestamp::now_seconds() < txn_expiration_time);
+    aborts_if !exists<ChainId>(@aptos_framework);
+    aborts_if !(chain_id::get() == chain_id);
+    let transaction_sender = signer::address_of(sender);
+    aborts_if (
+        !features::spec_is_enabled(features::SPONSORED_AUTOMATIC_ACCOUNT_CREATION)
+            || account::exists_at(transaction_sender)
+            || transaction_sender == gas_payer
+            || txn_sequence_number > 0
+    ) && (
+        !(txn_sequence_number >= global<Account>(transaction_sender).sequence_number)
+            || !(txn_authentication_key == global<Account>(transaction_sender).authentication_key)
+            || !account::exists_at(transaction_sender)
+            || !(txn_sequence_number == global<Account>(transaction_sender).sequence_number)
+    );
+    aborts_if features::spec_is_enabled(features::SPONSORED_AUTOMATIC_ACCOUNT_CREATION)
+        && transaction_sender != gas_payer
+        && txn_sequence_number == 0
+        && !account::exists_at(transaction_sender)
+        && txn_authentication_key != bcs::to_bytes(transaction_sender);
+    aborts_if !(txn_sequence_number < (1u64 << 63));
+    let max_transaction_fee = txn_gas_price * txn_max_gas_units;
+    aborts_if max_transaction_fee > MAX_U64;
+    aborts_if !exists<CoinStore<AptosCoin>>(gas_payer);
+    // This enforces high-level requirement 1:
+    aborts_if !(global<CoinStore<AptosCoin>>(gas_payer).coin.value >= max_transaction_fee);
+}
+
+ + + + + +### Function `prologue_common` + + +
fun prologue_common(sender: signer, gas_payer: address, txn_sequence_number: u64, txn_authentication_key: vector<u8>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, is_simulation: bool)
+
+ + + + +
pragma verify = false;
+include PrologueCommonAbortsIf;
+
+ + + + + +### Function `script_prologue` + + +
fun script_prologue(sender: signer, txn_sequence_number: u64, txn_public_key: vector<u8>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, _script_hash: vector<u8>)
+
+ + + + +
pragma verify = false;
+
+ + + + + + + +
schema MultiAgentPrologueCommonAbortsIf {
+    secondary_signer_addresses: vector<address>;
+    secondary_signer_public_key_hashes: vector<vector<u8>>;
+    is_simulation: bool;
+    let num_secondary_signers = len(secondary_signer_addresses);
+    aborts_if len(secondary_signer_public_key_hashes) != num_secondary_signers;
+    // This enforces high-level requirement 2:
+    aborts_if exists i in 0..num_secondary_signers:
+        !account::exists_at(secondary_signer_addresses[i]);
+    aborts_if exists i in 0..num_secondary_signers:
+        !can_skip(features::spec_simulation_enhancement_enabled(), is_simulation, secondary_signer_public_key_hashes[i]) &&
+            secondary_signer_public_key_hashes[i] !=
+                account::get_authentication_key(secondary_signer_addresses[i]);
+    ensures forall i in 0..num_secondary_signers:
+        account::exists_at(secondary_signer_addresses[i]);
+    ensures forall i in 0..num_secondary_signers:
+        secondary_signer_public_key_hashes[i] == account::get_authentication_key(secondary_signer_addresses[i])
+            || can_skip(features::spec_simulation_enhancement_enabled(), is_simulation, secondary_signer_public_key_hashes[i]);
+}
+
+ + + + + + + +
fun can_skip(feature_flag: bool, is_simulation: bool, auth_key: vector<u8>): bool {
+   features::spec_simulation_enhancement_enabled() && is_simulation && vector::is_empty(auth_key)
+}
+
+ + + + + +### Function `script_prologue_extended` + + +
fun script_prologue_extended(sender: signer, txn_sequence_number: u64, txn_public_key: vector<u8>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, _script_hash: vector<u8>, is_simulation: bool)
+
+ + + + +
pragma verify = false;
+include PrologueCommonAbortsIf {
+    gas_payer: signer::address_of(sender),
+    txn_authentication_key: txn_public_key
+};
+
+ + + + + +### Function `multi_agent_script_prologue` + + +
fun multi_agent_script_prologue(sender: signer, txn_sequence_number: u64, txn_sender_public_key: vector<u8>, secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<vector<u8>>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `multi_agent_script_prologue_extended` + + +
fun multi_agent_script_prologue_extended(sender: signer, txn_sequence_number: u64, txn_sender_public_key: vector<u8>, secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<vector<u8>>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, is_simulation: bool)
+
+ + +Aborts if the length of the public key hash vector +does not equal the number of signers. + + +
pragma verify_duration_estimate = 120;
+let gas_payer = signer::address_of(sender);
+pragma verify = false;
+include PrologueCommonAbortsIf {
+    gas_payer,
+    txn_sequence_number,
+    txn_authentication_key: txn_sender_public_key,
+};
+include MultiAgentPrologueCommonAbortsIf {
+    secondary_signer_addresses,
+    secondary_signer_public_key_hashes,
+    is_simulation,
+};
+
+ + + + + +### Function `multi_agent_common_prologue` + + +
fun multi_agent_common_prologue(secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<vector<u8>>, is_simulation: bool)
+
+ + + + +
include MultiAgentPrologueCommonAbortsIf {
+    secondary_signer_addresses,
+    secondary_signer_public_key_hashes,
+    is_simulation,
+};
+
+ + + + + +### Function `fee_payer_script_prologue` + + +
fun fee_payer_script_prologue(sender: signer, txn_sequence_number: u64, txn_sender_public_key: vector<u8>, secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<vector<u8>>, fee_payer_address: address, fee_payer_public_key_hash: vector<u8>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `fee_payer_script_prologue_extended` + + +
fun fee_payer_script_prologue_extended(sender: signer, txn_sequence_number: u64, txn_sender_public_key: vector<u8>, secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<vector<u8>>, fee_payer_address: address, fee_payer_public_key_hash: vector<u8>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, is_simulation: bool)
+
+ + + + +
pragma verify_duration_estimate = 120;
+aborts_if !features::spec_is_enabled(features::FEE_PAYER_ENABLED);
+let gas_payer = fee_payer_address;
+include PrologueCommonAbortsIf {
+    gas_payer,
+    txn_sequence_number,
+    txn_authentication_key: txn_sender_public_key,
+};
+include MultiAgentPrologueCommonAbortsIf {
+    secondary_signer_addresses,
+    secondary_signer_public_key_hashes,
+    is_simulation,
+};
+aborts_if !account::exists_at(gas_payer);
+aborts_if !(fee_payer_public_key_hash == account::get_authentication_key(gas_payer));
+aborts_if !features::spec_fee_payer_enabled();
+
+ + + + + +### Function `epilogue` + + +
fun epilogue(account: signer, storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, gas_units_remaining: u64)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `epilogue_extended` + + +
fun epilogue_extended(account: signer, storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, gas_units_remaining: u64, is_simulation: bool)
+
+ + +Abort according to the conditions. +AptosCoinCapabilities and CoinInfo should exist. +Skip transaction_fee::burn_fee verification. + + +
pragma verify = false;
+include EpilogueGasPayerAbortsIf { gas_payer: signer::address_of(account) };
+
+ + + + + +### Function `epilogue_gas_payer` + + +
fun epilogue_gas_payer(account: signer, gas_payer: address, storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, gas_units_remaining: u64)
+
+ + + + +
pragma verify = false;
+
+ + + + + + + +
schema EpilogueGasPayerAbortsIf {
+    account: signer;
+    gas_payer: address;
+    storage_fee_refunded: u64;
+    txn_gas_price: u64;
+    txn_max_gas_units: u64;
+    gas_units_remaining: u64;
+    aborts_if !(txn_max_gas_units >= gas_units_remaining);
+    let gas_used = txn_max_gas_units - gas_units_remaining;
+    aborts_if !(txn_gas_price * gas_used <= MAX_U64);
+    let transaction_fee_amount = txn_gas_price * gas_used;
+    let addr = signer::address_of(account);
+    let pre_account = global<account::Account>(addr);
+    let post account = global<account::Account>(addr);
+    aborts_if !exists<CoinStore<AptosCoin>>(gas_payer);
+    aborts_if !exists<Account>(addr);
+    aborts_if !(global<Account>(addr).sequence_number < MAX_U64);
+    ensures account.sequence_number == pre_account.sequence_number + 1;
+    let collect_fee_enabled = features::spec_is_enabled(features::COLLECT_AND_DISTRIBUTE_GAS_FEES);
+    let collected_fees = global<CollectedFeesPerBlock>(@aptos_framework).amount;
+    let aggr = collected_fees.value;
+    let aggr_val = aggregator::spec_aggregator_get_val(aggr);
+    let aggr_lim = aggregator::spec_get_limit(aggr);
+    // This enforces high-level requirement 3:
+    aborts_if collect_fee_enabled && !exists<CollectedFeesPerBlock>(@aptos_framework);
+    aborts_if collect_fee_enabled && transaction_fee_amount > 0 && aggr_val + transaction_fee_amount > aggr_lim;
+    let amount_to_burn = if (collect_fee_enabled) {
+        0
+    } else {
+        transaction_fee_amount - storage_fee_refunded
+    };
+    let apt_addr = type_info::type_of<AptosCoin>().account_address;
+    let maybe_apt_supply = global<CoinInfo<AptosCoin>>(apt_addr).supply;
+    let total_supply_enabled = option::spec_is_some(maybe_apt_supply);
+    let apt_supply = option::spec_borrow(maybe_apt_supply);
+    let apt_supply_value = optional_aggregator::optional_aggregator_value(apt_supply);
+    let post post_maybe_apt_supply = global<CoinInfo<AptosCoin>>(apt_addr).supply;
+    let post post_apt_supply = option::spec_borrow(post_maybe_apt_supply);
+    let post post_apt_supply_value = optional_aggregator::optional_aggregator_value(post_apt_supply);
+    aborts_if amount_to_burn > 0 && !exists<AptosCoinCapabilities>(@aptos_framework);
+    aborts_if amount_to_burn > 0 && !exists<CoinInfo<AptosCoin>>(apt_addr);
+    aborts_if amount_to_burn > 0 && total_supply_enabled && apt_supply_value < amount_to_burn;
+    ensures total_supply_enabled ==> apt_supply_value - amount_to_burn == post_apt_supply_value;
+    let amount_to_mint = if (collect_fee_enabled) {
+        storage_fee_refunded
+    } else {
+        storage_fee_refunded - transaction_fee_amount
+    };
+    let total_supply = coin::supply<AptosCoin>;
+    let post post_total_supply = coin::supply<AptosCoin>;
+    aborts_if amount_to_mint > 0 && !exists<CoinStore<AptosCoin>>(addr);
+    aborts_if amount_to_mint > 0 && !exists<AptosCoinMintCapability>(@aptos_framework);
+    aborts_if amount_to_mint > 0 && total_supply + amount_to_mint > MAX_U128;
+    ensures amount_to_mint > 0 ==> post_total_supply == total_supply + amount_to_mint;
+    let aptos_addr = type_info::type_of<AptosCoin>().account_address;
+    aborts_if (amount_to_mint != 0) && !exists<coin::CoinInfo<AptosCoin>>(aptos_addr);
+    include coin::CoinAddAbortsIf<AptosCoin> { amount: amount_to_mint };
+}
+
+ + + + + +### Function `epilogue_gas_payer_extended` + + +
fun epilogue_gas_payer_extended(account: signer, gas_payer: address, storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, gas_units_remaining: u64, is_simulation: bool)
+
+ + +Abort according to the conditions. +AptosCoinCapabilities and CoinInfo should exist. +Skip transaction_fee::burn_fee verification. + + +
pragma verify = false;
+include EpilogueGasPayerAbortsIf;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/util.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/util.md new file mode 100644 index 0000000000000..8a3933b170760 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/util.md @@ -0,0 +1,149 @@ + + + +# Module `0x1::util` + +Utility functions used by the framework modules. + + +- [Function `from_bytes`](#0x1_util_from_bytes) +- [Function `address_from_bytes`](#0x1_util_address_from_bytes) +- [Specification](#@Specification_0) + - [Function `from_bytes`](#@Specification_0_from_bytes) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `address_from_bytes`](#@Specification_0_address_from_bytes) + + +
+ + + + + +## Function `from_bytes` + +Native function to deserialize a type T. + +Note that this function does not put any constraint on T. If code uses this function to +deserialize a linear value, it is their responsibility that the data they deserialize is +owned. + + +
public(friend) fun from_bytes<T>(bytes: vector<u8>): T
+
+ + + +
+Implementation + + +
public(friend) native fun from_bytes<T>(bytes: vector<u8>): T;
+
+ + + +
+ + + +## Function `address_from_bytes` + + + +
public fun address_from_bytes(bytes: vector<u8>): address
+
+ + + +
+Implementation + + +
public fun address_from_bytes(bytes: vector<u8>): address {
+    from_bytes(bytes)
+}
+
+ + + +
+ + + +## Specification + + + + +### Function `from_bytes` + + +
public(friend) fun from_bytes<T>(bytes: vector<u8>): T
+
+ + + + + + + +### High-level Requirements + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1The address input bytes should be exactly 32 bytes long.LowThe address_from_bytes function should assert that the length of the input bytes is 32.Verified via address_from_bytes.
+ + + + + + +### Module-level Specification + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] result == spec_from_bytes<T>(bytes);
+
+ + + + + + + +
fun spec_from_bytes<T>(bytes: vector<u8>): T;
+
+ + + + + +### Function `address_from_bytes` + + +
public fun address_from_bytes(bytes: vector<u8>): address
+
+ + + + +
// This enforces high-level requirement 1:
+aborts_if [abstract] len(bytes) != 32;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/validator_consensus_info.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/validator_consensus_info.md new file mode 100644 index 0000000000000..0100837fdd975 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/validator_consensus_info.md @@ -0,0 +1,205 @@ + + + +# Module `0x1::validator_consensus_info` + +Common type: ValidatorConsensusInfo. + + +- [Struct `ValidatorConsensusInfo`](#0x1_validator_consensus_info_ValidatorConsensusInfo) +- [Function `default`](#0x1_validator_consensus_info_default) +- [Function `new`](#0x1_validator_consensus_info_new) +- [Function `get_addr`](#0x1_validator_consensus_info_get_addr) +- [Function `get_pk_bytes`](#0x1_validator_consensus_info_get_pk_bytes) +- [Function `get_voting_power`](#0x1_validator_consensus_info_get_voting_power) +- [Specification](#@Specification_0) + + +
+ + + + + +## Struct `ValidatorConsensusInfo` + +Information about a validator that participates in consensus. + + +
struct ValidatorConsensusInfo has copy, drop, store
+
+ + + +
+Fields + + +
+
+addr: address +
+
+ +
+
+pk_bytes: vector<u8> +
+
+ +
+
+voting_power: u64 +
+
+ +
+
+ + +
+ + + +## Function `default` + +Create a default ValidatorConsensusInfo object. Value may be invalid. Only for placeholding purposes. + + +
public fun default(): validator_consensus_info::ValidatorConsensusInfo
+
+ + + +
+Implementation + + +
public fun default(): ValidatorConsensusInfo {
+    ValidatorConsensusInfo {
+        addr: @vm,
+        pk_bytes: vector[],
+        voting_power: 0,
+    }
+}
+
+ + + +
+ + + +## Function `new` + +Create a ValidatorConsensusInfo object. + + +
public fun new(addr: address, pk_bytes: vector<u8>, voting_power: u64): validator_consensus_info::ValidatorConsensusInfo
+
+ + + +
+Implementation + + +
public fun new(addr: address, pk_bytes: vector<u8>, voting_power: u64): ValidatorConsensusInfo {
+    ValidatorConsensusInfo {
+        addr,
+        pk_bytes,
+        voting_power,
+    }
+}
+
+ + + +
+ + + +## Function `get_addr` + +Get ValidatorConsensusInfo.addr. + + +
public fun get_addr(vci: &validator_consensus_info::ValidatorConsensusInfo): address
+
+ + + +
+Implementation + + +
public fun get_addr(vci: &ValidatorConsensusInfo): address {
+    vci.addr
+}
+
+ + + +
+ + + +## Function `get_pk_bytes` + +Get ValidatorConsensusInfo.pk_bytes. + + +
public fun get_pk_bytes(vci: &validator_consensus_info::ValidatorConsensusInfo): vector<u8>
+
+ + + +
+Implementation + + +
public fun get_pk_bytes(vci: &ValidatorConsensusInfo): vector<u8> {
+    vci.pk_bytes
+}
+
+ + + +
+ + + +## Function `get_voting_power` + +Get ValidatorConsensusInfo.voting_power. + + +
public fun get_voting_power(vci: &validator_consensus_info::ValidatorConsensusInfo): u64
+
+ + + +
+Implementation + + +
public fun get_voting_power(vci: &ValidatorConsensusInfo): u64 {
+    vci.voting_power
+}
+
+ + + +
+ + + +## Specification + + + +
pragma verify = true;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/version.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/version.md new file mode 100644 index 0000000000000..28e34690a97de --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/version.md @@ -0,0 +1,430 @@ + + + +# Module `0x1::version` + +Maintains the version number for the blockchain. + + +- [Resource `Version`](#0x1_version_Version) +- [Resource `SetVersionCapability`](#0x1_version_SetVersionCapability) +- [Constants](#@Constants_0) +- [Function `initialize`](#0x1_version_initialize) +- [Function `set_version`](#0x1_version_set_version) +- [Function `set_for_next_epoch`](#0x1_version_set_for_next_epoch) +- [Function `on_new_epoch`](#0x1_version_on_new_epoch) +- [Function `initialize_for_test`](#0x1_version_initialize_for_test) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `initialize`](#@Specification_1_initialize) + - [Function `set_version`](#@Specification_1_set_version) + - [Function `set_for_next_epoch`](#@Specification_1_set_for_next_epoch) + - [Function `on_new_epoch`](#@Specification_1_on_new_epoch) + - [Function `initialize_for_test`](#@Specification_1_initialize_for_test) + + +
use 0x1::chain_status;
+use 0x1::config_buffer;
+use 0x1::error;
+use 0x1::reconfiguration;
+use 0x1::signer;
+use 0x1::system_addresses;
+
+ + + + + +## Resource `Version` + + + +
struct Version has drop, store, key
+
+ + + +
+Fields + + +
+
+major: u64 +
+
+ +
+
+ + +
+ + + +## Resource `SetVersionCapability` + + + +
struct SetVersionCapability has key
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Specified major version number must be greater than current version number. + + +
const EINVALID_MAJOR_VERSION_NUMBER: u64 = 1;
+
+ + + + + +Account is not authorized to make this change. + + +
const ENOT_AUTHORIZED: u64 = 2;
+
+ + + + + +## Function `initialize` + +Only called during genesis. +Publishes the Version config. + + +
public(friend) fun initialize(aptos_framework: &signer, initial_version: u64)
+
+ + + +
+Implementation + + +
public(friend) fun initialize(aptos_framework: &signer, initial_version: u64) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+
+    move_to(aptos_framework, Version { major: initial_version });
+    // Give aptos framework account capability to call set version. This allows on chain governance to do it through
+    // control of the aptos framework account.
+    move_to(aptos_framework, SetVersionCapability {});
+}
+
+ + + +
+ + + +## Function `set_version` + +Deprecated by set_for_next_epoch(). + +WARNING: calling this while randomness is enabled will trigger a new epoch without randomness! + +TODO: update all the tests that reference this function, then disable this function. + + +
public entry fun set_version(account: &signer, major: u64)
+
+ + + +
+Implementation + + +
public entry fun set_version(account: &signer, major: u64) acquires Version {
+    assert!(exists<SetVersionCapability>(signer::address_of(account)), error::permission_denied(ENOT_AUTHORIZED));
+    chain_status::assert_genesis();
+
+    let old_major = borrow_global<Version>(@aptos_framework).major;
+    assert!(old_major < major, error::invalid_argument(EINVALID_MAJOR_VERSION_NUMBER));
+
+    let config = borrow_global_mut<Version>(@aptos_framework);
+    config.major = major;
+
+    // Need to trigger reconfiguration so validator nodes can sync on the updated version.
+    reconfiguration::reconfigure();
+}
+
+ + + +
+ + + +## Function `set_for_next_epoch` + +Used in on-chain governances to update the major version for the next epoch. +Example usage: +- aptos_framework::version::set_for_next_epoch(&framework_signer, new_version); +- aptos_framework::aptos_governance::reconfigure(&framework_signer); + + +
public entry fun set_for_next_epoch(account: &signer, major: u64)
+
+ + + +
+Implementation + + +
public entry fun set_for_next_epoch(account: &signer, major: u64) acquires Version {
+    assert!(exists<SetVersionCapability>(signer::address_of(account)), error::permission_denied(ENOT_AUTHORIZED));
+    let old_major = borrow_global<Version>(@aptos_framework).major;
+    assert!(old_major < major, error::invalid_argument(EINVALID_MAJOR_VERSION_NUMBER));
+    config_buffer::upsert(Version {major});
+}
+
+ + + +
+ + + +## Function `on_new_epoch` + +Only used in reconfigurations to apply the pending Version, if there is any. + + +
public(friend) fun on_new_epoch(framework: &signer)
+
+ + + +
+Implementation + + +
public(friend) fun on_new_epoch(framework: &signer) acquires Version {
+    system_addresses::assert_aptos_framework(framework);
+    if (config_buffer::does_exist<Version>()) {
+        let new_value = config_buffer::extract<Version>();
+        if (exists<Version>(@aptos_framework)) {
+            *borrow_global_mut<Version>(@aptos_framework) = new_value;
+        } else {
+            move_to(framework, new_value);
+        }
+    }
+}
+
+ + + +
+ + + +## Function `initialize_for_test` + +Only called in tests and testnets. This allows the core resources account, which only exists in tests/testnets, +to update the version. + + +
fun initialize_for_test(core_resources: &signer)
+
+ + + +
+Implementation + + +
fun initialize_for_test(core_resources: &signer) {
+    system_addresses::assert_core_resource(core_resources);
+    move_to(core_resources, SetVersionCapability {});
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1During genesis, the Version resource should be initialized with the initial version and stored along with its capability under the aptos framework account.MediumThe initialize function ensures that the signer is the aptos framework account and stores the Version and SetVersionCapability resources in it.Formally verified via initialize.
2The version should be updateable after initialization, but only by the Aptos framework account and with an increasing version number.MediumThe version number for the blockchain should be updatable whenever necessary. This functionality is provided by the set_version function which ensures that the new version is greater than the previous one.Formally verified via set_version.
+ + + + + + +### Module-level Specification + + +
pragma verify = true;
+pragma aborts_if_is_strict;
+
+ + + + + +### Function `initialize` + + +
public(friend) fun initialize(aptos_framework: &signer, initial_version: u64)
+
+ + +Abort if the resource already exists in @aptos_framework when initializing. + + +
// This enforces high-level requirement 1:
+aborts_if signer::address_of(aptos_framework) != @aptos_framework;
+aborts_if exists<Version>(@aptos_framework);
+aborts_if exists<SetVersionCapability>(@aptos_framework);
+ensures exists<Version>(@aptos_framework);
+ensures exists<SetVersionCapability>(@aptos_framework);
+ensures global<Version>(@aptos_framework) == Version { major: initial_version };
+ensures global<SetVersionCapability>(@aptos_framework) == SetVersionCapability {};
+
+ + + + + +### Function `set_version` + + +
public entry fun set_version(account: &signer, major: u64)
+
+ + + + +
pragma verify_duration_estimate = 120;
+include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply;
+include staking_config::StakingRewardsConfigRequirement;
+requires chain_status::is_genesis();
+requires timestamp::spec_now_microseconds() >= reconfiguration::last_reconfiguration_time();
+requires exists<stake::ValidatorFees>(@aptos_framework);
+requires exists<CoinInfo<AptosCoin>>(@aptos_framework);
+aborts_if !exists<SetVersionCapability>(signer::address_of(account));
+aborts_if !exists<Version>(@aptos_framework);
+let old_major = global<Version>(@aptos_framework).major;
+// This enforces high-level requirement 2:
+aborts_if !(old_major < major);
+ensures global<Version>(@aptos_framework).major == major;
+
+ + + + + +### Function `set_for_next_epoch` + + +
public entry fun set_for_next_epoch(account: &signer, major: u64)
+
+ + + + +
aborts_if !exists<SetVersionCapability>(signer::address_of(account));
+aborts_if !exists<Version>(@aptos_framework);
+aborts_if global<Version>(@aptos_framework).major >= major;
+aborts_if !exists<config_buffer::PendingConfigs>(@aptos_framework);
+
+ + + + + +### Function `on_new_epoch` + + +
public(friend) fun on_new_epoch(framework: &signer)
+
+ + + + +
requires @aptos_framework == std::signer::address_of(framework);
+include config_buffer::OnNewEpochRequirement<Version>;
+aborts_if false;
+
+ + + + + +### Function `initialize_for_test` + + +
fun initialize_for_test(core_resources: &signer)
+
+ + +This module turns on aborts_if_is_strict, so we need to add a spec for the test function initialize_for_test. + + +
pragma verify = false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/vesting.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/vesting.md new file mode 100644 index 0000000000000..49260e36409c8 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/vesting.md @@ -0,0 +1,4525 @@ + + + +# Module `0x1::vesting` + + +Simple vesting contract that allows specifying how much APT coins should be vesting in each fixed-size period. The +vesting contract also comes with staking and allows shareholders to withdraw rewards anytime. + +Vesting schedule is represented as a vector of distributions. For example, a vesting schedule of +[3/48, 3/48, 1/48] means that after the vesting starts: +1. The first and second periods will vest 3/48 of the total original grant. +2. The third period will vest 1/48. +3. All subsequent periods will also vest 1/48 (last distribution in the schedule) until the original grant runs out. + +Shareholder flow: +1. Admin calls create_vesting_contract with a schedule of [3/48, 3/48, 1/48] with a vesting cliff of 1 year and +vesting period of 1 month. +2. After a month, a shareholder calls unlock_rewards to request rewards. They can also call vest() which would also +unlocks rewards but since the 1 year cliff has not passed (vesting has not started), vest() would not release any of +the original grant. +3. After the unlocked rewards become fully withdrawable (as it's subject to staking lockup), shareholders can call +distribute() to send all withdrawable funds to all shareholders based on the original grant's shares structure. +4. After 1 year and 1 month, the vesting schedule now starts. Shareholders call vest() to unlock vested coins. vest() +checks the schedule and unlocks 3/48 of the original grant in addition to any accumulated rewards since last +unlock_rewards(). Once the unlocked coins become withdrawable, shareholders can call distribute(). +5. 
Assuming the shareholders forgot to call vest() for 2 months, when they call vest() again, they will unlock vested +tokens for the next period since last vest. This would be for the first month they missed. They can call vest() a +second time to unlock for the second month they missed. + +Admin flow: +1. After creating the vesting contract, admin cannot change the vesting schedule. +2. Admin can call update_voter, update_operator, or reset_lockup at any time to update the underlying staking +contract. +3. Admin can also call update_beneficiary for any shareholder. This would send all distributions (rewards, vested +coins) of that shareholder to the beneficiary account. By default, if a beneficiary is not set, the distributions are +sent directly to the shareholder account. +4. Admin can call terminate_vesting_contract to terminate the vesting. This would first finish any distribution but +will prevent any further rewards or vesting distributions from being created. Once the locked up stake becomes +withdrawable, admin can call admin_withdraw to withdraw all funds to the vesting contract's withdrawal address. 
+ + +- [Struct `VestingSchedule`](#0x1_vesting_VestingSchedule) +- [Struct `StakingInfo`](#0x1_vesting_StakingInfo) +- [Resource `VestingContract`](#0x1_vesting_VestingContract) +- [Resource `VestingAccountManagement`](#0x1_vesting_VestingAccountManagement) +- [Resource `AdminStore`](#0x1_vesting_AdminStore) +- [Struct `CreateVestingContract`](#0x1_vesting_CreateVestingContract) +- [Struct `UpdateOperator`](#0x1_vesting_UpdateOperator) +- [Struct `UpdateVoter`](#0x1_vesting_UpdateVoter) +- [Struct `ResetLockup`](#0x1_vesting_ResetLockup) +- [Struct `SetBeneficiary`](#0x1_vesting_SetBeneficiary) +- [Struct `UnlockRewards`](#0x1_vesting_UnlockRewards) +- [Struct `Vest`](#0x1_vesting_Vest) +- [Struct `Distribute`](#0x1_vesting_Distribute) +- [Struct `Terminate`](#0x1_vesting_Terminate) +- [Struct `AdminWithdraw`](#0x1_vesting_AdminWithdraw) +- [Struct `CreateVestingContractEvent`](#0x1_vesting_CreateVestingContractEvent) +- [Struct `UpdateOperatorEvent`](#0x1_vesting_UpdateOperatorEvent) +- [Struct `UpdateVoterEvent`](#0x1_vesting_UpdateVoterEvent) +- [Struct `ResetLockupEvent`](#0x1_vesting_ResetLockupEvent) +- [Struct `SetBeneficiaryEvent`](#0x1_vesting_SetBeneficiaryEvent) +- [Struct `UnlockRewardsEvent`](#0x1_vesting_UnlockRewardsEvent) +- [Struct `VestEvent`](#0x1_vesting_VestEvent) +- [Struct `DistributeEvent`](#0x1_vesting_DistributeEvent) +- [Struct `TerminateEvent`](#0x1_vesting_TerminateEvent) +- [Struct `AdminWithdrawEvent`](#0x1_vesting_AdminWithdrawEvent) +- [Constants](#@Constants_0) +- [Function `stake_pool_address`](#0x1_vesting_stake_pool_address) +- [Function `vesting_start_secs`](#0x1_vesting_vesting_start_secs) +- [Function `period_duration_secs`](#0x1_vesting_period_duration_secs) +- [Function `remaining_grant`](#0x1_vesting_remaining_grant) +- [Function `beneficiary`](#0x1_vesting_beneficiary) +- [Function `operator_commission_percentage`](#0x1_vesting_operator_commission_percentage) +- [Function 
`vesting_contracts`](#0x1_vesting_vesting_contracts) +- [Function `operator`](#0x1_vesting_operator) +- [Function `voter`](#0x1_vesting_voter) +- [Function `vesting_schedule`](#0x1_vesting_vesting_schedule) +- [Function `total_accumulated_rewards`](#0x1_vesting_total_accumulated_rewards) +- [Function `accumulated_rewards`](#0x1_vesting_accumulated_rewards) +- [Function `shareholders`](#0x1_vesting_shareholders) +- [Function `shareholder`](#0x1_vesting_shareholder) +- [Function `create_vesting_schedule`](#0x1_vesting_create_vesting_schedule) +- [Function `create_vesting_contract`](#0x1_vesting_create_vesting_contract) +- [Function `unlock_rewards`](#0x1_vesting_unlock_rewards) +- [Function `unlock_rewards_many`](#0x1_vesting_unlock_rewards_many) +- [Function `vest`](#0x1_vesting_vest) +- [Function `vest_many`](#0x1_vesting_vest_many) +- [Function `distribute`](#0x1_vesting_distribute) +- [Function `distribute_many`](#0x1_vesting_distribute_many) +- [Function `terminate_vesting_contract`](#0x1_vesting_terminate_vesting_contract) +- [Function `admin_withdraw`](#0x1_vesting_admin_withdraw) +- [Function `update_operator`](#0x1_vesting_update_operator) +- [Function `update_operator_with_same_commission`](#0x1_vesting_update_operator_with_same_commission) +- [Function `update_commission_percentage`](#0x1_vesting_update_commission_percentage) +- [Function `update_voter`](#0x1_vesting_update_voter) +- [Function `reset_lockup`](#0x1_vesting_reset_lockup) +- [Function `set_beneficiary`](#0x1_vesting_set_beneficiary) +- [Function `reset_beneficiary`](#0x1_vesting_reset_beneficiary) +- [Function `set_management_role`](#0x1_vesting_set_management_role) +- [Function `set_beneficiary_resetter`](#0x1_vesting_set_beneficiary_resetter) +- [Function `set_beneficiary_for_operator`](#0x1_vesting_set_beneficiary_for_operator) +- [Function `get_role_holder`](#0x1_vesting_get_role_holder) +- [Function `get_vesting_account_signer`](#0x1_vesting_get_vesting_account_signer) +- [Function 
`get_vesting_account_signer_internal`](#0x1_vesting_get_vesting_account_signer_internal) +- [Function `create_vesting_contract_account`](#0x1_vesting_create_vesting_contract_account) +- [Function `verify_admin`](#0x1_vesting_verify_admin) +- [Function `assert_vesting_contract_exists`](#0x1_vesting_assert_vesting_contract_exists) +- [Function `assert_active_vesting_contract`](#0x1_vesting_assert_active_vesting_contract) +- [Function `unlock_stake`](#0x1_vesting_unlock_stake) +- [Function `withdraw_stake`](#0x1_vesting_withdraw_stake) +- [Function `get_beneficiary`](#0x1_vesting_get_beneficiary) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `stake_pool_address`](#@Specification_1_stake_pool_address) + - [Function `vesting_start_secs`](#@Specification_1_vesting_start_secs) + - [Function `period_duration_secs`](#@Specification_1_period_duration_secs) + - [Function `remaining_grant`](#@Specification_1_remaining_grant) + - [Function `beneficiary`](#@Specification_1_beneficiary) + - [Function `operator_commission_percentage`](#@Specification_1_operator_commission_percentage) + - [Function `vesting_contracts`](#@Specification_1_vesting_contracts) + - [Function `operator`](#@Specification_1_operator) + - [Function `voter`](#@Specification_1_voter) + - [Function `vesting_schedule`](#@Specification_1_vesting_schedule) + - [Function `total_accumulated_rewards`](#@Specification_1_total_accumulated_rewards) + - [Function `accumulated_rewards`](#@Specification_1_accumulated_rewards) + - [Function `shareholders`](#@Specification_1_shareholders) + - [Function `shareholder`](#@Specification_1_shareholder) + - [Function `create_vesting_schedule`](#@Specification_1_create_vesting_schedule) + - [Function `create_vesting_contract`](#@Specification_1_create_vesting_contract) + - [Function `unlock_rewards`](#@Specification_1_unlock_rewards) + - [Function 
`unlock_rewards_many`](#@Specification_1_unlock_rewards_many) + - [Function `vest`](#@Specification_1_vest) + - [Function `vest_many`](#@Specification_1_vest_many) + - [Function `distribute`](#@Specification_1_distribute) + - [Function `distribute_many`](#@Specification_1_distribute_many) + - [Function `terminate_vesting_contract`](#@Specification_1_terminate_vesting_contract) + - [Function `admin_withdraw`](#@Specification_1_admin_withdraw) + - [Function `update_operator`](#@Specification_1_update_operator) + - [Function `update_operator_with_same_commission`](#@Specification_1_update_operator_with_same_commission) + - [Function `update_commission_percentage`](#@Specification_1_update_commission_percentage) + - [Function `update_voter`](#@Specification_1_update_voter) + - [Function `reset_lockup`](#@Specification_1_reset_lockup) + - [Function `set_beneficiary`](#@Specification_1_set_beneficiary) + - [Function `reset_beneficiary`](#@Specification_1_reset_beneficiary) + - [Function `set_management_role`](#@Specification_1_set_management_role) + - [Function `set_beneficiary_resetter`](#@Specification_1_set_beneficiary_resetter) + - [Function `set_beneficiary_for_operator`](#@Specification_1_set_beneficiary_for_operator) + - [Function `get_role_holder`](#@Specification_1_get_role_holder) + - [Function `get_vesting_account_signer`](#@Specification_1_get_vesting_account_signer) + - [Function `get_vesting_account_signer_internal`](#@Specification_1_get_vesting_account_signer_internal) + - [Function `create_vesting_contract_account`](#@Specification_1_create_vesting_contract_account) + - [Function `verify_admin`](#@Specification_1_verify_admin) + - [Function `assert_vesting_contract_exists`](#@Specification_1_assert_vesting_contract_exists) + - [Function `assert_active_vesting_contract`](#@Specification_1_assert_active_vesting_contract) + - [Function `unlock_stake`](#@Specification_1_unlock_stake) + - [Function `withdraw_stake`](#@Specification_1_withdraw_stake) + - 
[Function `get_beneficiary`](#@Specification_1_get_beneficiary) + + +
use 0x1::account;
+use 0x1::aptos_account;
+use 0x1::aptos_coin;
+use 0x1::bcs;
+use 0x1::coin;
+use 0x1::error;
+use 0x1::event;
+use 0x1::features;
+use 0x1::fixed_point32;
+use 0x1::math64;
+use 0x1::pool_u64;
+use 0x1::signer;
+use 0x1::simple_map;
+use 0x1::stake;
+use 0x1::staking_contract;
+use 0x1::string;
+use 0x1::system_addresses;
+use 0x1::timestamp;
+use 0x1::vector;
+
+ + + + + +## Struct `VestingSchedule` + + + +
struct VestingSchedule has copy, drop, store
+
+ + + +
+Fields + + +
+
+schedule: vector<fixed_point32::FixedPoint32> +
+
+ +
+
+start_timestamp_secs: u64 +
+
+ +
+
+period_duration: u64 +
+
+ +
+
+last_vested_period: u64 +
+
+ +
+
+ + +
+ + + +## Struct `StakingInfo` + + + +
struct StakingInfo has store
+
+ + + +
+Fields + + +
+
+pool_address: address +
+
+ +
+
+operator: address +
+
+ +
+
+voter: address +
+
+ +
+
+commission_percentage: u64 +
+
+ +
+
+ + +
+ + + +## Resource `VestingContract` + + + +
struct VestingContract has key
+
+ + + +
+Fields + + +
+
+state: u64 +
+
+ +
+
+admin: address +
+
+ +
+
+grant_pool: pool_u64::Pool +
+
+ +
+
+beneficiaries: simple_map::SimpleMap<address, address> +
+
+ +
+
+vesting_schedule: vesting::VestingSchedule +
+
+ +
+
+withdrawal_address: address +
+
+ +
+
+staking: vesting::StakingInfo +
+
+ +
+
+remaining_grant: u64 +
+
+ +
+
+signer_cap: account::SignerCapability +
+
+ +
+
+update_operator_events: event::EventHandle<vesting::UpdateOperatorEvent> +
+
+ +
+
+update_voter_events: event::EventHandle<vesting::UpdateVoterEvent> +
+
+ +
+
+reset_lockup_events: event::EventHandle<vesting::ResetLockupEvent> +
+
+ +
+
+set_beneficiary_events: event::EventHandle<vesting::SetBeneficiaryEvent> +
+
+ +
+
+unlock_rewards_events: event::EventHandle<vesting::UnlockRewardsEvent> +
+
+ +
+
+vest_events: event::EventHandle<vesting::VestEvent> +
+
+ +
+
+distribute_events: event::EventHandle<vesting::DistributeEvent> +
+
+ +
+
+terminate_events: event::EventHandle<vesting::TerminateEvent> +
+
+ +
+
+admin_withdraw_events: event::EventHandle<vesting::AdminWithdrawEvent> +
+
+ +
+
+ + +
+ + + +## Resource `VestingAccountManagement` + + + +
struct VestingAccountManagement has key
+
+ + + +
+Fields + + +
+
+roles: simple_map::SimpleMap<string::String, address> +
+
+ +
+
+ + +
+ + + +## Resource `AdminStore` + + + +
struct AdminStore has key
+
+ + + +
+Fields + + +
+
+vesting_contracts: vector<address> +
+
+ +
+
+nonce: u64 +
+
+ +
+
+create_events: event::EventHandle<vesting::CreateVestingContractEvent> +
+
+ +
+
+ + +
+ + + +## Struct `CreateVestingContract` + + + +
#[event]
+struct CreateVestingContract has drop, store
+
+ + + +
+Fields + + +
+
+operator: address +
+
+ +
+
+voter: address +
+
+ +
+
+grant_amount: u64 +
+
+ +
+
+withdrawal_address: address +
+
+ +
+
+vesting_contract_address: address +
+
+ +
+
+staking_pool_address: address +
+
+ +
+
+commission_percentage: u64 +
+
+ +
+
+ + +
+ + + +## Struct `UpdateOperator` + + + +
#[event]
+struct UpdateOperator has drop, store
+
+ + + +
+Fields + + +
+
+admin: address +
+
+ +
+
+vesting_contract_address: address +
+
+ +
+
+staking_pool_address: address +
+
+ +
+
+old_operator: address +
+
+ +
+
+new_operator: address +
+
+ +
+
+commission_percentage: u64 +
+
+ +
+
+ + +
+ + + +## Struct `UpdateVoter` + + + +
#[event]
+struct UpdateVoter has drop, store
+
+ + + +
+Fields + + +
+
+admin: address +
+
+ +
+
+vesting_contract_address: address +
+
+ +
+
+staking_pool_address: address +
+
+ +
+
+old_voter: address +
+
+ +
+
+new_voter: address +
+
+ +
+
+ + +
+ + + +## Struct `ResetLockup` + + + +
#[event]
+struct ResetLockup has drop, store
+
+ + + +
+Fields + + +
+
+admin: address +
+
+ +
+
+vesting_contract_address: address +
+
+ +
+
+staking_pool_address: address +
+
+ +
+
+new_lockup_expiration_secs: u64 +
+
+ +
+
+ + +
+ + + +## Struct `SetBeneficiary` + + + +
#[event]
+struct SetBeneficiary has drop, store
+
+ + + +
+Fields + + +
+
+admin: address +
+
+ +
+
+vesting_contract_address: address +
+
+ +
+
+shareholder: address +
+
+ +
+
+old_beneficiary: address +
+
+ +
+
+new_beneficiary: address +
+
+ +
+
+ + +
+ + + +## Struct `UnlockRewards` + + + +
#[event]
+struct UnlockRewards has drop, store
+
+ + + +
+Fields + + +
+
+admin: address +
+
+ +
+
+vesting_contract_address: address +
+
+ +
+
+staking_pool_address: address +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `Vest` + + + +
#[event]
+struct Vest has drop, store
+
+ + + +
+Fields + + +
+
+admin: address +
+
+ +
+
+vesting_contract_address: address +
+
+ +
+
+staking_pool_address: address +
+
+ +
+
+period_vested: u64 +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `Distribute` + + + +
#[event]
+struct Distribute has drop, store
+
+ + + +
+Fields + + +
+
+admin: address +
+
+ +
+
+vesting_contract_address: address +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `Terminate` + + + +
#[event]
+struct Terminate has drop, store
+
+ + + +
+Fields + + +
+
+admin: address +
+
+ +
+
+vesting_contract_address: address +
+
+ +
+
+ + +
+ + + +## Struct `AdminWithdraw` + + + +
#[event]
+struct AdminWithdraw has drop, store
+
+ + + +
+Fields + + +
+
+admin: address +
+
+ +
+
+vesting_contract_address: address +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `CreateVestingContractEvent` + + + +
struct CreateVestingContractEvent has drop, store
+
+ + + +
+Fields + + +
+
+operator: address +
+
+ +
+
+voter: address +
+
+ +
+
+grant_amount: u64 +
+
+ +
+
+withdrawal_address: address +
+
+ +
+
+vesting_contract_address: address +
+
+ +
+
+staking_pool_address: address +
+
+ +
+
+commission_percentage: u64 +
+
+ +
+
+ + +
+ + + +## Struct `UpdateOperatorEvent` + + + +
struct UpdateOperatorEvent has drop, store
+
+ + + +
+Fields + + +
+
+admin: address +
+
+ +
+
+vesting_contract_address: address +
+
+ +
+
+staking_pool_address: address +
+
+ +
+
+old_operator: address +
+
+ +
+
+new_operator: address +
+
+ +
+
+commission_percentage: u64 +
+
+ +
+
+ + +
+ + + +## Struct `UpdateVoterEvent` + + + +
struct UpdateVoterEvent has drop, store
+
+ + + +
+Fields + + +
+
+admin: address +
+
+ +
+
+vesting_contract_address: address +
+
+ +
+
+staking_pool_address: address +
+
+ +
+
+old_voter: address +
+
+ +
+
+new_voter: address +
+
+ +
+
+ + +
+ + + +## Struct `ResetLockupEvent` + + + +
struct ResetLockupEvent has drop, store
+
+ + + +
+Fields + + +
+
+admin: address +
+
+ +
+
+vesting_contract_address: address +
+
+ +
+
+staking_pool_address: address +
+
+ +
+
+new_lockup_expiration_secs: u64 +
+
+ +
+
+ + +
+ + + +## Struct `SetBeneficiaryEvent` + + + +
struct SetBeneficiaryEvent has drop, store
+
+ + + +
+Fields + + +
+
+admin: address +
+
+ +
+
+vesting_contract_address: address +
+
+ +
+
+shareholder: address +
+
+ +
+
+old_beneficiary: address +
+
+ +
+
+new_beneficiary: address +
+
+ +
+
+ + +
+ + + +## Struct `UnlockRewardsEvent` + + + +
struct UnlockRewardsEvent has drop, store
+
+ + + +
+Fields + + +
+
+admin: address +
+
+ +
+
+vesting_contract_address: address +
+
+ +
+
+staking_pool_address: address +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `VestEvent` + + + +
struct VestEvent has drop, store
+
+ + + +
+Fields + + +
+
+admin: address +
+
+ +
+
+vesting_contract_address: address +
+
+ +
+
+staking_pool_address: address +
+
+ +
+
+period_vested: u64 +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `DistributeEvent` + + + +
struct DistributeEvent has drop, store
+
+ + + +
+Fields + + +
+
+admin: address +
+
+ +
+
+vesting_contract_address: address +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `TerminateEvent` + + + +
struct TerminateEvent has drop, store
+
+ + + +
+Fields + + +
+
+admin: address +
+
+ +
+
+vesting_contract_address: address +
+
+ +
+
+ + +
+ + + +## Struct `AdminWithdrawEvent` + + + +
struct AdminWithdrawEvent has drop, store
+
+ + + +
+Fields + + +
+
+admin: address +
+
+ +
+
+vesting_contract_address: address +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Vesting schedule cannot be empty. + + +
const EEMPTY_VESTING_SCHEDULE: u64 = 2;
+
+ + + + + +Withdrawal address is invalid. + + +
const EINVALID_WITHDRAWAL_ADDRESS: u64 = 1;
+
+ + + + + +The signer is not the admin of the vesting contract. + + +
const ENOT_ADMIN: u64 = 7;
+
+ + + + + +Shareholders list cannot be empty. + + +
const ENO_SHAREHOLDERS: u64 = 4;
+
+ + + + + +Cannot terminate the vesting contract with pending active stake. Need to wait until next epoch. + + +
const EPENDING_STAKE_FOUND: u64 = 11;
+
+ + + + + +Account is not admin or does not have the required role to take this action. + + +
const EPERMISSION_DENIED: u64 = 15;
+
+ + + + + +The vesting account has no such management role. + + +
const EROLE_NOT_FOUND: u64 = 14;
+
+ + + + + +The length of shareholders and shares lists don't match. + + +
const ESHARES_LENGTH_MISMATCH: u64 = 5;
+
+ + + + + +Zero items were provided to a *_many function. + + +
const EVEC_EMPTY_FOR_MANY_FUNCTION: u64 = 16;
+
+ + + + + +Vesting account has no other management roles beside admin. + + +
const EVESTING_ACCOUNT_HAS_NO_ROLES: u64 = 13;
+
+ + + + + +Vesting contract needs to be in active state. + + +
const EVESTING_CONTRACT_NOT_ACTIVE: u64 = 8;
+
+ + + + + +No vesting contract found at provided address. + + +
const EVESTING_CONTRACT_NOT_FOUND: u64 = 10;
+
+ + + + + +Admin can only withdraw from an inactive (paused or terminated) vesting contract. + + +
const EVESTING_CONTRACT_STILL_ACTIVE: u64 = 9;
+
+ + + + + +Vesting cannot start before or at the current block timestamp. Has to be in the future. + + +
const EVESTING_START_TOO_SOON: u64 = 6;
+
+ + + + + +Grant amount cannot be 0. + + +
const EZERO_GRANT: u64 = 12;
+
+ + + + + +Vesting period cannot be 0. + + +
const EZERO_VESTING_SCHEDULE_PERIOD: u64 = 3;
+
+ + + + + +Maximum number of shareholders a vesting pool can support. + + +
const MAXIMUM_SHAREHOLDERS: u64 = 30;
+
+ + + + + +Roles that can manage certain aspects of the vesting account beyond the main admin. + + +
const ROLE_BENEFICIARY_RESETTER: vector<u8> = [82, 79, 76, 69, 95, 66, 69, 78, 69, 70, 73, 67, 73, 65, 82, 89, 95, 82, 69, 83, 69, 84, 84, 69, 82];
+
+ + + + + +Vesting contract states. +Vesting contract is active and distributions can be made. + + +
const VESTING_POOL_ACTIVE: u64 = 1;
+
+ + + + + + + +
const VESTING_POOL_SALT: vector<u8> = [97, 112, 116, 111, 115, 95, 102, 114, 97, 109, 101, 119, 111, 114, 107, 58, 58, 118, 101, 115, 116, 105, 110, 103];
+
+ + + + + +Vesting contract has been terminated and all funds have been released back to the withdrawal address. + + +
const VESTING_POOL_TERMINATED: u64 = 2;
+
+ + + + + +## Function `stake_pool_address` + +Return the address of the underlying stake pool (separate resource account) of the vesting contract. + +This errors out if the vesting contract with the provided address doesn't exist. + + +
#[view]
+public fun stake_pool_address(vesting_contract_address: address): address
+
+ + + +
+Implementation + + +
public fun stake_pool_address(vesting_contract_address: address): address acquires VestingContract {
+    assert_vesting_contract_exists(vesting_contract_address);
+    borrow_global<VestingContract>(vesting_contract_address).staking.pool_address
+}
+
+ + + +
+ + + +## Function `vesting_start_secs` + +Return the vesting start timestamp (in seconds) of the vesting contract. +Vesting will start at this time, and once a full period has passed, the first vest will become unlocked. + +This errors out if the vesting contract with the provided address doesn't exist. + + +
#[view]
+public fun vesting_start_secs(vesting_contract_address: address): u64
+
+ + + +
+Implementation + + +
public fun vesting_start_secs(vesting_contract_address: address): u64 acquires VestingContract {
+    assert_vesting_contract_exists(vesting_contract_address);
+    borrow_global<VestingContract>(vesting_contract_address).vesting_schedule.start_timestamp_secs
+}
+
+ + + +
+ + + +## Function `period_duration_secs` + +Return the duration of one vesting period (in seconds). +Each vest is released after one full period has started, starting from the specified start_timestamp_secs. + +This errors out if the vesting contract with the provided address doesn't exist. + + +
#[view]
+public fun period_duration_secs(vesting_contract_address: address): u64
+
+ + + +
+Implementation + + +
public fun period_duration_secs(vesting_contract_address: address): u64 acquires VestingContract {
+    assert_vesting_contract_exists(vesting_contract_address);
+    borrow_global<VestingContract>(vesting_contract_address).vesting_schedule.period_duration
+}
+
+ + + +
+ + + +## Function `remaining_grant` + +Return the remaining grant, consisting of unvested coins that have not been distributed to shareholders. +Prior to start_timestamp_secs, the remaining grant will always be equal to the original grant. +Once vesting has started, and vested tokens are distributed, the remaining grant will decrease over time, +according to the vesting schedule. + +This errors out if the vesting contract with the provided address doesn't exist. + + +
#[view]
+public fun remaining_grant(vesting_contract_address: address): u64
+
+ + + +
+Implementation + + +
public fun remaining_grant(vesting_contract_address: address): u64 acquires VestingContract {
+    assert_vesting_contract_exists(vesting_contract_address);
+    borrow_global<VestingContract>(vesting_contract_address).remaining_grant
+}
+
+ + + +
+ + + +## Function `beneficiary` + +Return the beneficiary account of the specified shareholder in a vesting contract. +This is the same as the shareholder address by default and only different if it's been explicitly set. + +This errors out if the vesting contract with the provided address doesn't exist. + + +
#[view]
+public fun beneficiary(vesting_contract_address: address, shareholder: address): address
+
+ + + +
+Implementation + + +
public fun beneficiary(vesting_contract_address: address, shareholder: address): address acquires VestingContract {
+    assert_vesting_contract_exists(vesting_contract_address);
+    get_beneficiary(borrow_global<VestingContract>(vesting_contract_address), shareholder)
+}
+
+ + + +
+ + + +## Function `operator_commission_percentage` + +Return the percentage of accumulated rewards that is paid to the operator as commission. + +This errors out if the vesting contract with the provided address doesn't exist. + + +
#[view]
+public fun operator_commission_percentage(vesting_contract_address: address): u64
+
+ + + +
+Implementation + + +
public fun operator_commission_percentage(vesting_contract_address: address): u64 acquires VestingContract {
+    assert_vesting_contract_exists(vesting_contract_address);
+    borrow_global<VestingContract>(vesting_contract_address).staking.commission_percentage
+}
+
+ + + +
+ + + +## Function `vesting_contracts` + +Return all the vesting contracts a given address is an admin of. + + +
#[view]
+public fun vesting_contracts(admin: address): vector<address>
+
+ + + +
+Implementation + + +
public fun vesting_contracts(admin: address): vector<address> acquires AdminStore {
+    if (!exists<AdminStore>(admin)) {
+        vector::empty<address>()
+    } else {
+        borrow_global<AdminStore>(admin).vesting_contracts
+    }
+}
+
+ + + +
+ + + +## Function `operator` + +Return the operator who runs the validator for the vesting contract. + +This errors out if the vesting contract with the provided address doesn't exist. + + +
#[view]
+public fun operator(vesting_contract_address: address): address
+
+ + + +
+Implementation + + +
public fun operator(vesting_contract_address: address): address acquires VestingContract {
+    assert_vesting_contract_exists(vesting_contract_address);
+    borrow_global<VestingContract>(vesting_contract_address).staking.operator
+}
+
+ + + +
+ + + +## Function `voter` + +Return the voter who will be voting on on-chain governance proposals on behalf of the vesting contract's stake +pool. + +This errors out if the vesting contract with the provided address doesn't exist. + + +
#[view]
+public fun voter(vesting_contract_address: address): address
+
+ + + +
+Implementation + + +
public fun voter(vesting_contract_address: address): address acquires VestingContract {
+    assert_vesting_contract_exists(vesting_contract_address);
+    borrow_global<VestingContract>(vesting_contract_address).staking.voter
+}
+
+ + + +
+ + + +## Function `vesting_schedule` + +Return the vesting contract's vesting schedule. The core schedule is represented as a list of u64-based +fractions, where the rightmmost 32 bits can be divided by 2^32 to get the fraction, and anything else is the +whole number. + +For example 3/48, or 0.0625, will be represented as 268435456. The fractional portion would be +268435456 / 2^32 = 0.0625. Since there are fewer than 32 bits, the whole number portion is effectively 0. +So 268435456 = 0.0625. + +This errors out if the vesting contract with the provided address doesn't exist. + + +
#[view]
+public fun vesting_schedule(vesting_contract_address: address): vesting::VestingSchedule
+
+ + + +
+Implementation + + +
public fun vesting_schedule(vesting_contract_address: address): VestingSchedule acquires VestingContract {
+    assert_vesting_contract_exists(vesting_contract_address);
+    borrow_global<VestingContract>(vesting_contract_address).vesting_schedule
+}
+
+ + + +
+ + + +## Function `total_accumulated_rewards` + +Return the total accumulated rewards that have not been distributed to shareholders of the vesting contract. +This excludes any unpaid commission that the operator has not collected. + +This errors out if the vesting contract with the provided address doesn't exist. + + +
#[view]
+public fun total_accumulated_rewards(vesting_contract_address: address): u64
+
+ + + +
+Implementation + + +
public fun total_accumulated_rewards(vesting_contract_address: address): u64 acquires VestingContract {
+    assert_active_vesting_contract(vesting_contract_address);
+
+    let vesting_contract = borrow_global<VestingContract>(vesting_contract_address);
+    let (total_active_stake, _, commission_amount) =
+        staking_contract::staking_contract_amounts(vesting_contract_address, vesting_contract.staking.operator);
+    total_active_stake - vesting_contract.remaining_grant - commission_amount
+}
+
+ + + +
+ + + +## Function `accumulated_rewards` + +Return the accumulated rewards that have not been distributed to the provided shareholder. Caller can also pass +the beneficiary address instead of shareholder address. + +This errors out if the vesting contract with the provided address doesn't exist. + + +
#[view]
+public fun accumulated_rewards(vesting_contract_address: address, shareholder_or_beneficiary: address): u64
+
+ + + +
+Implementation + + +
public fun accumulated_rewards(
+    vesting_contract_address: address, shareholder_or_beneficiary: address): u64 acquires VestingContract {
+    assert_active_vesting_contract(vesting_contract_address);
+
+    let total_accumulated_rewards = total_accumulated_rewards(vesting_contract_address);
+    let shareholder = shareholder(vesting_contract_address, shareholder_or_beneficiary);
+    let vesting_contract = borrow_global<VestingContract>(vesting_contract_address);
+    let shares = pool_u64::shares(&vesting_contract.grant_pool, shareholder);
+    pool_u64::shares_to_amount_with_total_coins(&vesting_contract.grant_pool, shares, total_accumulated_rewards)
+}
+
+ + + +
+ + + +## Function `shareholders` + +Return the list of all shareholders in the vesting contract. + + +
#[view]
+public fun shareholders(vesting_contract_address: address): vector<address>
+
+ + + +
+Implementation + + +
public fun shareholders(vesting_contract_address: address): vector<address> acquires VestingContract {
+    assert_active_vesting_contract(vesting_contract_address);
+
+    let vesting_contract = borrow_global<VestingContract>(vesting_contract_address);
+    pool_u64::shareholders(&vesting_contract.grant_pool)
+}
+
+ + + +
+ + + +## Function `shareholder` + +Return the shareholder address given the beneficiary address in a given vesting contract. If there are multiple +shareholders with the same beneficiary address, only the first shareholder is returned. If the given beneficiary +address is actually a shareholder address, just return the address back. + +This returns 0x0 if no shareholder is found for the given beneficiary / the address is not a shareholder itself. + + +
#[view]
+public fun shareholder(vesting_contract_address: address, shareholder_or_beneficiary: address): address
+
+ + + +
+Implementation + + +
public fun shareholder(
+    vesting_contract_address: address,
+    shareholder_or_beneficiary: address
+): address acquires VestingContract {
+    assert_active_vesting_contract(vesting_contract_address);
+
+    let shareholders = &shareholders(vesting_contract_address);
+    if (vector::contains(shareholders, &shareholder_or_beneficiary)) {
+        return shareholder_or_beneficiary
+    };
+    let vesting_contract = borrow_global<VestingContract>(vesting_contract_address);
+    let result = @0x0;
+    vector::any(shareholders, |shareholder| {
+        if (shareholder_or_beneficiary == get_beneficiary(vesting_contract, *shareholder)) {
+            result = *shareholder;
+            true
+        } else {
+            false
+        }
+    });
+
+    result
+}
+
+ + + +
+ + + +## Function `create_vesting_schedule` + +Create a vesting schedule with the given schedule of distributions, a vesting start time and period duration. + + +
public fun create_vesting_schedule(schedule: vector<fixed_point32::FixedPoint32>, start_timestamp_secs: u64, period_duration: u64): vesting::VestingSchedule
+
+ + + +
+Implementation + + +
public fun create_vesting_schedule(
+    schedule: vector<FixedPoint32>,
+    start_timestamp_secs: u64,
+    period_duration: u64,
+): VestingSchedule {
+    assert!(vector::length(&schedule) > 0, error::invalid_argument(EEMPTY_VESTING_SCHEDULE));
+    assert!(period_duration > 0, error::invalid_argument(EZERO_VESTING_SCHEDULE_PERIOD));
+    assert!(
+        start_timestamp_secs >= timestamp::now_seconds(),
+        error::invalid_argument(EVESTING_START_TOO_SOON),
+    );
+
+    VestingSchedule {
+        schedule,
+        start_timestamp_secs,
+        period_duration,
+        last_vested_period: 0,
+    }
+}
+
+ + + +
+ + + +## Function `create_vesting_contract` + +Create a vesting contract with a given configurations. + + +
public fun create_vesting_contract(admin: &signer, shareholders: &vector<address>, buy_ins: simple_map::SimpleMap<address, coin::Coin<aptos_coin::AptosCoin>>, vesting_schedule: vesting::VestingSchedule, withdrawal_address: address, operator: address, voter: address, commission_percentage: u64, contract_creation_seed: vector<u8>): address
+
+ + + +
+Implementation + + +
public fun create_vesting_contract(
+    admin: &signer,
+    shareholders: &vector<address>,
+    buy_ins: SimpleMap<address, Coin<AptosCoin>>,
+    vesting_schedule: VestingSchedule,
+    withdrawal_address: address,
+    operator: address,
+    voter: address,
+    commission_percentage: u64,
+    // Optional seed used when creating the staking contract account.
+    contract_creation_seed: vector<u8>,
+): address acquires AdminStore {
+    assert!(
+        !system_addresses::is_reserved_address(withdrawal_address),
+        error::invalid_argument(EINVALID_WITHDRAWAL_ADDRESS),
+    );
+    assert_account_is_registered_for_apt(withdrawal_address);
+    assert!(vector::length(shareholders) > 0, error::invalid_argument(ENO_SHAREHOLDERS));
+    assert!(
+        simple_map::length(&buy_ins) == vector::length(shareholders),
+        error::invalid_argument(ESHARES_LENGTH_MISMATCH),
+    );
+
+    // Create a coins pool to track shareholders and shares of the grant.
+    let grant = coin::zero<AptosCoin>();
+    let grant_amount = 0;
+    let grant_pool = pool_u64::create(MAXIMUM_SHAREHOLDERS);
+    vector::for_each_ref(shareholders, |shareholder| {
+        let shareholder: address = *shareholder;
+        let (_, buy_in) = simple_map::remove(&mut buy_ins, &shareholder);
+        let buy_in_amount = coin::value(&buy_in);
+        coin::merge(&mut grant, buy_in);
+        pool_u64::buy_in(
+            &mut grant_pool,
+            shareholder,
+            buy_in_amount,
+        );
+        grant_amount = grant_amount + buy_in_amount;
+    });
+    assert!(grant_amount > 0, error::invalid_argument(EZERO_GRANT));
+
+    // If this is the first time this admin account has created a vesting contract, initialize the admin store.
+    let admin_address = signer::address_of(admin);
+    if (!exists<AdminStore>(admin_address)) {
+        move_to(admin, AdminStore {
+            vesting_contracts: vector::empty<address>(),
+            nonce: 0,
+            create_events: new_event_handle<CreateVestingContractEvent>(admin),
+        });
+    };
+
+    // Initialize the vesting contract in a new resource account. This allows the same admin to create multiple
+    // pools.
+    let (contract_signer, contract_signer_cap) = create_vesting_contract_account(admin, contract_creation_seed);
+    let pool_address = staking_contract::create_staking_contract_with_coins(
+        &contract_signer, operator, voter, grant, commission_percentage, contract_creation_seed);
+
+    // Add the newly created vesting contract's address to the admin store.
+    let contract_address = signer::address_of(&contract_signer);
+    let admin_store = borrow_global_mut<AdminStore>(admin_address);
+    vector::push_back(&mut admin_store.vesting_contracts, contract_address);
+    if (std::features::module_event_migration_enabled()) {
+        emit(
+            CreateVestingContract {
+                operator,
+                voter,
+                withdrawal_address,
+                grant_amount,
+                vesting_contract_address: contract_address,
+                staking_pool_address: pool_address,
+                commission_percentage,
+            },
+        );
+    };
+    emit_event(
+        &mut admin_store.create_events,
+        CreateVestingContractEvent {
+            operator,
+            voter,
+            withdrawal_address,
+            grant_amount,
+            vesting_contract_address: contract_address,
+            staking_pool_address: pool_address,
+            commission_percentage,
+        },
+    );
+
+    move_to(&contract_signer, VestingContract {
+        state: VESTING_POOL_ACTIVE,
+        admin: admin_address,
+        grant_pool,
+        beneficiaries: simple_map::create<address, address>(),
+        vesting_schedule,
+        withdrawal_address,
+        staking: StakingInfo { pool_address, operator, voter, commission_percentage },
+        remaining_grant: grant_amount,
+        signer_cap: contract_signer_cap,
+        update_operator_events: new_event_handle<UpdateOperatorEvent>(&contract_signer),
+        update_voter_events: new_event_handle<UpdateVoterEvent>(&contract_signer),
+        reset_lockup_events: new_event_handle<ResetLockupEvent>(&contract_signer),
+        set_beneficiary_events: new_event_handle<SetBeneficiaryEvent>(&contract_signer),
+        unlock_rewards_events: new_event_handle<UnlockRewardsEvent>(&contract_signer),
+        vest_events: new_event_handle<VestEvent>(&contract_signer),
+        distribute_events: new_event_handle<DistributeEvent>(&contract_signer),
+        terminate_events: new_event_handle<TerminateEvent>(&contract_signer),
+        admin_withdraw_events: new_event_handle<AdminWithdrawEvent>(&contract_signer),
+    });
+
+    simple_map::destroy_empty(buy_ins);
+    contract_address
+}
+
+ + + +
+ + + +## Function `unlock_rewards` + +Unlock any accumulated rewards. + + +
public entry fun unlock_rewards(contract_address: address)
+
+ + + +
+Implementation + + +
public entry fun unlock_rewards(contract_address: address) acquires VestingContract {
+    let accumulated_rewards = total_accumulated_rewards(contract_address);
+    let vesting_contract = borrow_global<VestingContract>(contract_address);
+    unlock_stake(vesting_contract, accumulated_rewards);
+}
+
+ + + +
+ + + +## Function `unlock_rewards_many` + +Call unlock_rewards for many vesting contracts. + + +
public entry fun unlock_rewards_many(contract_addresses: vector<address>)
+
+ + + +
+Implementation + + +
public entry fun unlock_rewards_many(contract_addresses: vector<address>) acquires VestingContract {
+    let len = vector::length(&contract_addresses);
+
+    assert!(len != 0, error::invalid_argument(EVEC_EMPTY_FOR_MANY_FUNCTION));
+
+    vector::for_each_ref(&contract_addresses, |contract_address| {
+        let contract_address: address = *contract_address;
+        unlock_rewards(contract_address);
+    });
+}
+
+ + + +
+ + + +## Function `vest` + +Unlock any vested portion of the grant. + + +
public entry fun vest(contract_address: address)
+
+ + + +
+Implementation + + +
public entry fun vest(contract_address: address) acquires VestingContract {
+    // Unlock all rewards first, if any.
+    unlock_rewards(contract_address);
+
+    // Unlock the vested amount. This amount will become withdrawable when the underlying stake pool's lockup
+    // expires.
+    let vesting_contract = borrow_global_mut<VestingContract>(contract_address);
+    // Short-circuit if vesting hasn't started yet.
+    if (vesting_contract.vesting_schedule.start_timestamp_secs > timestamp::now_seconds()) {
+        return
+    };
+
+    // Check if the next vested period has already passed. If not, short-circuit since there's nothing to vest.
+    let vesting_schedule = &mut vesting_contract.vesting_schedule;
+    let last_vested_period = vesting_schedule.last_vested_period;
+    let next_period_to_vest = last_vested_period + 1;
+    let last_completed_period =
+        (timestamp::now_seconds() - vesting_schedule.start_timestamp_secs) / vesting_schedule.period_duration;
+    if (last_completed_period < next_period_to_vest) {
+        return
+    };
+
+    // Calculate how much has vested, excluding rewards.
+    // Index is 0-based while period is 1-based so we need to subtract 1.
+    let schedule = &vesting_schedule.schedule;
+    let schedule_index = next_period_to_vest - 1;
+    let vesting_fraction = if (schedule_index < vector::length(schedule)) {
+        *vector::borrow(schedule, schedule_index)
+    } else {
+        // Last vesting schedule fraction will repeat until the grant runs out.
+        *vector::borrow(schedule, vector::length(schedule) - 1)
+    };
+    let total_grant = pool_u64::total_coins(&vesting_contract.grant_pool);
+    let vested_amount = fixed_point32::multiply_u64(total_grant, vesting_fraction);
+    // Cap vested amount by the remaining grant amount so we don't try to distribute more than what's remaining.
+    vested_amount = min(vested_amount, vesting_contract.remaining_grant);
+    vesting_contract.remaining_grant = vesting_contract.remaining_grant - vested_amount;
+    vesting_schedule.last_vested_period = next_period_to_vest;
+    unlock_stake(vesting_contract, vested_amount);
+
+    if (std::features::module_event_migration_enabled()) {
+        emit(
+            Vest {
+                admin: vesting_contract.admin,
+                vesting_contract_address: contract_address,
+                staking_pool_address: vesting_contract.staking.pool_address,
+                period_vested: next_period_to_vest,
+                amount: vested_amount,
+            },
+        );
+    };
+    emit_event(
+        &mut vesting_contract.vest_events,
+        VestEvent {
+            admin: vesting_contract.admin,
+            vesting_contract_address: contract_address,
+            staking_pool_address: vesting_contract.staking.pool_address,
+            period_vested: next_period_to_vest,
+            amount: vested_amount,
+        },
+    );
+}
+
+ + + +
+ + + +## Function `vest_many` + +Call vest for many vesting contracts. + + +
public entry fun vest_many(contract_addresses: vector<address>)
+
+ + + +
+Implementation + + +
public entry fun vest_many(contract_addresses: vector<address>) acquires VestingContract {
+    // Batch helper: trigger vest() for every supplied contract address.
+    // Reject an empty batch up front so callers get a clear error code.
+    let num_contracts = vector::length(&contract_addresses);
+    assert!(num_contracts != 0, error::invalid_argument(EVEC_EMPTY_FOR_MANY_FUNCTION));
+
+    // Vest each contract in the order supplied by the caller; any abort in
+    // vest() aborts the whole batch.
+    let i = 0;
+    while (i < num_contracts) {
+        vest(*vector::borrow(&contract_addresses, i));
+        i = i + 1;
+    };
+}
+
+ + + +
+ + + +## Function `distribute` + +Distribute any withdrawable stake from the stake pool. + + +
public entry fun distribute(contract_address: address)
+
+ + + +
+Implementation + + +
public entry fun distribute(contract_address: address) acquires VestingContract {
+    // Distribute any withdrawable stake to shareholders (or their registered
+    // beneficiaries), pro-rata by their shares in the grant pool.
+    // Aborts unless the contract exists and is still ACTIVE.
+    assert_active_vesting_contract(contract_address);
+
+    let vesting_contract = borrow_global_mut<VestingContract>(contract_address);
+    // Pull all withdrawable coins out of the underlying staking contract into
+    // this function's custody.
+    let coins = withdraw_stake(vesting_contract, contract_address);
+    let total_distribution_amount = coin::value(&coins);
+    if (total_distribution_amount == 0) {
+        // Nothing to distribute; a zero coin must be destroyed explicitly.
+        coin::destroy_zero(coins);
+        return
+    };
+
+    // Distribute coins to all shareholders in the vesting contract.
+    // Each shareholder's cut is derived from their share count against the
+    // total distribution amount; payouts go to the beneficiary if one is set.
+    let grant_pool = &vesting_contract.grant_pool;
+    let shareholders = &pool_u64::shareholders(grant_pool);
+    vector::for_each_ref(shareholders, |shareholder| {
+        let shareholder = *shareholder;
+        let shares = pool_u64::shares(grant_pool, shareholder);
+        let amount = pool_u64::shares_to_amount_with_total_coins(grant_pool, shares, total_distribution_amount);
+        let share_of_coins = coin::extract(&mut coins, amount);
+        let recipient_address = get_beneficiary(vesting_contract, shareholder);
+        aptos_account::deposit_coins(recipient_address, share_of_coins);
+    });
+
+    // Send any remaining "dust" (leftover due to rounding error) to the withdrawal address.
+    if (coin::value(&coins) > 0) {
+        aptos_account::deposit_coins(vesting_contract.withdrawal_address, coins);
+    } else {
+        coin::destroy_zero(coins);
+    };
+
+    // Emit the module event only behind the migration feature flag; the
+    // legacy event-handle event is always emitted.
+    if (std::features::module_event_migration_enabled()) {
+        emit(
+            Distribute {
+                admin: vesting_contract.admin,
+                vesting_contract_address: contract_address,
+                amount: total_distribution_amount,
+            },
+        );
+    };
+    emit_event(
+        &mut vesting_contract.distribute_events,
+        DistributeEvent {
+            admin: vesting_contract.admin,
+            vesting_contract_address: contract_address,
+            amount: total_distribution_amount,
+        },
+    );
+}
+
+ + + +
+ + + +## Function `distribute_many` + +Call distribute for many vesting contracts. + + +
public entry fun distribute_many(contract_addresses: vector<address>)
+
+ + + +
+Implementation + + +
public entry fun distribute_many(contract_addresses: vector<address>) acquires VestingContract {
+    // Batch helper: run distribute() for every supplied contract address.
+    // An empty batch is rejected so the caller learns the request was a no-op.
+    let num_contracts = vector::length(&contract_addresses);
+    assert!(num_contracts != 0, error::invalid_argument(EVEC_EMPTY_FOR_MANY_FUNCTION));
+
+    // Process contracts in caller-supplied order; any abort in distribute()
+    // aborts the whole batch.
+    let i = 0;
+    while (i < num_contracts) {
+        distribute(*vector::borrow(&contract_addresses, i));
+        i = i + 1;
+    };
+}
+
+ + + +
+ + + +## Function `terminate_vesting_contract` + +Terminate the vesting contract and send all funds back to the withdrawal address. + + +
public entry fun terminate_vesting_contract(admin: &signer, contract_address: address)
+
+ + + +
+Implementation + + +
public entry fun terminate_vesting_contract(admin: &signer, contract_address: address) acquires VestingContract {
+    // Admin-only: permanently terminate the contract, zero the remaining
+    // grant, and unlock all active stake for eventual withdrawal.
+    assert_active_vesting_contract(contract_address);
+
+    // Distribute all withdrawable coins, which should have been from previous rewards withdrawal or vest.
+    distribute(contract_address);
+
+    let vesting_contract = borrow_global_mut<VestingContract>(contract_address);
+    // Only the contract admin may terminate.
+    verify_admin(admin, vesting_contract);
+    // Aborts if any stake is still pending activation, since that stake could
+    // not be unlocked here.
+    let (active_stake, _, pending_active_stake, _) = stake::get_stake(vesting_contract.staking.pool_address);
+    assert!(pending_active_stake == 0, error::invalid_state(EPENDING_STAKE_FOUND));
+
+    // Unlock all remaining active stake.
+    vesting_contract.state = VESTING_POOL_TERMINATED;
+    vesting_contract.remaining_grant = 0;
+    unlock_stake(vesting_contract, active_stake);
+
+    // Emit the module event only behind the migration feature flag; the
+    // legacy event-handle event is always emitted.
+    if (std::features::module_event_migration_enabled()) {
+        emit(
+            Terminate {
+                admin: vesting_contract.admin,
+                vesting_contract_address: contract_address,
+            },
+        );
+    };
+    emit_event(
+        &mut vesting_contract.terminate_events,
+        TerminateEvent {
+            admin: vesting_contract.admin,
+            vesting_contract_address: contract_address,
+        },
+    );
+}
+
+ + + +
+ + + +## Function `admin_withdraw` + +Withdraw all funds to the preset vesting contract's withdrawal address. This can only be called if the contract +has already been terminated. + + +
public entry fun admin_withdraw(admin: &signer, contract_address: address)
+
+ + + +
+Implementation + + +
public entry fun admin_withdraw(admin: &signer, contract_address: address) acquires VestingContract {
+    // Admin-only: sweep all withdrawable coins of a TERMINATED contract to
+    // its preset withdrawal address.
+    // A single mutable borrow suffices; previously the resource was borrowed
+    // twice in a row (immutably for the state check, then mutably), which was
+    // redundant. Abort conditions and their order are unchanged.
+    let vesting_contract = borrow_global_mut<VestingContract>(contract_address);
+    assert!(
+        vesting_contract.state == VESTING_POOL_TERMINATED,
+        error::invalid_state(EVESTING_CONTRACT_STILL_ACTIVE)
+    );
+
+    verify_admin(admin, vesting_contract);
+    let coins = withdraw_stake(vesting_contract, contract_address);
+    let amount = coin::value(&coins);
+    if (amount == 0) {
+        // Nothing withdrawable; a zero coin must be destroyed explicitly.
+        coin::destroy_zero(coins);
+        return
+    };
+    aptos_account::deposit_coins(vesting_contract.withdrawal_address, coins);
+
+    // Emit the module event only behind the migration feature flag; the
+    // legacy event-handle event is always emitted.
+    if (std::features::module_event_migration_enabled()) {
+        emit(
+            AdminWithdraw {
+                admin: vesting_contract.admin,
+                vesting_contract_address: contract_address,
+                amount,
+            },
+        );
+    };
+    emit_event(
+        &mut vesting_contract.admin_withdraw_events,
+        AdminWithdrawEvent {
+            admin: vesting_contract.admin,
+            vesting_contract_address: contract_address,
+            amount,
+        },
+    );
+}
+
+ + + +
+ + + +## Function `update_operator` + + + +
public entry fun update_operator(admin: &signer, contract_address: address, new_operator: address, commission_percentage: u64)
+
+ + + +
+Implementation + + +
public entry fun update_operator(
+    admin: &signer,
+    contract_address: address,
+    new_operator: address,
+    commission_percentage: u64,
+) acquires VestingContract {
+    // Admin-only: switch the staking operator and set the commission rate
+    // that applies to the new operator.
+    let vesting_contract = borrow_global_mut<VestingContract>(contract_address);
+    verify_admin(admin, vesting_contract);
+    let contract_signer = &get_vesting_account_signer_internal(vesting_contract);
+    let old_operator = vesting_contract.staking.operator;
+    // The actual operator switch is delegated to the staking_contract module.
+    staking_contract::switch_operator(contract_signer, old_operator, new_operator, commission_percentage);
+    // Mirror the new values in this module's own bookkeeping.
+    vesting_contract.staking.operator = new_operator;
+    vesting_contract.staking.commission_percentage = commission_percentage;
+
+    // Emit the module event only behind the migration feature flag; the
+    // legacy event-handle event is always emitted.
+    if (std::features::module_event_migration_enabled()) {
+        emit(
+            UpdateOperator {
+                admin: vesting_contract.admin,
+                vesting_contract_address: contract_address,
+                staking_pool_address: vesting_contract.staking.pool_address,
+                old_operator,
+                new_operator,
+                commission_percentage,
+            },
+        );
+    };
+    emit_event(
+        &mut vesting_contract.update_operator_events,
+        UpdateOperatorEvent {
+            admin: vesting_contract.admin,
+            vesting_contract_address: contract_address,
+            staking_pool_address: vesting_contract.staking.pool_address,
+            old_operator,
+            new_operator,
+            commission_percentage,
+        },
+    );
+}
+
+ + + +
+ + + +## Function `update_operator_with_same_commission` + + + +
public entry fun update_operator_with_same_commission(admin: &signer, contract_address: address, new_operator: address)
+
+ + + +
+Implementation + + +
public entry fun update_operator_with_same_commission(
+    admin: &signer,
+    contract_address: address,
+    new_operator: address,
+) acquires VestingContract {
+    // Convenience wrapper: switch operators while re-using the currently
+    // configured commission percentage for the new operator.
+    update_operator(admin, contract_address, new_operator, operator_commission_percentage(contract_address));
+}
+
+ + + +
+ + + +## Function `update_commission_percentage` + + + +
public entry fun update_commission_percentage(admin: &signer, contract_address: address, new_commission_percentage: u64)
+
+ + + +
+Implementation + + +
public entry fun update_commission_percentage(
+    admin: &signer,
+    contract_address: address,
+    new_commission_percentage: u64,
+) acquires VestingContract {
+    // Admin-only: change the operator's commission percentage.
+    let operator = operator(contract_address);
+    let vesting_contract = borrow_global_mut<VestingContract>(contract_address);
+    verify_admin(admin, vesting_contract);
+    let contract_signer = &get_vesting_account_signer_internal(vesting_contract);
+    // NOTE(review): the callee is spelled `update_commision` (sic) in the
+    // staking_contract module, while the comment below names
+    // `update_commission_percentage` — confirm which function emits the event.
+    staking_contract::update_commision(contract_signer, operator, new_commission_percentage);
+    // Keep this module's cached commission in sync with the staking contract.
+    vesting_contract.staking.commission_percentage = new_commission_percentage;
+    // This function does not emit an event. Instead, `staking_contract::update_commission_percentage`
+    // emits the event for this commission percentage update.
+}
+
+ + + +
+ + + +## Function `update_voter` + + + +
public entry fun update_voter(admin: &signer, contract_address: address, new_voter: address)
+
+ + + +
+Implementation + + +
public entry fun update_voter(
+    admin: &signer,
+    contract_address: address,
+    new_voter: address,
+) acquires VestingContract {
+    // Admin-only: change the on-chain voter for the underlying stake pool.
+    let vesting_contract = borrow_global_mut<VestingContract>(contract_address);
+    verify_admin(admin, vesting_contract);
+    let contract_signer = &get_vesting_account_signer_internal(vesting_contract);
+    let old_voter = vesting_contract.staking.voter;
+    // The voter change is delegated to the staking_contract module; the new
+    // voter is then mirrored in this module's bookkeeping.
+    staking_contract::update_voter(contract_signer, vesting_contract.staking.operator, new_voter);
+    vesting_contract.staking.voter = new_voter;
+
+    // Emit the module event only behind the migration feature flag; the
+    // legacy event-handle event is always emitted.
+    if (std::features::module_event_migration_enabled()) {
+        emit(
+            UpdateVoter {
+                admin: vesting_contract.admin,
+                vesting_contract_address: contract_address,
+                staking_pool_address: vesting_contract.staking.pool_address,
+                old_voter,
+                new_voter,
+            },
+        );
+    };
+    emit_event(
+        &mut vesting_contract.update_voter_events,
+        UpdateVoterEvent {
+            admin: vesting_contract.admin,
+            vesting_contract_address: contract_address,
+            staking_pool_address: vesting_contract.staking.pool_address,
+            old_voter,
+            new_voter,
+        },
+    );
+}
+
+ + + +
+ + + +## Function `reset_lockup` + + + +
public entry fun reset_lockup(admin: &signer, contract_address: address)
+
+ + + +
+Implementation + + +
public entry fun reset_lockup(
+    admin: &signer,
+    contract_address: address,
+) acquires VestingContract {
+    // Admin-only: renew the lockup of the underlying stake pool via the
+    // staking_contract module.
+    let vesting_contract = borrow_global_mut<VestingContract>(contract_address);
+    verify_admin(admin, vesting_contract);
+    let contract_signer = &get_vesting_account_signer_internal(vesting_contract);
+    staking_contract::reset_lockup(contract_signer, vesting_contract.staking.operator);
+
+    // Emit the module event only behind the migration feature flag; the
+    // legacy event-handle event is always emitted. Both report the lockup
+    // expiration that is in effect after the reset above.
+    if (std::features::module_event_migration_enabled()) {
+        emit(
+            ResetLockup {
+                admin: vesting_contract.admin,
+                vesting_contract_address: contract_address,
+                staking_pool_address: vesting_contract.staking.pool_address,
+                new_lockup_expiration_secs: stake::get_lockup_secs(vesting_contract.staking.pool_address),
+            },
+        );
+    };
+    emit_event(
+        &mut vesting_contract.reset_lockup_events,
+        ResetLockupEvent {
+            admin: vesting_contract.admin,
+            vesting_contract_address: contract_address,
+            staking_pool_address: vesting_contract.staking.pool_address,
+            new_lockup_expiration_secs: stake::get_lockup_secs(vesting_contract.staking.pool_address),
+        },
+    );
+}
+
+ + + +
+ + + +## Function `set_beneficiary` + + + +
public entry fun set_beneficiary(admin: &signer, contract_address: address, shareholder: address, new_beneficiary: address)
+
+ + + +
+Implementation + + +
public entry fun set_beneficiary(
+    admin: &signer,
+    contract_address: address,
+    shareholder: address,
+    new_beneficiary: address,
+) acquires VestingContract {
+    // Admin-only: route a shareholder's future distributions to a different
+    // beneficiary account.
+    // Verify that the beneficiary account is set up to receive APT. This is a requirement so distribute() wouldn't
+    // fail and block all other accounts from receiving APT if one beneficiary is not registered.
+    assert_account_is_registered_for_apt(new_beneficiary);
+
+    let vesting_contract = borrow_global_mut<VestingContract>(contract_address);
+    verify_admin(admin, vesting_contract);
+
+    // Capture the previous beneficiary (falls back to the shareholder itself)
+    // before overwriting, so it can be reported in the events below.
+    let old_beneficiary = get_beneficiary(vesting_contract, shareholder);
+    // Upsert into the beneficiaries map: overwrite if present, insert if not.
+    let beneficiaries = &mut vesting_contract.beneficiaries;
+    if (simple_map::contains_key(beneficiaries, &shareholder)) {
+        let beneficiary = simple_map::borrow_mut(beneficiaries, &shareholder);
+        *beneficiary = new_beneficiary;
+    } else {
+        simple_map::add(beneficiaries, shareholder, new_beneficiary);
+    };
+
+    // Emit the module event only behind the migration feature flag; the
+    // legacy event-handle event is always emitted.
+    if (std::features::module_event_migration_enabled()) {
+        emit(
+            SetBeneficiary {
+                admin: vesting_contract.admin,
+                vesting_contract_address: contract_address,
+                shareholder,
+                old_beneficiary,
+                new_beneficiary,
+            },
+        );
+    };
+    emit_event(
+        &mut vesting_contract.set_beneficiary_events,
+        SetBeneficiaryEvent {
+            admin: vesting_contract.admin,
+            vesting_contract_address: contract_address,
+            shareholder,
+            old_beneficiary,
+            new_beneficiary,
+        },
+    );
+}
+
+ + + +
+ + + +## Function `reset_beneficiary` + +Remove the beneficiary for the given shareholder. All distributions will be sent directly to the shareholder +account. + + +
public entry fun reset_beneficiary(account: &signer, contract_address: address, shareholder: address)
+
+ + + +
+Implementation + + +
public entry fun reset_beneficiary(
+    account: &signer,
+    contract_address: address,
+    shareholder: address,
+) acquires VestingAccountManagement, VestingContract {
+    // Remove any beneficiary override for the shareholder, so future
+    // distributions go to the shareholder address directly.
+    let vesting_contract = borrow_global_mut<VestingContract>(contract_address);
+    // Callable by the contract admin or by the holder of the
+    // ROLE_BENEFICIARY_RESETTER role. The admin check is evaluated first;
+    // the role lookup (which can abort if no roles exist) only runs for
+    // non-admin callers.
+    let addr = signer::address_of(account);
+    assert!(
+        addr == vesting_contract.admin ||
+            addr == get_role_holder(contract_address, utf8(ROLE_BENEFICIARY_RESETTER)),
+        error::permission_denied(EPERMISSION_DENIED),
+    );
+
+    // No-op if the shareholder has no override registered.
+    let beneficiaries = &mut vesting_contract.beneficiaries;
+    if (simple_map::contains_key(beneficiaries, &shareholder)) {
+        simple_map::remove(beneficiaries, &shareholder);
+    };
+}
+
+ + + +
+ + + +## Function `set_management_role` + + + +
public entry fun set_management_role(admin: &signer, contract_address: address, role: string::String, role_holder: address)
+
+ + + +
+Implementation + + +
public entry fun set_management_role(
+    admin: &signer,
+    contract_address: address,
+    role: String,
+    role_holder: address,
+) acquires VestingAccountManagement, VestingContract {
+    // Admin-only: assign `role_holder` to the named management role on the
+    // vesting contract account.
+    let vesting_contract = borrow_global_mut<VestingContract>(contract_address);
+    verify_admin(admin, vesting_contract);
+
+    // Lazily create the role store under the contract account on first use.
+    if (!exists<VestingAccountManagement>(contract_address)) {
+        let contract_signer = &get_vesting_account_signer_internal(vesting_contract);
+        move_to(contract_signer, VestingAccountManagement {
+            roles: simple_map::create<String, address>(),
+        })
+    };
+    // Upsert: overwrite an existing holder for the role, or add a new entry.
+    let roles = &mut borrow_global_mut<VestingAccountManagement>(contract_address).roles;
+    if (simple_map::contains_key(roles, &role)) {
+        *simple_map::borrow_mut(roles, &role) = role_holder;
+    } else {
+        simple_map::add(roles, role, role_holder);
+    };
+}
+
+ + + +
+ + + +## Function `set_beneficiary_resetter` + + + +
public entry fun set_beneficiary_resetter(admin: &signer, contract_address: address, beneficiary_resetter: address)
+
+ + + +
+Implementation + + +
public entry fun set_beneficiary_resetter(
+    admin: &signer,
+    contract_address: address,
+    beneficiary_resetter: address,
+) acquires VestingAccountManagement, VestingContract {
+    // Granting the beneficiary-resetter permission is just a special case of
+    // assigning a management role.
+    let role = utf8(ROLE_BENEFICIARY_RESETTER);
+    set_management_role(admin, contract_address, role, beneficiary_resetter);
+}
+
+ + + +
+ + + +## Function `set_beneficiary_for_operator` + +Set the beneficiary for the operator. + + +
public entry fun set_beneficiary_for_operator(operator: &signer, new_beneficiary: address)
+
+ + + +
+Implementation + + +
public entry fun set_beneficiary_for_operator(operator: &signer, new_beneficiary: address) {
+    // Pure delegation: the operator-beneficiary mapping is owned and managed
+    // by the staking_contract module.
+    staking_contract::set_beneficiary_for_operator(operator, new_beneficiary);
+}
+
+ + + +
+ + + +## Function `get_role_holder` + + + +
public fun get_role_holder(contract_address: address, role: string::String): address
+
+ + + +
+Implementation + + +
public fun get_role_holder(contract_address: address, role: String): address acquires VestingAccountManagement {
+    // A role store must exist on the contract account before any lookup.
+    assert!(exists<VestingAccountManagement>(contract_address), error::not_found(EVESTING_ACCOUNT_HAS_NO_ROLES));
+    let role_map = &borrow_global<VestingAccountManagement>(contract_address).roles;
+    // The specific role must also be present; otherwise abort with EROLE_NOT_FOUND.
+    assert!(simple_map::contains_key(role_map, &role), error::not_found(EROLE_NOT_FOUND));
+    *simple_map::borrow(role_map, &role)
+}
+
+ + + +
+ + + +## Function `get_vesting_account_signer` + +For emergency use in case the admin needs emergency control of vesting contract account. +This doesn't give the admin total power as the admin would still need to follow the rules set by +staking_contract and stake modules. + + +
public fun get_vesting_account_signer(admin: &signer, contract_address: address): signer
+
+ + + +
+Implementation + + +
public fun get_vesting_account_signer(admin: &signer, contract_address: address): signer acquires VestingContract {
+    // Admin-only escape hatch to obtain the vesting account's signer.
+    // This path only reads the contract, so an immutable borrow suffices;
+    // the previous `borrow_global_mut` requested write access needlessly.
+    let vesting_contract = borrow_global<VestingContract>(contract_address);
+    verify_admin(admin, vesting_contract);
+    get_vesting_account_signer_internal(vesting_contract)
+}
+
+ + + +
+ + + +## Function `get_vesting_account_signer_internal` + + + +
fun get_vesting_account_signer_internal(vesting_contract: &vesting::VestingContract): signer
+
+ + + +
+Implementation + + +
fun get_vesting_account_signer_internal(vesting_contract: &VestingContract): signer {
+    // Recreate the vesting account's signer from the capability stored at
+    // contract creation time.
+    account::create_signer_with_capability(&vesting_contract.signer_cap)
+}
+
+ + + +
+ + + +## Function `create_vesting_contract_account` + +Create a salt for generating the resource accounts that will be holding the VestingContract. +This address should be deterministic for the same admin and vesting contract creation nonce. + + +
fun create_vesting_contract_account(admin: &signer, contract_creation_seed: vector<u8>): (signer, account::SignerCapability)
+
+ + + +
+Implementation + + +
fun create_vesting_contract_account(
+    admin: &signer,
+    contract_creation_seed: vector<u8>,
+): (signer, SignerCapability) acquires AdminStore {
+    // Create the resource account that will hold the VestingContract.
+    // Seed layout (order matters and is formally verified):
+    // admin address || admin_store.nonce || VESTING_POOL_SALT || contract_creation_seed.
+    let admin_store = borrow_global_mut<AdminStore>(signer::address_of(admin));
+    let seed = bcs::to_bytes(&signer::address_of(admin));
+    vector::append(&mut seed, bcs::to_bytes(&admin_store.nonce));
+    // Bump the nonce so each contract the admin creates gets a distinct address.
+    admin_store.nonce = admin_store.nonce + 1;
+
+    // Include a salt to avoid conflicts with any other modules out there that might also generate
+    // deterministic resource accounts for the same admin address + nonce.
+    vector::append(&mut seed, VESTING_POOL_SALT);
+    vector::append(&mut seed, contract_creation_seed);
+
+    let (account_signer, signer_cap) = account::create_resource_account(admin, seed);
+    // Register the vesting contract account to receive APT as it'll be sent to it when claiming unlocked stake from
+    // the underlying staking contract.
+    coin::register<AptosCoin>(&account_signer);
+
+    (account_signer, signer_cap)
+}
+
+ + + +
+ + + +## Function `verify_admin` + + + +
fun verify_admin(admin: &signer, vesting_contract: &vesting::VestingContract)
+
+ + + +
+Implementation + + +
fun verify_admin(admin: &signer, vesting_contract: &VestingContract) {
+    // Abort with ENOT_ADMIN unless the signer is the contract's admin.
+    assert!(signer::address_of(admin) == vesting_contract.admin, error::unauthenticated(ENOT_ADMIN));
+}
+
+ + + +
+ + + +## Function `assert_vesting_contract_exists` + + + +
fun assert_vesting_contract_exists(contract_address: address)
+
+ + + +
+Implementation + + +
fun assert_vesting_contract_exists(contract_address: address) {
+    // Abort with EVESTING_CONTRACT_NOT_FOUND if no contract lives at the address.
+    assert!(exists<VestingContract>(contract_address), error::not_found(EVESTING_CONTRACT_NOT_FOUND));
+}
+
+ + + +
+ + + +## Function `assert_active_vesting_contract` + + + +
fun assert_active_vesting_contract(contract_address: address)
+
+ + + +
+Implementation + + +
fun assert_active_vesting_contract(contract_address: address) acquires VestingContract {
+    // Abort unless a vesting contract exists at the address AND its state is
+    // still VESTING_POOL_ACTIVE (i.e. not terminated).
+    assert_vesting_contract_exists(contract_address);
+    let vesting_contract = borrow_global<VestingContract>(contract_address);
+    assert!(vesting_contract.state == VESTING_POOL_ACTIVE, error::invalid_state(EVESTING_CONTRACT_NOT_ACTIVE));
+}
+
+ + + +
+ + + +## Function `unlock_stake` + + + +
fun unlock_stake(vesting_contract: &vesting::VestingContract, amount: u64)
+
+ + + +
+Implementation + + +
fun unlock_stake(vesting_contract: &VestingContract, amount: u64) {
+    // Unlock `amount` from the staking contract on behalf of the vesting
+    // account, acting as the staker via the stored signer capability.
+    let contract_signer = &get_vesting_account_signer_internal(vesting_contract);
+    staking_contract::unlock_stake(contract_signer, vesting_contract.staking.operator, amount);
+}
+
+ + + +
+ + + +## Function `withdraw_stake` + + + +
fun withdraw_stake(vesting_contract: &vesting::VestingContract, contract_address: address): coin::Coin<aptos_coin::AptosCoin>
+
+ + + +
+Implementation + + +
fun withdraw_stake(vesting_contract: &VestingContract, contract_address: address): Coin<AptosCoin> {
+    // Claim any withdrawable distribution from the staking contract. The withdrawn coins will be sent directly to
+    // the vesting contract's account.
+    staking_contract::distribute(contract_address, vesting_contract.staking.operator);
+    // Withdraw the contract account's ENTIRE AptosCoin balance, which at this
+    // point includes the coins just distributed above.
+    let withdrawn_coins = coin::balance<AptosCoin>(contract_address);
+    let contract_signer = &get_vesting_account_signer_internal(vesting_contract);
+    coin::withdraw<AptosCoin>(contract_signer, withdrawn_coins)
+}
+
+ + + +
+ + + +## Function `get_beneficiary` + + + +
fun get_beneficiary(contract: &vesting::VestingContract, shareholder: address): address
+
+ + + +
+Implementation + + +
fun get_beneficiary(contract: &VestingContract, shareholder: address): address {
+    // Resolve where a shareholder's distributions should be sent: the
+    // registered beneficiary override if one exists, otherwise the
+    // shareholder address itself.
+    let beneficiaries = &contract.beneficiaries;
+    if (!simple_map::contains_key(beneficiaries, &shareholder)) {
+        shareholder
+    } else {
+        *simple_map::borrow(beneficiaries, &shareholder)
+    }
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1In order to retrieve the address of the underlying stake pool, the vesting start timestamp of the vesting contract, the duration of the vesting period, the remaining grant of a vesting contract, the beneficiary account of a shareholder in a vesting contract, the percentage of accumulated rewards that is paid to the operator as commission, the operator who runs the validator, the voter who will be voting on-chain, and the vesting schedule of a vesting contract, the supplied vesting contract should exist.LowThe vesting_start_secs, period_duration_secs, remaining_grant, beneficiary, operator_commission_percentage, operator, voter, and vesting_schedule functions ensure that the supplied vesting contract address exists by calling the assert_vesting_contract_exists function.Formally verified via assert_vesting_contract_exists.
2The vesting pool should not exceed a maximum of 30 shareholders.MediumThe maximum number of shareholders a vesting pool can support is stored as a constant in MAXIMUM_SHAREHOLDERS which is passed to the pool_u64::create function.Formally verified via a global invariant.
3Retrieving all the vesting contracts of a given address and retrieving the list of beneficiaries from a vesting contract should never fail.MediumThe function vesting_contracts checks if the supplied admin address contains an AdminStore resource and returns all the vesting contracts as a vector
. Otherwise it returns an empty vector. The function get_beneficiary checks for a given vesting contract, a specific shareholder exists, and if so, the beneficiary will be returned, otherwise it will simply return the address of the shareholder.
Formally verified via vesting_contracts and get_beneficiary.
4The shareholders should be able to start vesting only after the vesting cliff and the first vesting period have transpired.HighThe end of the vesting cliff is stored under VestingContract.vesting_schedule.start_timestamp_secs. The vest function always checks that timestamp::now_seconds is greater or equal to the end of the vesting cliff period.Audited the check for the end of vesting cliff: vest module.
5In order to retrieve the total accumulated rewards that have not been distributed, the accumulated rewards of a given beneficiary, the list of all shareholders in a vesting contract, the shareholder address given the beneficiary address in a given vesting contract, to terminate a vesting contract and to distribute any withdrawable stake from the stake pool, the supplied vesting contract should exist and be active.LowThe distribute, terminate_vesting_contract, shareholder, shareholders, accumulated_rewards, and total_accumulated_rewards functions ensure that the supplied vesting contract address exists and is active by calling the assert_active_vesting_contract function.Formally verified via ActiveVestingContractAbortsIf.
6A new vesting schedule should not be allowed to start vesting in the past or to supply an empty schedule or for the period duration to be zero.HighThe create_vesting_schedule function ensures that the length of the schedule vector is greater than 0, that the period duration is greater than 0 and that the start_timestamp_secs is greater or equal to timestamp::now_seconds.Formally verified via create_vesting_schedule.
7The shareholders should be able to vest the tokens from previous periods.HighWhen vesting, the last_completed_period is checked against the next period to vest. This allows to unlock vested tokens for the next period since last vested, in case they didn't call vest for some periods.Audited that vesting doesn't skip periods, but gradually increments to allow shareholders to retrieve all the vested tokens.
8Actions such as obtaining a list of shareholders, calculating accrued rewards, distributing withdrawable stake, and terminating the vesting contract should be accessible exclusively while the vesting contract remains active.LowRestricting access to inactive vesting contracts is achieved through the assert_active_vesting_contract function.Formally verified via ActiveVestingContractAbortsIf.
9The ability to terminate a vesting contract should only be available to the owner.HighLimiting the access of accounts to specific function, is achieved by asserting that the signer matches the admin of the VestingContract.Formally verified via verify_admin.
10A new vesting contract should not be allowed to have an empty list of shareholders, have a different amount of shareholders than buy-ins, and provide a withdrawal address which is either reserved or not registered for apt.HighThe create_vesting_contract function ensures that the withdrawal_address is not a reserved address, that it is registered for apt, that the list of shareholders is non-empty, and that the amount of shareholders matches the amount of buy_ins.Formally verified via create_vesting_contract.
11Creating a vesting contract account should require the signer (admin) to own an admin store and should enforce that the seed of the resource account is composed of the admin store's nonce, the vesting pool salt, and the custom contract creation seed.MediumThe create_vesting_contract_account concatenates to the seed first the admin_store.nonce then the VESTING_POOL_SALT then the contract_creation_seed and then it is passed to the create_resource_account function.Enforced via create_vesting_contract_account.
+ + + + + +### Module-level Specification + + +
pragma verify = true;
+pragma aborts_if_is_strict;
+// This enforces high-level requirement 2:
+invariant forall a: address where exists<VestingContract>(a):
+    global<VestingContract>(a).grant_pool.shareholders_limit <= MAXIMUM_SHAREHOLDERS;
+
+ + + + + +### Function `stake_pool_address` + + +
#[view]
+public fun stake_pool_address(vesting_contract_address: address): address
+
+ + + + +
aborts_if !exists<VestingContract>(vesting_contract_address);
+
+ + + + + +### Function `vesting_start_secs` + + +
#[view]
+public fun vesting_start_secs(vesting_contract_address: address): u64
+
+ + + + +
aborts_if !exists<VestingContract>(vesting_contract_address);
+
+ + + + + +### Function `period_duration_secs` + + +
#[view]
+public fun period_duration_secs(vesting_contract_address: address): u64
+
+ + + + +
aborts_if !exists<VestingContract>(vesting_contract_address);
+
+ + + + + +### Function `remaining_grant` + + +
#[view]
+public fun remaining_grant(vesting_contract_address: address): u64
+
+ + + + +
aborts_if !exists<VestingContract>(vesting_contract_address);
+
+ + + + + +### Function `beneficiary` + + +
#[view]
+public fun beneficiary(vesting_contract_address: address, shareholder: address): address
+
+ + + + +
aborts_if !exists<VestingContract>(vesting_contract_address);
+
+ + + + + +### Function `operator_commission_percentage` + + +
#[view]
+public fun operator_commission_percentage(vesting_contract_address: address): u64
+
+ + + + +
aborts_if !exists<VestingContract>(vesting_contract_address);
+
+ + + + + +### Function `vesting_contracts` + + +
#[view]
+public fun vesting_contracts(admin: address): vector<address>
+
+ + + + +
// This enforces high-level requirement 3:
+aborts_if false;
+
+ + + + + +### Function `operator` + + +
#[view]
+public fun operator(vesting_contract_address: address): address
+
+ + + + +
aborts_if !exists<VestingContract>(vesting_contract_address);
+
+ + + + + +### Function `voter` + + +
#[view]
+public fun voter(vesting_contract_address: address): address
+
+ + + + +
aborts_if !exists<VestingContract>(vesting_contract_address);
+
+ + + + + +### Function `vesting_schedule` + + +
#[view]
+public fun vesting_schedule(vesting_contract_address: address): vesting::VestingSchedule
+
+ + + + +
aborts_if !exists<VestingContract>(vesting_contract_address);
+
+ + + + + +### Function `total_accumulated_rewards` + + +
#[view]
+public fun total_accumulated_rewards(vesting_contract_address: address): u64
+
+ + + + +
pragma verify = false;
+include TotalAccumulatedRewardsAbortsIf;
+
+ + + + + + + +
schema TotalAccumulatedRewardsAbortsIf {
+    vesting_contract_address: address;
+    include ActiveVestingContractAbortsIf<VestingContract>{contract_address: vesting_contract_address};
+    let vesting_contract = global<VestingContract>(vesting_contract_address);
+    let staker = vesting_contract_address;
+    let operator = vesting_contract.staking.operator;
+    let staking_contracts = global<staking_contract::Store>(staker).staking_contracts;
+    let staking_contract = simple_map::spec_get(staking_contracts, operator);
+    aborts_if !exists<staking_contract::Store>(staker);
+    aborts_if !simple_map::spec_contains_key(staking_contracts, operator);
+    let pool_address = staking_contract.pool_address;
+    let stake_pool = global<stake::StakePool>(pool_address);
+    let active = coin::value(stake_pool.active);
+    let pending_active = coin::value(stake_pool.pending_active);
+    let total_active_stake = active + pending_active;
+    let accumulated_rewards = total_active_stake - staking_contract.principal;
+    let commission_amount = accumulated_rewards * staking_contract.commission_percentage / 100;
+    aborts_if !exists<stake::StakePool>(pool_address);
+    aborts_if active + pending_active > MAX_U64;
+    aborts_if total_active_stake < staking_contract.principal;
+    aborts_if accumulated_rewards * staking_contract.commission_percentage > MAX_U64;
+    aborts_if (vesting_contract.remaining_grant + commission_amount) > total_active_stake;
+    aborts_if total_active_stake < vesting_contract.remaining_grant;
+}
+
+ + + + + +### Function `accumulated_rewards` + + +
#[view]
+public fun accumulated_rewards(vesting_contract_address: address, shareholder_or_beneficiary: address): u64
+
+ + + + +
pragma verify = false;
+include TotalAccumulatedRewardsAbortsIf;
+let vesting_contract = global<VestingContract>(vesting_contract_address);
+let operator = vesting_contract.staking.operator;
+let staking_contracts = global<staking_contract::Store>(vesting_contract_address).staking_contracts;
+let staking_contract = simple_map::spec_get(staking_contracts, operator);
+let pool_address = staking_contract.pool_address;
+let stake_pool = global<stake::StakePool>(pool_address);
+let active = coin::value(stake_pool.active);
+let pending_active = coin::value(stake_pool.pending_active);
+let total_active_stake = active + pending_active;
+let accumulated_rewards = total_active_stake - staking_contract.principal;
+let commission_amount = accumulated_rewards * staking_contract.commission_percentage / 100;
+let total_accumulated_rewards = total_active_stake - vesting_contract.remaining_grant - commission_amount;
+let shareholder = spec_shareholder(vesting_contract_address, shareholder_or_beneficiary);
+let pool = vesting_contract.grant_pool;
+let shares = pool_u64::spec_shares(pool, shareholder);
+aborts_if pool.total_coins > 0 && pool.total_shares > 0
+    && (shares * total_accumulated_rewards) / pool.total_shares > MAX_U64;
+ensures result == pool_u64::spec_shares_to_amount_with_total_coins(pool, shares, total_accumulated_rewards);
+
+ + + + + +### Function `shareholders` + + +
#[view]
+public fun shareholders(vesting_contract_address: address): vector<address>
+
+ + + + +
include ActiveVestingContractAbortsIf<VestingContract>{contract_address: vesting_contract_address};
+
+ + + + + + + +
fun spec_shareholder(vesting_contract_address: address, shareholder_or_beneficiary: address): address;
+
+ + + + + +### Function `shareholder` + + +
#[view]
+public fun shareholder(vesting_contract_address: address, shareholder_or_beneficiary: address): address
+
+ + + + +
pragma opaque;
+include ActiveVestingContractAbortsIf<VestingContract>{contract_address: vesting_contract_address};
+ensures [abstract] result == spec_shareholder(vesting_contract_address, shareholder_or_beneficiary);
+
+ + + + + +### Function `create_vesting_schedule` + + +
public fun create_vesting_schedule(schedule: vector<fixed_point32::FixedPoint32>, start_timestamp_secs: u64, period_duration: u64): vesting::VestingSchedule
+
+ + + + +
// This enforces high-level requirement 6:
+aborts_if !(len(schedule) > 0);
+aborts_if !(period_duration > 0);
+aborts_if !exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+aborts_if !(start_timestamp_secs >= timestamp::now_seconds());
+
+ + + + + +### Function `create_vesting_contract` + + +
public fun create_vesting_contract(admin: &signer, shareholders: &vector<address>, buy_ins: simple_map::SimpleMap<address, coin::Coin<aptos_coin::AptosCoin>>, vesting_schedule: vesting::VestingSchedule, withdrawal_address: address, operator: address, voter: address, commission_percentage: u64, contract_creation_seed: vector<u8>): address
+
+ + + + +
pragma verify = false;
+// This enforces high-level requirement 10:
+aborts_if withdrawal_address == @aptos_framework || withdrawal_address == @vm_reserved;
+aborts_if !exists<account::Account>(withdrawal_address);
+aborts_if !exists<coin::CoinStore<AptosCoin>>(withdrawal_address);
+aborts_if len(shareholders) == 0;
+aborts_if simple_map::spec_len(buy_ins) != len(shareholders);
+ensures global<VestingContract>(result).grant_pool.shareholders_limit == 30;
+
+ + + + + +### Function `unlock_rewards` + + +
public entry fun unlock_rewards(contract_address: address)
+
+ + + + +
pragma verify = false;
+include UnlockRewardsAbortsIf;
+
+ + + + + + + +
schema UnlockRewardsAbortsIf {
+    contract_address: address;
+    include TotalAccumulatedRewardsAbortsIf { vesting_contract_address: contract_address };
+    let vesting_contract = global<VestingContract>(contract_address);
+    let operator = vesting_contract.staking.operator;
+    let staking_contracts = global<staking_contract::Store>(contract_address).staking_contracts;
+    let staking_contract = simple_map::spec_get(staking_contracts, operator);
+    let pool_address = staking_contract.pool_address;
+    let stake_pool = global<stake::StakePool>(pool_address);
+    let active = coin::value(stake_pool.active);
+    let pending_active = coin::value(stake_pool.pending_active);
+    let total_active_stake = active + pending_active;
+    let accumulated_rewards = total_active_stake - staking_contract.principal;
+    let commission_amount = accumulated_rewards * staking_contract.commission_percentage / 100;
+    let amount = total_active_stake - vesting_contract.remaining_grant - commission_amount;
+    include UnlockStakeAbortsIf { vesting_contract, amount };
+}
+
+ + + + + +### Function `unlock_rewards_many` + + +
public entry fun unlock_rewards_many(contract_addresses: vector<address>)
+
+ + + + +
pragma verify = false;
+aborts_if len(contract_addresses) == 0;
+
+ + + + + +### Function `vest` + + +
public entry fun vest(contract_address: address)
+
+ + + + +
pragma verify = false;
+include UnlockRewardsAbortsIf;
+
+ + + + + +### Function `vest_many` + + +
public entry fun vest_many(contract_addresses: vector<address>)
+
+ + + + +
pragma verify = false;
+aborts_if len(contract_addresses) == 0;
+
+ + + + + +### Function `distribute` + + +
public entry fun distribute(contract_address: address)
+
+ + + + +
pragma verify = false;
+include ActiveVestingContractAbortsIf<VestingContract>;
+let vesting_contract = global<VestingContract>(contract_address);
+include WithdrawStakeAbortsIf { vesting_contract };
+
+ + + + + +### Function `distribute_many` + + +
public entry fun distribute_many(contract_addresses: vector<address>)
+
+ + + + +
pragma verify = false;
+aborts_if len(contract_addresses) == 0;
+
+ + + + + +### Function `terminate_vesting_contract` + + +
public entry fun terminate_vesting_contract(admin: &signer, contract_address: address)
+
+ + + + +
pragma verify = false;
+include ActiveVestingContractAbortsIf<VestingContract>;
+let vesting_contract = global<VestingContract>(contract_address);
+include WithdrawStakeAbortsIf { vesting_contract };
+
+ + + + + +### Function `admin_withdraw` + + +
public entry fun admin_withdraw(admin: &signer, contract_address: address)
+
+ + + + +
pragma verify = false;
+let vesting_contract = global<VestingContract>(contract_address);
+aborts_if vesting_contract.state != VESTING_POOL_TERMINATED;
+include VerifyAdminAbortsIf;
+include WithdrawStakeAbortsIf { vesting_contract };
+
+ + + + + +### Function `update_operator` + + +
public entry fun update_operator(admin: &signer, contract_address: address, new_operator: address, commission_percentage: u64)
+
+ + + + +
pragma verify = false;
+include VerifyAdminAbortsIf;
+let vesting_contract = global<VestingContract>(contract_address);
+let acc = vesting_contract.signer_cap.account;
+let old_operator = vesting_contract.staking.operator;
+include staking_contract::ContractExistsAbortsIf { staker: acc, operator: old_operator };
+let store = global<staking_contract::Store>(acc);
+let staking_contracts = store.staking_contracts;
+aborts_if simple_map::spec_contains_key(staking_contracts, new_operator);
+let staking_contract = simple_map::spec_get(staking_contracts, old_operator);
+include DistributeInternalAbortsIf { staker: acc, operator: old_operator, staking_contract, distribute_events: store.distribute_events };
+
+ + + + + +### Function `update_operator_with_same_commission` + + +
public entry fun update_operator_with_same_commission(admin: &signer, contract_address: address, new_operator: address)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `update_commission_percentage` + + +
public entry fun update_commission_percentage(admin: &signer, contract_address: address, new_commission_percentage: u64)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `update_voter` + + +
public entry fun update_voter(admin: &signer, contract_address: address, new_voter: address)
+
+ + + + +
pragma verify_duration_estimate = 300;
+include VerifyAdminAbortsIf;
+let vesting_contract = global<VestingContract>(contract_address);
+let operator = vesting_contract.staking.operator;
+let staker = vesting_contract.signer_cap.account;
+include staking_contract::UpdateVoterSchema;
+
+ + + + + +### Function `reset_lockup` + + +
public entry fun reset_lockup(admin: &signer, contract_address: address)
+
+ + + + +
pragma verify_duration_estimate = 300;
+aborts_if !exists<VestingContract>(contract_address);
+let vesting_contract = global<VestingContract>(contract_address);
+aborts_if signer::address_of(admin) != vesting_contract.admin;
+let operator = vesting_contract.staking.operator;
+let staker = vesting_contract.signer_cap.account;
+include staking_contract::ContractExistsAbortsIf {staker, operator};
+include staking_contract::IncreaseLockupWithCapAbortsIf {staker, operator};
+let store = global<staking_contract::Store>(staker);
+let staking_contract = simple_map::spec_get(store.staking_contracts, operator);
+let pool_address = staking_contract.owner_cap.pool_address;
+aborts_if !exists<stake::StakePool>(vesting_contract.staking.pool_address);
+
+ + + + + +### Function `set_beneficiary` + + +
public entry fun set_beneficiary(admin: &signer, contract_address: address, shareholder: address, new_beneficiary: address)
+
+ + + + +
pragma verify_duration_estimate = 300;
+pragma aborts_if_is_partial;
+aborts_if !account::exists_at(new_beneficiary);
+aborts_if !coin::spec_is_account_registered<AptosCoin>(new_beneficiary);
+include VerifyAdminAbortsIf;
+let post vesting_contract = global<VestingContract>(contract_address);
+ensures simple_map::spec_contains_key(vesting_contract.beneficiaries,shareholder);
+
+ + + + + +### Function `reset_beneficiary` + + +
public entry fun reset_beneficiary(account: &signer, contract_address: address, shareholder: address)
+
+ + + + +
aborts_if !exists<VestingContract>(contract_address);
+let addr = signer::address_of(account);
+let vesting_contract = global<VestingContract>(contract_address);
+aborts_if addr != vesting_contract.admin && !std::string::spec_internal_check_utf8(ROLE_BENEFICIARY_RESETTER);
+aborts_if addr != vesting_contract.admin && !exists<VestingAccountManagement>(contract_address);
+let roles = global<VestingAccountManagement>(contract_address).roles;
+let role = std::string::spec_utf8(ROLE_BENEFICIARY_RESETTER);
+aborts_if addr != vesting_contract.admin && !simple_map::spec_contains_key(roles, role);
+aborts_if addr != vesting_contract.admin && addr != simple_map::spec_get(roles, role);
+let post post_vesting_contract = global<VestingContract>(contract_address);
+ensures !simple_map::spec_contains_key(post_vesting_contract.beneficiaries,shareholder);
+
+ + + + + +### Function `set_management_role` + + +
public entry fun set_management_role(admin: &signer, contract_address: address, role: string::String, role_holder: address)
+
+ + + + +
pragma aborts_if_is_partial;
+include SetManagementRoleAbortsIf;
+
+ + + + + +### Function `set_beneficiary_resetter` + + +
public entry fun set_beneficiary_resetter(admin: &signer, contract_address: address, beneficiary_resetter: address)
+
+ + + + +
pragma aborts_if_is_partial;
+aborts_if !std::string::spec_internal_check_utf8(ROLE_BENEFICIARY_RESETTER);
+include SetManagementRoleAbortsIf;
+
+ + + + + +### Function `set_beneficiary_for_operator` + + +
public entry fun set_beneficiary_for_operator(operator: &signer, new_beneficiary: address)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `get_role_holder` + + +
public fun get_role_holder(contract_address: address, role: string::String): address
+
+ + + + +
aborts_if !exists<VestingAccountManagement>(contract_address);
+let roles = global<VestingAccountManagement>(contract_address).roles;
+aborts_if !simple_map::spec_contains_key(roles,role);
+
+ + + + + +### Function `get_vesting_account_signer` + + +
public fun get_vesting_account_signer(admin: &signer, contract_address: address): signer
+
+ + + + +
include VerifyAdminAbortsIf;
+
+ + + + + +### Function `get_vesting_account_signer_internal` + + +
fun get_vesting_account_signer_internal(vesting_contract: &vesting::VestingContract): signer
+
+ + + + +
aborts_if false;
+
+ + + + + + + +
fun spec_get_vesting_account_signer(vesting_contract: VestingContract): signer;
+
+ + + + + +### Function `create_vesting_contract_account` + + +
fun create_vesting_contract_account(admin: &signer, contract_creation_seed: vector<u8>): (signer, account::SignerCapability)
+
+ + + + +
pragma verify_duration_estimate = 300;
+let admin_addr = signer::address_of(admin);
+let admin_store = global<AdminStore>(admin_addr);
+let seed = bcs::to_bytes(admin_addr);
+let nonce = bcs::to_bytes(admin_store.nonce);
+let first = concat(seed, nonce);
+let second = concat(first, VESTING_POOL_SALT);
+let end = concat(second, contract_creation_seed);
+// This enforces high-level requirement 11:
+let resource_addr = account::spec_create_resource_address(admin_addr, end);
+aborts_if !exists<AdminStore>(admin_addr);
+aborts_if len(account::ZERO_AUTH_KEY) != 32;
+aborts_if admin_store.nonce + 1 > MAX_U64;
+let ea = account::exists_at(resource_addr);
+include if (ea) account::CreateResourceAccountAbortsIf else account::CreateAccountAbortsIf {addr: resource_addr};
+let acc = global<account::Account>(resource_addr);
+let post post_acc = global<account::Account>(resource_addr);
+aborts_if !exists<coin::CoinStore<AptosCoin>>(resource_addr) && !aptos_std::type_info::spec_is_struct<AptosCoin>();
+aborts_if !exists<coin::CoinStore<AptosCoin>>(resource_addr) && ea && acc.guid_creation_num + 2 > MAX_U64;
+aborts_if !exists<coin::CoinStore<AptosCoin>>(resource_addr) && ea && acc.guid_creation_num + 2 >= account::MAX_GUID_CREATION_NUM;
+ensures exists<account::Account>(resource_addr) && post_acc.authentication_key == account::ZERO_AUTH_KEY &&
+        exists<coin::CoinStore<AptosCoin>>(resource_addr);
+ensures signer::address_of(result_1) == resource_addr;
+ensures result_2.account == resource_addr;
+
+ + + + + +### Function `verify_admin` + + +
fun verify_admin(admin: &signer, vesting_contract: &vesting::VestingContract)
+
+ + + + +
// This enforces high-level requirement 9:
+aborts_if signer::address_of(admin) != vesting_contract.admin;
+
+ + + + + +### Function `assert_vesting_contract_exists` + + +
fun assert_vesting_contract_exists(contract_address: address)
+
+ + + + +
// This enforces high-level requirement 1:
+aborts_if !exists<VestingContract>(contract_address);
+
+ + + + + +### Function `assert_active_vesting_contract` + + +
fun assert_active_vesting_contract(contract_address: address)
+
+ + + + +
include ActiveVestingContractAbortsIf<VestingContract>;
+
+ + + + + +### Function `unlock_stake` + + +
fun unlock_stake(vesting_contract: &vesting::VestingContract, amount: u64)
+
+ + + + +
pragma verify = false;
+include UnlockStakeAbortsIf;
+
+ + + + + + + +
schema UnlockStakeAbortsIf {
+    vesting_contract: &VestingContract;
+    amount: u64;
+    let acc = vesting_contract.signer_cap.account;
+    let operator = vesting_contract.staking.operator;
+    include amount != 0 ==> staking_contract::ContractExistsAbortsIf { staker: acc, operator };
+    let store = global<staking_contract::Store>(acc);
+    let staking_contract = simple_map::spec_get(store.staking_contracts, operator);
+    include amount != 0 ==> DistributeInternalAbortsIf { staker: acc, operator, staking_contract, distribute_events: store.distribute_events };
+}
+
+ + + + + +### Function `withdraw_stake` + + +
fun withdraw_stake(vesting_contract: &vesting::VestingContract, contract_address: address): coin::Coin<aptos_coin::AptosCoin>
+
+ + + + +
pragma verify = false;
+include WithdrawStakeAbortsIf;
+
+ + + + + + + +
schema WithdrawStakeAbortsIf {
+    vesting_contract: &VestingContract;
+    contract_address: address;
+    let operator = vesting_contract.staking.operator;
+    include staking_contract::ContractExistsAbortsIf { staker: contract_address, operator };
+    let store = global<staking_contract::Store>(contract_address);
+    let staking_contract = simple_map::spec_get(store.staking_contracts, operator);
+    include DistributeInternalAbortsIf { staker: contract_address, operator, staking_contract, distribute_events: store.distribute_events };
+}
+
+ + + + + + + +
schema DistributeInternalAbortsIf {
+    staker: address;
+    operator: address;
+    staking_contract: staking_contract::StakingContract;
+    distribute_events: EventHandle<staking_contract::DistributeEvent>;
+    let pool_address = staking_contract.pool_address;
+    aborts_if !exists<stake::StakePool>(pool_address);
+    let stake_pool = global<stake::StakePool>(pool_address);
+    let inactive = stake_pool.inactive.value;
+    let pending_inactive = stake_pool.pending_inactive.value;
+    aborts_if inactive + pending_inactive > MAX_U64;
+    let total_potential_withdrawable = inactive + pending_inactive;
+    let pool_address_1 = staking_contract.owner_cap.pool_address;
+    aborts_if !exists<stake::StakePool>(pool_address_1);
+    let stake_pool_1 = global<stake::StakePool>(pool_address_1);
+    aborts_if !exists<stake::ValidatorSet>(@aptos_framework);
+    let validator_set = global<stake::ValidatorSet>(@aptos_framework);
+    let inactive_state = !stake::spec_contains(validator_set.pending_active, pool_address_1)
+        && !stake::spec_contains(validator_set.active_validators, pool_address_1)
+        && !stake::spec_contains(validator_set.pending_inactive, pool_address_1);
+    let inactive_1 = stake_pool_1.inactive.value;
+    let pending_inactive_1 = stake_pool_1.pending_inactive.value;
+    let new_inactive_1 = inactive_1 + pending_inactive_1;
+    aborts_if inactive_state && timestamp::spec_now_seconds() >= stake_pool_1.locked_until_secs
+        && inactive_1 + pending_inactive_1 > MAX_U64;
+}
+
+ + + + + +### Function `get_beneficiary` + + +
fun get_beneficiary(contract: &vesting::VestingContract, shareholder: address): address
+
+ + + + +
// This enforces high-level requirement 3:
+aborts_if false;
+
+ + + + + + + +
schema SetManagementRoleAbortsIf {
+    contract_address: address;
+    admin: signer;
+    aborts_if !exists<VestingContract>(contract_address);
+    let vesting_contract = global<VestingContract>(contract_address);
+    aborts_if signer::address_of(admin) != vesting_contract.admin;
+}
+
+ + + + + + + +
schema VerifyAdminAbortsIf {
+    contract_address: address;
+    admin: signer;
+    aborts_if !exists<VestingContract>(contract_address);
+    let vesting_contract = global<VestingContract>(contract_address);
+    aborts_if signer::address_of(admin) != vesting_contract.admin;
+}
+
+ + + + + + + +
schema ActiveVestingContractAbortsIf<VestingContract> {
+    contract_address: address;
+    // This enforces high-level requirement 5:
+    aborts_if !exists<VestingContract>(contract_address);
+    let vesting_contract = global<VestingContract>(contract_address);
+    // This enforces high-level requirement 8:
+    aborts_if vesting_contract.state != VESTING_POOL_ACTIVE;
+}
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/compiler-v2-doc/voting.md b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/voting.md new file mode 100644 index 0000000000000..431eb53db9cb5 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/compiler-v2-doc/voting.md @@ -0,0 +1,2596 @@ + + + +# Module `0x1::voting` + + +This is the general Voting module that can be used as part of a DAO Governance. Voting is designed to be used by +standalone governance modules, who has full control over the voting flow and is responsible for voting power +calculation and including proper capabilities when creating the proposal so resolution can go through. +On-chain governance of the Aptos network also uses Voting. + +The voting flow: +1. The Voting module can be deployed at a known address (e.g. 0x1 for Aptos on-chain governance) +2. The governance module, e.g. AptosGovernance, can be deployed later and define a GovernanceProposal resource type +that can also contain other information such as Capability resource for authorization. +3. The governance module's owner can then register the ProposalType with Voting. This also hosts the proposal list +(forum) on the calling account. +4. A proposer, through the governance module, can call Voting::create_proposal to create a proposal. create_proposal +cannot be called directly not through the governance module. A script hash of the resolution script that can later +be called to execute the proposal is required. +5. A voter, through the governance module, can call Voting::vote on a proposal. vote requires passing a &ProposalType +and thus only the governance module that registers ProposalType can call vote. +6. Once the proposal's expiration time has passed and more than the defined threshold has voted yes on the proposal, +anyone can call resolve which returns the content of the proposal (of type ProposalType) that can be used to execute. +7. 
Only the resolution script with the same script hash specified in the proposal can call Voting::resolve as part of +the resolution process. + + +- [Struct `Proposal`](#0x1_voting_Proposal) +- [Resource `VotingForum`](#0x1_voting_VotingForum) +- [Struct `VotingEvents`](#0x1_voting_VotingEvents) +- [Struct `CreateProposal`](#0x1_voting_CreateProposal) +- [Struct `RegisterForum`](#0x1_voting_RegisterForum) +- [Struct `Vote`](#0x1_voting_Vote) +- [Struct `ResolveProposal`](#0x1_voting_ResolveProposal) +- [Struct `CreateProposalEvent`](#0x1_voting_CreateProposalEvent) +- [Struct `RegisterForumEvent`](#0x1_voting_RegisterForumEvent) +- [Struct `VoteEvent`](#0x1_voting_VoteEvent) +- [Constants](#@Constants_0) +- [Function `register`](#0x1_voting_register) +- [Function `create_proposal`](#0x1_voting_create_proposal) +- [Function `create_proposal_v2`](#0x1_voting_create_proposal_v2) +- [Function `vote`](#0x1_voting_vote) +- [Function `is_proposal_resolvable`](#0x1_voting_is_proposal_resolvable) +- [Function `resolve`](#0x1_voting_resolve) +- [Function `resolve_proposal_v2`](#0x1_voting_resolve_proposal_v2) +- [Function `next_proposal_id`](#0x1_voting_next_proposal_id) +- [Function `get_proposer`](#0x1_voting_get_proposer) +- [Function `is_voting_closed`](#0x1_voting_is_voting_closed) +- [Function `can_be_resolved_early`](#0x1_voting_can_be_resolved_early) +- [Function `get_proposal_metadata`](#0x1_voting_get_proposal_metadata) +- [Function `get_proposal_metadata_value`](#0x1_voting_get_proposal_metadata_value) +- [Function `get_proposal_state`](#0x1_voting_get_proposal_state) +- [Function `get_proposal_creation_secs`](#0x1_voting_get_proposal_creation_secs) +- [Function `get_proposal_expiration_secs`](#0x1_voting_get_proposal_expiration_secs) +- [Function `get_execution_hash`](#0x1_voting_get_execution_hash) +- [Function `get_min_vote_threshold`](#0x1_voting_get_min_vote_threshold) +- [Function 
`get_early_resolution_vote_threshold`](#0x1_voting_get_early_resolution_vote_threshold) +- [Function `get_votes`](#0x1_voting_get_votes) +- [Function `is_resolved`](#0x1_voting_is_resolved) +- [Function `get_resolution_time_secs`](#0x1_voting_get_resolution_time_secs) +- [Function `is_multi_step_proposal_in_execution`](#0x1_voting_is_multi_step_proposal_in_execution) +- [Function `is_voting_period_over`](#0x1_voting_is_voting_period_over) +- [Function `get_proposal`](#0x1_voting_get_proposal) +- [Specification](#@Specification_1) + - [High-level Requirements](#high-level-req) + - [Module-level Specification](#module-level-spec) + - [Function `register`](#@Specification_1_register) + - [Function `create_proposal`](#@Specification_1_create_proposal) + - [Function `create_proposal_v2`](#@Specification_1_create_proposal_v2) + - [Function `vote`](#@Specification_1_vote) + - [Function `is_proposal_resolvable`](#@Specification_1_is_proposal_resolvable) + - [Function `resolve`](#@Specification_1_resolve) + - [Function `resolve_proposal_v2`](#@Specification_1_resolve_proposal_v2) + - [Function `next_proposal_id`](#@Specification_1_next_proposal_id) + - [Function `get_proposer`](#@Specification_1_get_proposer) + - [Function `is_voting_closed`](#@Specification_1_is_voting_closed) + - [Function `can_be_resolved_early`](#@Specification_1_can_be_resolved_early) + - [Function `get_proposal_metadata`](#@Specification_1_get_proposal_metadata) + - [Function `get_proposal_metadata_value`](#@Specification_1_get_proposal_metadata_value) + - [Function `get_proposal_state`](#@Specification_1_get_proposal_state) + - [Function `get_proposal_creation_secs`](#@Specification_1_get_proposal_creation_secs) + - [Function `get_proposal_expiration_secs`](#@Specification_1_get_proposal_expiration_secs) + - [Function `get_execution_hash`](#@Specification_1_get_execution_hash) + - [Function `get_min_vote_threshold`](#@Specification_1_get_min_vote_threshold) + - [Function 
`get_early_resolution_vote_threshold`](#@Specification_1_get_early_resolution_vote_threshold) + - [Function `get_votes`](#@Specification_1_get_votes) + - [Function `is_resolved`](#@Specification_1_is_resolved) + - [Function `get_resolution_time_secs`](#@Specification_1_get_resolution_time_secs) + - [Function `is_multi_step_proposal_in_execution`](#@Specification_1_is_multi_step_proposal_in_execution) + - [Function `is_voting_period_over`](#@Specification_1_is_voting_period_over) + + +
use 0x1::account;
+use 0x1::bcs;
+use 0x1::error;
+use 0x1::event;
+use 0x1::features;
+use 0x1::from_bcs;
+use 0x1::option;
+use 0x1::signer;
+use 0x1::simple_map;
+use 0x1::string;
+use 0x1::table;
+use 0x1::timestamp;
+use 0x1::transaction_context;
+use 0x1::type_info;
+
+ + + + + +## Struct `Proposal` + +Extra metadata (e.g. description, code url) can be part of the ProposalType struct. + + +
struct Proposal<ProposalType: store> has store
+
+ + + +
+Fields + + +
+
+proposer: address +
+
+ Required. The address of the proposer. +
+
+execution_content: option::Option<ProposalType> +
+
+ Required. Should contain enough information to execute later, for example the required capability. + This is stored as an option so we can return it to governance when the proposal is resolved. +
+
+metadata: simple_map::SimpleMap<string::String, vector<u8>> +
+
+ Optional. Value is serialized value of an attribute. + Currently, we have three attributes that are used by the voting flow. + 1. RESOLVABLE_TIME_METADATA_KEY: this is used to record the resolvable time to ensure that resolution has to be done non-atomically. + 2. IS_MULTI_STEP_PROPOSAL_KEY: this is used to track if a proposal is single-step or multi-step. + 3. IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY: this attribute only applies to multi-step proposals. A single-step proposal will not have + this field in its metadata map. The value is used to indicate if a multi-step proposal is in execution. If yes, we will disable further + voting for this multi-step proposal. +
+
+creation_time_secs: u64 +
+
+ Timestamp when the proposal was created. +
+
+execution_hash: vector<u8> +
+
+ Required. The hash for the execution script module. Only the same exact script module can resolve this + proposal. +
+
+min_vote_threshold: u128 +
+
+ A proposal is only resolved if expiration has passed and the number of votes is above threshold. +
+
+expiration_secs: u64 +
+
+ +
+
+early_resolution_vote_threshold: option::Option<u128> +
+
+ Optional. Early resolution threshold. If specified, the proposal can be resolved early if the total + number of yes or no votes passes this threshold. + For example, this can be set to 50% of the total supply of the voting token, so if > 50% vote yes or no, + the proposal can be resolved before expiration. +
+
+yes_votes: u128 +
+
+ Number of votes for each outcome. + u128 since the voting power is already u64 and can add up to more than u64 can hold. +
+
+no_votes: u128 +
+
+ +
+
+is_resolved: bool +
+
+ Whether the proposal has been resolved. +
+
+resolution_time_secs: u64 +
+
+ Resolution timestamp if the proposal has been resolved. 0 otherwise. +
+
+ + +
+ + + +## Resource `VotingForum` + + + +
struct VotingForum<ProposalType: store> has key
+
+ + + +
+Fields + + +
+
+proposals: table::Table<u64, voting::Proposal<ProposalType>> +
+
+ Use Table for execution optimization instead of Vector for gas cost since Vector is read entirely into memory + during execution while only relevant Table entries are. +
+
+events: voting::VotingEvents +
+
+ +
+
+next_proposal_id: u64 +
+
+ Unique identifier for a proposal. This allows for 2 * 10**19 proposals. +
+
+ + +
+ + + +## Struct `VotingEvents` + + + +
struct VotingEvents has store
+
+ + + +
+Fields + + +
+
+create_proposal_events: event::EventHandle<voting::CreateProposalEvent> +
+
+ +
+
+register_forum_events: event::EventHandle<voting::RegisterForumEvent> +
+
+ +
+
+resolve_proposal_events: event::EventHandle<voting::ResolveProposal> +
+
+ +
+
+vote_events: event::EventHandle<voting::VoteEvent> +
+
+ +
+
+ + +
+ + + +## Struct `CreateProposal` + + + +
#[event]
+struct CreateProposal has drop, store
+
+ + + +
+Fields + + +
+
+proposal_id: u64 +
+
+ +
+
+early_resolution_vote_threshold: option::Option<u128> +
+
+ +
+
+execution_hash: vector<u8> +
+
+ +
+
+expiration_secs: u64 +
+
+ +
+
+metadata: simple_map::SimpleMap<string::String, vector<u8>> +
+
+ +
+
+min_vote_threshold: u128 +
+
+ +
+
+ + +
+ + + +## Struct `RegisterForum` + + + +
#[event]
+struct RegisterForum has drop, store
+
+ + + +
+Fields + + +
+
+hosting_account: address +
+
+ +
+
+proposal_type_info: type_info::TypeInfo +
+
+ +
+
+ + +
+ + + +## Struct `Vote` + + + +
#[event]
+struct Vote has drop, store
+
+ + + +
+Fields + + +
+
+proposal_id: u64 +
+
+ +
+
+num_votes: u64 +
+
+ +
+
+ + +
+ + + +## Struct `ResolveProposal` + + + +
#[event]
+struct ResolveProposal has drop, store
+
+ + + +
+Fields + + +
+
+proposal_id: u64 +
+
+ +
+
+yes_votes: u128 +
+
+ +
+
+no_votes: u128 +
+
+ +
+
+resolved_early: bool +
+
+ +
+
+ + +
+ + + +## Struct `CreateProposalEvent` + + + +
struct CreateProposalEvent has drop, store
+
+ + + +
+Fields + + +
+
+proposal_id: u64 +
+
+ +
+
+early_resolution_vote_threshold: option::Option<u128> +
+
+ +
+
+execution_hash: vector<u8> +
+
+ +
+
+expiration_secs: u64 +
+
+ +
+
+metadata: simple_map::SimpleMap<string::String, vector<u8>> +
+
+ +
+
+min_vote_threshold: u128 +
+
+ +
+
+ + +
+ + + +## Struct `RegisterForumEvent` + + + +
struct RegisterForumEvent has drop, store
+
+ + + +
+Fields + + +
+
+hosting_account: address +
+
+ +
+
+proposal_type_info: type_info::TypeInfo +
+
+ +
+
+ + +
+ + + +## Struct `VoteEvent` + + + +
struct VoteEvent has drop, store
+
+ + + +
+Fields + + +
+
+proposal_id: u64 +
+
+ +
+
+num_votes: u64 +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Minimum vote threshold cannot be higher than early resolution threshold. + + +
const EINVALID_MIN_VOTE_THRESHOLD: u64 = 7;
+
+ + + + + +If a proposal is multi-step, we need to use resolve_proposal_v2() to resolve it. +If we use resolve() to resolve a multi-step proposal, it will fail with EMULTI_STEP_PROPOSAL_CANNOT_USE_SINGLE_STEP_RESOLVE_FUNCTION. + + +
const EMULTI_STEP_PROPOSAL_CANNOT_USE_SINGLE_STEP_RESOLVE_FUNCTION: u64 = 10;
+
+ + + + + +Cannot vote if the specified multi-step proposal is in execution. + + +
const EMULTI_STEP_PROPOSAL_IN_EXECUTION: u64 = 9;
+
+ + + + + +Proposal cannot be resolved more than once + + +
const EPROPOSAL_ALREADY_RESOLVED: u64 = 3;
+
+ + + + + +Proposal cannot be resolved. Either voting duration has not passed, not enough votes, or fewer yes than no votes + + +
const EPROPOSAL_CANNOT_BE_RESOLVED: u64 = 2;
+
+ + + + + +Proposal cannot contain an empty execution script hash + + +
const EPROPOSAL_EMPTY_EXECUTION_HASH: u64 = 4;
+
+ + + + + +Current script's execution hash does not match the specified proposal's + + +
const EPROPOSAL_EXECUTION_HASH_NOT_MATCHING: u64 = 1;
+
+ + + + + +Cannot call is_multi_step_proposal_in_execution() on single-step proposals. + + +
const EPROPOSAL_IS_SINGLE_STEP: u64 = 12;
+
+ + + + + +Proposal's voting period has already ended. + + +
const EPROPOSAL_VOTING_ALREADY_ENDED: u64 = 5;
+
+ + + + + +Resolution of a proposal cannot happen atomically in the same transaction as the last vote. + + +
const ERESOLUTION_CANNOT_BE_ATOMIC: u64 = 8;
+
+ + + + + +If we call resolve_proposal_v2() to resolve a single-step proposal, the next_execution_hash parameter should be an empty vector. + + +
const ESINGLE_STEP_PROPOSAL_CANNOT_HAVE_NEXT_EXECUTION_HASH: u64 = 11;
+
+ + + + + +Voting forum has already been registered. + + +
const EVOTING_FORUM_ALREADY_REGISTERED: u64 = 6;
+
+ + + + + +Key used to track if the multi-step proposal is in execution / resolving in progress. + + +
const IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY: vector<u8> = [73, 83, 95, 77, 85, 76, 84, 73, 95, 83, 84, 69, 80, 95, 80, 82, 79, 80, 79, 83, 65, 76, 95, 73, 78, 95, 69, 88, 69, 67, 85, 84, 73, 79, 78];
+
+ + + + + +Key used to track if the proposal is multi-step + + +
const IS_MULTI_STEP_PROPOSAL_KEY: vector<u8> = [73, 83, 95, 77, 85, 76, 84, 73, 95, 83, 84, 69, 80, 95, 80, 82, 79, 80, 79, 83, 65, 76, 95, 75, 69, 89];
+
+ + + + + +Proposal has failed because either the min vote threshold is not met or majority voted no. + + +
const PROPOSAL_STATE_FAILED: u64 = 3;
+
+ + + + + +ProposalStateEnum representing proposal state. + + +
const PROPOSAL_STATE_PENDING: u64 = 0;
+
+ + + + + + + +
const PROPOSAL_STATE_SUCCEEDED: u64 = 1;
+
+ + + + + +Key used to track the resolvable time in the proposal's metadata. + + +
const RESOLVABLE_TIME_METADATA_KEY: vector<u8> = [82, 69, 83, 79, 76, 86, 65, 66, 76, 69, 95, 84, 73, 77, 69, 95, 77, 69, 84, 65, 68, 65, 84, 65, 95, 75, 69, 89];
+
+ + + + + +## Function `register` + + + +
public fun register<ProposalType: store>(account: &signer)
+
+ + + +
+Implementation + + +
public fun register<ProposalType: store>(account: &signer) {
+    let addr = signer::address_of(account);
+    assert!(!exists<VotingForum<ProposalType>>(addr), error::already_exists(EVOTING_FORUM_ALREADY_REGISTERED));
+
+    let voting_forum = VotingForum<ProposalType> {
+        next_proposal_id: 0,
+        proposals: table::new<u64, Proposal<ProposalType>>(),
+        events: VotingEvents {
+            create_proposal_events: account::new_event_handle<CreateProposalEvent>(account),
+            register_forum_events: account::new_event_handle<RegisterForumEvent>(account),
+            resolve_proposal_events: account::new_event_handle<ResolveProposal>(account),
+            vote_events: account::new_event_handle<VoteEvent>(account),
+        }
+    };
+
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            RegisterForum {
+                hosting_account: addr,
+                proposal_type_info: type_info::type_of<ProposalType>(),
+            },
+        );
+    };
+    event::emit_event<RegisterForumEvent>(
+        &mut voting_forum.events.register_forum_events,
+        RegisterForumEvent {
+            hosting_account: addr,
+            proposal_type_info: type_info::type_of<ProposalType>(),
+        },
+    );
+
+    move_to(account, voting_forum);
+}
+
+ + + +
+ + + +## Function `create_proposal` + +Create a single-step proposal with the given parameters + +@param voting_forum_address The forum's address where the proposal will be stored. +@param execution_content The execution content that will be given back at resolution time. This can contain +data such as a capability resource used to scope the execution. +@param execution_hash The hash for the execution script module. Only the same exact script module can resolve +this proposal. +@param min_vote_threshold The minimum number of votes needed to consider this proposal successful. +@param expiration_secs The time in seconds at which the proposal expires and can potentially be resolved. +@param early_resolution_vote_threshold The vote threshold for early resolution of this proposal. +@param metadata A simple_map that stores information about this proposal. +@return The proposal id. + + +
public fun create_proposal<ProposalType: store>(proposer: address, voting_forum_address: address, execution_content: ProposalType, execution_hash: vector<u8>, min_vote_threshold: u128, expiration_secs: u64, early_resolution_vote_threshold: option::Option<u128>, metadata: simple_map::SimpleMap<string::String, vector<u8>>): u64
+
+ + + +
+Implementation + + +
public fun create_proposal<ProposalType: store>(
+    proposer: address,
+    voting_forum_address: address,
+    execution_content: ProposalType,
+    execution_hash: vector<u8>,
+    min_vote_threshold: u128,
+    expiration_secs: u64,
+    early_resolution_vote_threshold: Option<u128>,
+    metadata: SimpleMap<String, vector<u8>>,
+): u64 acquires VotingForum {
+    create_proposal_v2(
+        proposer,
+        voting_forum_address,
+        execution_content,
+        execution_hash,
+        min_vote_threshold,
+        expiration_secs,
+        early_resolution_vote_threshold,
+        metadata,
+        false
+    )
+}
+
+ + + +
+ + + +## Function `create_proposal_v2` + +Create a single-step or a multi-step proposal with the given parameters + +@param voting_forum_address The forum's address where the proposal will be stored. +@param execution_content The execution content that will be given back at resolution time. This can contain +data such as a capability resource used to scope the execution. +@param execution_hash The sha-256 hash for the execution script module. Only the same exact script module can +resolve this proposal. +@param min_vote_threshold The minimum number of votes needed to consider this proposal successful. +@param expiration_secs The time in seconds at which the proposal expires and can potentially be resolved. +@param early_resolution_vote_threshold The vote threshold for early resolution of this proposal. +@param metadata A simple_map that stores information about this proposal. +@param is_multi_step_proposal A bool value that indicates if the proposal is single-step or multi-step. +@return The proposal id. + + +
public fun create_proposal_v2<ProposalType: store>(proposer: address, voting_forum_address: address, execution_content: ProposalType, execution_hash: vector<u8>, min_vote_threshold: u128, expiration_secs: u64, early_resolution_vote_threshold: option::Option<u128>, metadata: simple_map::SimpleMap<string::String, vector<u8>>, is_multi_step_proposal: bool): u64
+
+ + + +
+Implementation + + +
public fun create_proposal_v2<ProposalType: store>(
+    proposer: address,
+    voting_forum_address: address,
+    execution_content: ProposalType,
+    execution_hash: vector<u8>,
+    min_vote_threshold: u128,
+    expiration_secs: u64,
+    early_resolution_vote_threshold: Option<u128>,
+    metadata: SimpleMap<String, vector<u8>>,
+    is_multi_step_proposal: bool,
+): u64 acquires VotingForum {
+    if (option::is_some(&early_resolution_vote_threshold)) {
+        assert!(
+            min_vote_threshold <= *option::borrow(&early_resolution_vote_threshold),
+            error::invalid_argument(EINVALID_MIN_VOTE_THRESHOLD),
+        );
+    };
+    // Make sure the execution script's hash is not empty.
+    assert!(vector::length(&execution_hash) > 0, error::invalid_argument(EPROPOSAL_EMPTY_EXECUTION_HASH));
+
+    let voting_forum = borrow_global_mut<VotingForum<ProposalType>>(voting_forum_address);
+    let proposal_id = voting_forum.next_proposal_id;
+    voting_forum.next_proposal_id = voting_forum.next_proposal_id + 1;
+
+    // Add a flag to indicate if this proposal is single-step or multi-step.
+    simple_map::add(&mut metadata, utf8(IS_MULTI_STEP_PROPOSAL_KEY), to_bytes(&is_multi_step_proposal));
+
+    let is_multi_step_in_execution_key = utf8(IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY);
+    if (is_multi_step_proposal) {
+        // If the given proposal is a multi-step proposal, we will add a flag to indicate if this multi-step proposal is in execution.
+        // This value is by default false. We turn this value to true when we start executing the multi-step proposal. This value
+        // will be used to disable further voting after we started executing the multi-step proposal.
+        simple_map::add(&mut metadata, is_multi_step_in_execution_key, to_bytes(&false));
+        // If the proposal is a single-step proposal, we check if the metadata passed by the client has the IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY key.
+        // If they have the key, we will remove it, because a single-step proposal doesn't need this key.
+    } else if (simple_map::contains_key(&mut metadata, &is_multi_step_in_execution_key)) {
+        simple_map::remove(&mut metadata, &is_multi_step_in_execution_key);
+    };
+
+    table::add(&mut voting_forum.proposals, proposal_id, Proposal {
+        proposer,
+        creation_time_secs: timestamp::now_seconds(),
+        execution_content: option::some<ProposalType>(execution_content),
+        execution_hash,
+        metadata,
+        min_vote_threshold,
+        expiration_secs,
+        early_resolution_vote_threshold,
+        yes_votes: 0,
+        no_votes: 0,
+        is_resolved: false,
+        resolution_time_secs: 0,
+    });
+
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            CreateProposal {
+                proposal_id,
+                early_resolution_vote_threshold,
+                execution_hash,
+                expiration_secs,
+                metadata,
+                min_vote_threshold,
+            },
+        );
+    };
+    event::emit_event<CreateProposalEvent>(
+        &mut voting_forum.events.create_proposal_events,
+        CreateProposalEvent {
+            proposal_id,
+            early_resolution_vote_threshold,
+            execution_hash,
+            expiration_secs,
+            metadata,
+            min_vote_threshold,
+        },
+    );
+
+    proposal_id
+}
+
+ + + +
+ + + +## Function `vote` + +Vote on the given proposal. + +@param _proof Required so only the governance module that defines ProposalType can initiate voting. +This guarantees that voting eligibility and voting power are controlled by the right governance. +@param voting_forum_address The address of the forum where the proposals are stored. +@param proposal_id The proposal id. +@param num_votes Number of votes. Voting power should be calculated by governance. +@param should_pass Whether the votes are for yes or no. + + +
public fun vote<ProposalType: store>(_proof: &ProposalType, voting_forum_address: address, proposal_id: u64, num_votes: u64, should_pass: bool)
+
+ + + +
+Implementation + + +
public fun vote<ProposalType: store>(
+    _proof: &ProposalType,
+    voting_forum_address: address,
+    proposal_id: u64,
+    num_votes: u64,
+    should_pass: bool,
+) acquires VotingForum {
+    let voting_forum = borrow_global_mut<VotingForum<ProposalType>>(voting_forum_address);
+    let proposal = table::borrow_mut(&mut voting_forum.proposals, proposal_id);
+    // Voting might still be possible after the proposal has enough yes votes to be resolved early. This would only
+    // lead to possible proposal resolution failure if the resolve early threshold is not definitive (e.g. < 50% + 1
+    // of the total voting token's supply). In this case, more voting might actually still be desirable.
+    // Governance mechanisms built on this voting module can apply additional rules on when voting is closed as
+    // appropriate.
+    assert!(!is_voting_period_over(proposal), error::invalid_state(EPROPOSAL_VOTING_ALREADY_ENDED));
+    assert!(!proposal.is_resolved, error::invalid_state(EPROPOSAL_ALREADY_RESOLVED));
+    // Assert this proposal is single-step, or if the proposal is multi-step, it is not in execution yet.
+    assert!(!simple_map::contains_key(&proposal.metadata, &utf8(IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY))
+        || *simple_map::borrow(&proposal.metadata, &utf8(IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY)) == to_bytes(
+        &false
+    ),
+        error::invalid_state(EMULTI_STEP_PROPOSAL_IN_EXECUTION));
+
+    if (should_pass) {
+        proposal.yes_votes = proposal.yes_votes + (num_votes as u128);
+    } else {
+        proposal.no_votes = proposal.no_votes + (num_votes as u128);
+    };
+
+    // Record the resolvable time to ensure that resolution has to be done non-atomically.
+    let timestamp_secs_bytes = to_bytes(&timestamp::now_seconds());
+    let key = utf8(RESOLVABLE_TIME_METADATA_KEY);
+    if (simple_map::contains_key(&proposal.metadata, &key)) {
+        *simple_map::borrow_mut(&mut proposal.metadata, &key) = timestamp_secs_bytes;
+    } else {
+        simple_map::add(&mut proposal.metadata, key, timestamp_secs_bytes);
+    };
+
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(Vote { proposal_id, num_votes });
+    };
+    event::emit_event<VoteEvent>(
+        &mut voting_forum.events.vote_events,
+        VoteEvent { proposal_id, num_votes },
+    );
+}
+
+ + + +
+ + + +## Function `is_proposal_resolvable` + +Common checks on if a proposal is resolvable, regardless if the proposal is single-step or multi-step. + + +
fun is_proposal_resolvable<ProposalType: store>(voting_forum_address: address, proposal_id: u64)
+
+ + + +
+Implementation + + +
fun is_proposal_resolvable<ProposalType: store>(
+    voting_forum_address: address,
+    proposal_id: u64,
+) acquires VotingForum {
+    let proposal_state = get_proposal_state<ProposalType>(voting_forum_address, proposal_id);
+    assert!(proposal_state == PROPOSAL_STATE_SUCCEEDED, error::invalid_state(EPROPOSAL_CANNOT_BE_RESOLVED));
+
+    let voting_forum = borrow_global_mut<VotingForum<ProposalType>>(voting_forum_address);
+    let proposal = table::borrow_mut(&mut voting_forum.proposals, proposal_id);
+    assert!(!proposal.is_resolved, error::invalid_state(EPROPOSAL_ALREADY_RESOLVED));
+
+    // We need to make sure that the resolution is happening in
+    // a separate transaction from the last vote to guard against any potential flashloan attacks.
+    let resolvable_time = to_u64(*simple_map::borrow(&proposal.metadata, &utf8(RESOLVABLE_TIME_METADATA_KEY)));
+    assert!(timestamp::now_seconds() > resolvable_time, error::invalid_state(ERESOLUTION_CANNOT_BE_ATOMIC));
+
+    assert!(
+        transaction_context::get_script_hash() == proposal.execution_hash,
+        error::invalid_argument(EPROPOSAL_EXECUTION_HASH_NOT_MATCHING),
+    );
+}
+
+ + + +
+ + + +## Function `resolve` + +Resolve a single-step proposal with given id. Can only be done if there are at least as many votes as min required and +there are more yes votes than no. If either of these conditions is not met, this will revert. + +@param voting_forum_address The address of the forum where the proposals are stored. +@param proposal_id The proposal id. + + +
public fun resolve<ProposalType: store>(voting_forum_address: address, proposal_id: u64): ProposalType
+
+ + + +
+Implementation + + +
public fun resolve<ProposalType: store>(
+    voting_forum_address: address,
+    proposal_id: u64,
+): ProposalType acquires VotingForum {
+    is_proposal_resolvable<ProposalType>(voting_forum_address, proposal_id);
+
+    let voting_forum = borrow_global_mut<VotingForum<ProposalType>>(voting_forum_address);
+    let proposal = table::borrow_mut(&mut voting_forum.proposals, proposal_id);
+
+    // Assert that the specified proposal is not a multi-step proposal.
+    let multi_step_key = utf8(IS_MULTI_STEP_PROPOSAL_KEY);
+    let has_multi_step_key = simple_map::contains_key(&proposal.metadata, &multi_step_key);
+    if (has_multi_step_key) {
+        let is_multi_step_proposal = from_bcs::to_bool(*simple_map::borrow(&proposal.metadata, &multi_step_key));
+        assert!(
+            !is_multi_step_proposal,
+            error::permission_denied(EMULTI_STEP_PROPOSAL_CANNOT_USE_SINGLE_STEP_RESOLVE_FUNCTION)
+        );
+    };
+
+    let resolved_early = can_be_resolved_early(proposal);
+    proposal.is_resolved = true;
+    proposal.resolution_time_secs = timestamp::now_seconds();
+
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            ResolveProposal {
+                proposal_id,
+                yes_votes: proposal.yes_votes,
+                no_votes: proposal.no_votes,
+                resolved_early,
+            },
+        );
+    };
+    event::emit_event<ResolveProposal>(
+        &mut voting_forum.events.resolve_proposal_events,
+        ResolveProposal {
+            proposal_id,
+            yes_votes: proposal.yes_votes,
+            no_votes: proposal.no_votes,
+            resolved_early,
+        },
+    );
+
+    option::extract(&mut proposal.execution_content)
+}
+
+ + + +
+ + + +## Function `resolve_proposal_v2` + +Resolve a single-step or a multi-step proposal with the given id. +Can only be done if there are at least as many votes as min required and +there are more yes votes than no. If either of these conditions is not met, this will revert. + + +@param voting_forum_address The address of the forum where the proposals are stored. +@param proposal_id The proposal id. +@param next_execution_hash The next execution hash if the given proposal is multi-step. + + +
public fun resolve_proposal_v2<ProposalType: store>(voting_forum_address: address, proposal_id: u64, next_execution_hash: vector<u8>)
+
+ + + +
+Implementation + + +
public fun resolve_proposal_v2<ProposalType: store>(
+    voting_forum_address: address,
+    proposal_id: u64,
+    next_execution_hash: vector<u8>,
+) acquires VotingForum {
+    is_proposal_resolvable<ProposalType>(voting_forum_address, proposal_id);
+
+    let voting_forum = borrow_global_mut<VotingForum<ProposalType>>(voting_forum_address);
+    let proposal = table::borrow_mut(&mut voting_forum.proposals, proposal_id);
+
+    // Update the IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY key to indicate that the multi-step proposal is in execution.
+    let multi_step_in_execution_key = utf8(IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY);
+    if (simple_map::contains_key(&proposal.metadata, &multi_step_in_execution_key)) {
+        let is_multi_step_proposal_in_execution_value = simple_map::borrow_mut(
+            &mut proposal.metadata,
+            &multi_step_in_execution_key
+        );
+        *is_multi_step_proposal_in_execution_value = to_bytes(&true);
+    };
+
+    let multi_step_key = utf8(IS_MULTI_STEP_PROPOSAL_KEY);
+    let is_multi_step = simple_map::contains_key(&proposal.metadata, &multi_step_key) && from_bcs::to_bool(
+        *simple_map::borrow(&proposal.metadata, &multi_step_key)
+    );
+    let next_execution_hash_is_empty = vector::length(&next_execution_hash) == 0;
+
+    // Assert that if this proposal is single-step, the `next_execution_hash` parameter is empty.
+    assert!(
+        is_multi_step || next_execution_hash_is_empty,
+        error::invalid_argument(ESINGLE_STEP_PROPOSAL_CANNOT_HAVE_NEXT_EXECUTION_HASH)
+    );
+
+    // If the `next_execution_hash` parameter is empty, it means that either
+    // - this proposal is a single-step proposal, or
+    // - this proposal is multi-step and we're currently resolving the last step in the multi-step proposal.
+    // We can mark that this proposal is resolved.
+    if (next_execution_hash_is_empty) {
+        proposal.is_resolved = true;
+        proposal.resolution_time_secs = timestamp::now_seconds();
+
+        // Set the `IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY` value to false upon successful resolution of the last step of a multi-step proposal.
+        if (is_multi_step) {
+            let is_multi_step_proposal_in_execution_value = simple_map::borrow_mut(
+                &mut proposal.metadata,
+                &multi_step_in_execution_key
+            );
+            *is_multi_step_proposal_in_execution_value = to_bytes(&false);
+        };
+    } else {
+        // If the current step is not the last step,
+        // update the proposal's execution hash on-chain to the execution hash of the next step.
+        proposal.execution_hash = next_execution_hash;
+    };
+
+    // For single-step proposals, we emit one `ResolveProposal` event per proposal.
+    // For multi-step proposals, we emit one `ResolveProposal` event per step in the multi-step proposal. This means
+    // that we emit multiple `ResolveProposal` events for the same multi-step proposal.
+    let resolved_early = can_be_resolved_early(proposal);
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            ResolveProposal {
+                proposal_id,
+                yes_votes: proposal.yes_votes,
+                no_votes: proposal.no_votes,
+                resolved_early,
+            },
+        );
+    };
+    event::emit_event(
+        &mut voting_forum.events.resolve_proposal_events,
+        ResolveProposal {
+            proposal_id,
+            yes_votes: proposal.yes_votes,
+            no_votes: proposal.no_votes,
+            resolved_early,
+        },
+    );
+
+}
+
+ + + +
+ + + +## Function `next_proposal_id` + +Return the next unassigned proposal id + + +
#[view]
+public fun next_proposal_id<ProposalType: store>(voting_forum_address: address): u64
+
+ + + +
+Implementation + + +
public fun next_proposal_id<ProposalType: store>(voting_forum_address: address, ): u64 acquires VotingForum {
+    let voting_forum = borrow_global<VotingForum<ProposalType>>(voting_forum_address);
+    voting_forum.next_proposal_id
+}
+
+ + + +
+ + + +## Function `get_proposer` + + + +
#[view]
+public fun get_proposer<ProposalType: store>(voting_forum_address: address, proposal_id: u64): address
+
+ + + +
+Implementation + + +
public fun get_proposer<ProposalType: store>(
+    voting_forum_address: address,
+    proposal_id: u64
+): address acquires VotingForum {
+    let proposal = get_proposal<ProposalType>(voting_forum_address, proposal_id);
+    proposal.proposer
+}
+
+ + + +
+ + + +## Function `is_voting_closed` + + + +
#[view]
+public fun is_voting_closed<ProposalType: store>(voting_forum_address: address, proposal_id: u64): bool
+
+ + + +
+Implementation + + +
public fun is_voting_closed<ProposalType: store>(
+    voting_forum_address: address,
+    proposal_id: u64
+): bool acquires VotingForum {
+    let proposal = get_proposal<ProposalType>(voting_forum_address, proposal_id);
+    can_be_resolved_early(proposal) || is_voting_period_over(proposal)
+}
+
+ + + +
+ + + +## Function `can_be_resolved_early` + +Return true if the proposal has reached early resolution threshold (if specified). + + +
public fun can_be_resolved_early<ProposalType: store>(proposal: &voting::Proposal<ProposalType>): bool
+
+ + + +
+Implementation + + +
public fun can_be_resolved_early<ProposalType: store>(proposal: &Proposal<ProposalType>): bool {
+    if (option::is_some(&proposal.early_resolution_vote_threshold)) {
+        let early_resolution_threshold = *option::borrow(&proposal.early_resolution_vote_threshold);
+        if (proposal.yes_votes >= early_resolution_threshold || proposal.no_votes >= early_resolution_threshold) {
+            return true
+        };
+    };
+    false
+}
+
+ + + +
+ + + +## Function `get_proposal_metadata` + + + +
#[view]
+public fun get_proposal_metadata<ProposalType: store>(voting_forum_address: address, proposal_id: u64): simple_map::SimpleMap<string::String, vector<u8>>
+
+ + + +
+Implementation + + +
public fun get_proposal_metadata<ProposalType: store>(
+    voting_forum_address: address,
+    proposal_id: u64,
+): SimpleMap<String, vector<u8>> acquires VotingForum {
+    let proposal = get_proposal<ProposalType>(voting_forum_address, proposal_id);
+    proposal.metadata
+}
+
+ + + +
+ + + +## Function `get_proposal_metadata_value` + + + +
#[view]
+public fun get_proposal_metadata_value<ProposalType: store>(voting_forum_address: address, proposal_id: u64, metadata_key: string::String): vector<u8>
+
+ + + +
+Implementation + + +
public fun get_proposal_metadata_value<ProposalType: store>(
+    voting_forum_address: address,
+    proposal_id: u64,
+    metadata_key: String,
+): vector<u8> acquires VotingForum {
+    let proposal = get_proposal<ProposalType>(voting_forum_address, proposal_id);
+    *simple_map::borrow(&proposal.metadata, &metadata_key)
+}
+
+ + + +
+ + + +## Function `get_proposal_state` + +Return the state of the proposal with given id. + +@param voting_forum_address The address of the forum where the proposals are stored. +@param proposal_id The proposal id. +@return Proposal state as an enum value. + + +
#[view]
+public fun get_proposal_state<ProposalType: store>(voting_forum_address: address, proposal_id: u64): u64
+
+ + + +
+Implementation + + +
public fun get_proposal_state<ProposalType: store>(
+    voting_forum_address: address,
+    proposal_id: u64,
+): u64 acquires VotingForum {
+    if (is_voting_closed<ProposalType>(voting_forum_address, proposal_id)) {
+        let proposal = get_proposal<ProposalType>(voting_forum_address, proposal_id);
+        let yes_votes = proposal.yes_votes;
+        let no_votes = proposal.no_votes;
+
+        if (yes_votes > no_votes && yes_votes + no_votes >= proposal.min_vote_threshold) {
+            PROPOSAL_STATE_SUCCEEDED
+        } else {
+            PROPOSAL_STATE_FAILED
+        }
+    } else {
+        PROPOSAL_STATE_PENDING
+    }
+}
+
+ + + +
+ + + +## Function `get_proposal_creation_secs` + +Return the proposal's creation time. + + +
#[view]
+public fun get_proposal_creation_secs<ProposalType: store>(voting_forum_address: address, proposal_id: u64): u64
+
+ + + +
+Implementation + + +
public fun get_proposal_creation_secs<ProposalType: store>(
+    voting_forum_address: address,
+    proposal_id: u64,
+): u64 acquires VotingForum {
+    let proposal = get_proposal<ProposalType>(voting_forum_address, proposal_id);
+    proposal.creation_time_secs
+}
+
+ + + +
+ + + +## Function `get_proposal_expiration_secs` + +Return the proposal's expiration time. + + +
#[view]
+public fun get_proposal_expiration_secs<ProposalType: store>(voting_forum_address: address, proposal_id: u64): u64
+
+ + + +
+Implementation + + +
public fun get_proposal_expiration_secs<ProposalType: store>(
+    voting_forum_address: address,
+    proposal_id: u64,
+): u64 acquires VotingForum {
+    let proposal = get_proposal<ProposalType>(voting_forum_address, proposal_id);
+    proposal.expiration_secs
+}
+
+ + + +
+ + + +## Function `get_execution_hash` + +Return the proposal's execution hash. + + +
#[view]
+public fun get_execution_hash<ProposalType: store>(voting_forum_address: address, proposal_id: u64): vector<u8>
+
+ + + +
+Implementation + + +
public fun get_execution_hash<ProposalType: store>(
+    voting_forum_address: address,
+    proposal_id: u64,
+): vector<u8> acquires VotingForum {
+    let proposal = get_proposal<ProposalType>(voting_forum_address, proposal_id);
+    proposal.execution_hash
+}
+
+ + + +
+ + + +## Function `get_min_vote_threshold` + +Return the proposal's minimum vote threshold + + +
#[view]
+public fun get_min_vote_threshold<ProposalType: store>(voting_forum_address: address, proposal_id: u64): u128
+
+ + + +
+Implementation + + +
public fun get_min_vote_threshold<ProposalType: store>(
+    voting_forum_address: address,
+    proposal_id: u64,
+): u128 acquires VotingForum {
+    let proposal = get_proposal<ProposalType>(voting_forum_address, proposal_id);
+    proposal.min_vote_threshold
+}
+
+ + + +
+ + + +## Function `get_early_resolution_vote_threshold` + +Return the proposal's early resolution minimum vote threshold (optionally set) + + +
#[view]
+public fun get_early_resolution_vote_threshold<ProposalType: store>(voting_forum_address: address, proposal_id: u64): option::Option<u128>
+
+ + + +
+Implementation + + +
public fun get_early_resolution_vote_threshold<ProposalType: store>(
+    voting_forum_address: address,
+    proposal_id: u64,
+): Option<u128> acquires VotingForum {
+    let proposal = get_proposal<ProposalType>(voting_forum_address, proposal_id);
+    proposal.early_resolution_vote_threshold
+}
+
+ + + +
+ + + +## Function `get_votes` + +Return the proposal's current vote count (yes_votes, no_votes) + + +
#[view]
+public fun get_votes<ProposalType: store>(voting_forum_address: address, proposal_id: u64): (u128, u128)
+
+ + + +
+Implementation + + +
public fun get_votes<ProposalType: store>(
+    voting_forum_address: address,
+    proposal_id: u64,
+): (u128, u128) acquires VotingForum {
+    let proposal = get_proposal<ProposalType>(voting_forum_address, proposal_id);
+    (proposal.yes_votes, proposal.no_votes)
+}
+
+ + + +
+ + + +## Function `is_resolved` + +Return true if the governance proposal has already been resolved. + + +
#[view]
+public fun is_resolved<ProposalType: store>(voting_forum_address: address, proposal_id: u64): bool
+
+ + + +
+Implementation + + +
public fun is_resolved<ProposalType: store>(
+    voting_forum_address: address,
+    proposal_id: u64,
+): bool acquires VotingForum {
+    let proposal = get_proposal<ProposalType>(voting_forum_address, proposal_id);
+    proposal.is_resolved
+}
+
+ + + +
+ + + +## Function `get_resolution_time_secs` + + + +
#[view]
+public fun get_resolution_time_secs<ProposalType: store>(voting_forum_address: address, proposal_id: u64): u64
+
+ + + +
+Implementation + + +
public fun get_resolution_time_secs<ProposalType: store>(
+    voting_forum_address: address,
+    proposal_id: u64,
+): u64 acquires VotingForum {
+    let proposal = get_proposal<ProposalType>(voting_forum_address, proposal_id);
+    proposal.resolution_time_secs
+}
+
+ + + +
+ + + +## Function `is_multi_step_proposal_in_execution` + +Return true if the multi-step governance proposal is in execution. + + +
#[view]
+public fun is_multi_step_proposal_in_execution<ProposalType: store>(voting_forum_address: address, proposal_id: u64): bool
+
+ + + +
+Implementation + + +
public fun is_multi_step_proposal_in_execution<ProposalType: store>(
+    voting_forum_address: address,
+    proposal_id: u64,
+): bool acquires VotingForum {
+    let voting_forum = borrow_global<VotingForum<ProposalType>>(voting_forum_address);
+    let proposal = table::borrow(&voting_forum.proposals, proposal_id);
+    let is_multi_step_in_execution_key = utf8(IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY);
+    assert!(
+        simple_map::contains_key(&proposal.metadata, &is_multi_step_in_execution_key),
+        error::invalid_argument(EPROPOSAL_IS_SINGLE_STEP)
+    );
+    from_bcs::to_bool(*simple_map::borrow(&proposal.metadata, &is_multi_step_in_execution_key))
+}
+
+ + + +
+ + + +## Function `is_voting_period_over` + +Return true if the voting period of the given proposal has already ended. + + +
fun is_voting_period_over<ProposalType: store>(proposal: &voting::Proposal<ProposalType>): bool
+
+ + + +
+Implementation + + +
fun is_voting_period_over<ProposalType: store>(proposal: &Proposal<ProposalType>): bool {
+    timestamp::now_seconds() > proposal.expiration_secs
+}
+
+ + + +
+ + + +## Function `get_proposal` + + + +
fun get_proposal<ProposalType: store>(voting_forum_address: address, proposal_id: u64): &voting::Proposal<ProposalType>
+
+ + + +
+Implementation + + +
inline fun get_proposal<ProposalType: store>(
+    voting_forum_address: address,
+    proposal_id: u64,
+): &Proposal<ProposalType> acquires VotingForum {
+    let voting_forum = borrow_global<VotingForum<ProposalType>>(voting_forum_address);
+    table::borrow(&voting_forum.proposals, proposal_id)
+}
+
+ + + +
+ + + +## Specification + + + + + + +### High-level Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
No.RequirementCriticalityImplementationEnforcement
1The proposal ID in a voting forum is unique and always increases monotonically with each new proposal created for that voting forum.HighThe create_proposal and create_proposal_v2 create a new proposal with a unique ID derived from the voting_forum's next_proposal_id incrementally.Formally verified via create_proposal.
2While voting, it ensures that only the governance module that defines ProposalType may initiate voting and that the proposal under vote exists in the specified voting forum.CriticalThe vote function verifies the eligibility and validity of a proposal before allowing voting. It ensures that only the correct governance module initiates voting. The function checks if the proposal is currently eligible for voting by confirming it has not resolved and the voting period has not ended.Formally verified via vote.
3After resolving a single-step proposal, the corresponding proposal is guaranteed to be marked as successfully resolved.HighUpon invoking the resolve function on a proposal, it undergoes a series of checks to ensure its validity. These include verifying if the proposal exists, is a single-step proposal, and meets the criteria for resolution. If the checks pass, the proposal's is_resolved flag becomes true, indicating a successful resolution.Formally verified via resolve.
4In the context of v2 proposal resolving, both single-step and multi-step proposals are accurately handled. It ensures that for single-step proposals, the next execution hash is empty and resolves the proposal, while for multi-step proposals, it guarantees that the next execution hash corresponds to the hash of the next step, maintaining the integrity of the proposal execution sequence.MediumThe function resolve_proposal_v2 correctly handles both single-step and multi-step proposals. For single-step proposals, it ensures that the next_execution_hash parameter is empty and resolves the proposal. For multi-step proposals, it ensures that the next_execution_hash parameter contains the hash of the next step.Formally verified via resolve_proposal_v2.
+ + + + + +### Module-level Specification + + +
pragma verify = true;
+pragma aborts_if_is_strict;
+
+ + + + + +### Function `register` + + +
public fun register<ProposalType: store>(account: &signer)
+
+ + + + +
let addr = signer::address_of(account);
+aborts_if exists<VotingForum<ProposalType>>(addr);
+aborts_if !exists<account::Account>(addr);
+let register_account = global<account::Account>(addr);
+aborts_if register_account.guid_creation_num + 4 >= account::MAX_GUID_CREATION_NUM;
+aborts_if register_account.guid_creation_num + 4 > MAX_U64;
+aborts_if !type_info::spec_is_struct<ProposalType>();
+ensures exists<VotingForum<ProposalType>>(addr);
+
+ + + + + +### Function `create_proposal` + + +
public fun create_proposal<ProposalType: store>(proposer: address, voting_forum_address: address, execution_content: ProposalType, execution_hash: vector<u8>, min_vote_threshold: u128, expiration_secs: u64, early_resolution_vote_threshold: option::Option<u128>, metadata: simple_map::SimpleMap<string::String, vector<u8>>): u64
+
+ + + + +
requires chain_status::is_operating();
+include CreateProposalAbortsIfAndEnsures<ProposalType>{is_multi_step_proposal: false};
+// This enforces high-level requirement 1:
+ensures result == old(global<VotingForum<ProposalType>>(voting_forum_address)).next_proposal_id;
+
+ + + + + +### Function `create_proposal_v2` + + +
public fun create_proposal_v2<ProposalType: store>(proposer: address, voting_forum_address: address, execution_content: ProposalType, execution_hash: vector<u8>, min_vote_threshold: u128, expiration_secs: u64, early_resolution_vote_threshold: option::Option<u128>, metadata: simple_map::SimpleMap<string::String, vector<u8>>, is_multi_step_proposal: bool): u64
+
+ + + + +
requires chain_status::is_operating();
+include CreateProposalAbortsIfAndEnsures<ProposalType>;
+ensures result == old(global<VotingForum<ProposalType>>(voting_forum_address)).next_proposal_id;
+
+ + + + + + + +
schema CreateProposalAbortsIfAndEnsures<ProposalType> {
+    voting_forum_address: address;
+    execution_hash: vector<u8>;
+    min_vote_threshold: u128;
+    early_resolution_vote_threshold: Option<u128>;
+    metadata: SimpleMap<String, vector<u8>>;
+    is_multi_step_proposal: bool;
+    let voting_forum = global<VotingForum<ProposalType>>(voting_forum_address);
+    let proposal_id = voting_forum.next_proposal_id;
+    aborts_if !exists<VotingForum<ProposalType>>(voting_forum_address);
+    aborts_if table::spec_contains(voting_forum.proposals,proposal_id);
+    aborts_if len(early_resolution_vote_threshold.vec) != 0 && min_vote_threshold > early_resolution_vote_threshold.vec[0];
+    aborts_if !std::string::spec_internal_check_utf8(IS_MULTI_STEP_PROPOSAL_KEY);
+    aborts_if !std::string::spec_internal_check_utf8(IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY);
+    aborts_if len(execution_hash) == 0;
+    let execution_key = std::string::spec_utf8(IS_MULTI_STEP_PROPOSAL_KEY);
+    aborts_if simple_map::spec_contains_key(metadata, execution_key);
+    aborts_if voting_forum.next_proposal_id + 1 > MAX_U64;
+    let is_multi_step_in_execution_key = std::string::spec_utf8(IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY);
+    aborts_if is_multi_step_proposal && simple_map::spec_contains_key(metadata, is_multi_step_in_execution_key);
+    let post post_voting_forum = global<VotingForum<ProposalType>>(voting_forum_address);
+    let post post_metadata = table::spec_get(post_voting_forum.proposals, proposal_id).metadata;
+    ensures post_voting_forum.next_proposal_id == voting_forum.next_proposal_id + 1;
+    ensures table::spec_contains(post_voting_forum.proposals, proposal_id);
+    ensures if (is_multi_step_proposal) {
+        simple_map::spec_get(post_metadata, is_multi_step_in_execution_key) == std::bcs::serialize(false)
+    } else {
+        !simple_map::spec_contains_key(post_metadata, is_multi_step_in_execution_key)
+    };
+}
+
+ + + + + +### Function `vote` + + +
public fun vote<ProposalType: store>(_proof: &ProposalType, voting_forum_address: address, proposal_id: u64, num_votes: u64, should_pass: bool)
+
+ + + + +
requires chain_status::is_operating();
+// This enforces high-level requirement 2:
+aborts_if !exists<VotingForum<ProposalType>>(voting_forum_address);
+let voting_forum = global<VotingForum<ProposalType>>(voting_forum_address);
+let proposal = table::spec_get(voting_forum.proposals, proposal_id);
+aborts_if !table::spec_contains(voting_forum.proposals, proposal_id);
+aborts_if is_voting_period_over(proposal);
+aborts_if proposal.is_resolved;
+aborts_if !exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+aborts_if !std::string::spec_internal_check_utf8(IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY);
+let execution_key = std::string::spec_utf8(IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY);
+aborts_if simple_map::spec_contains_key(proposal.metadata, execution_key) &&
+          simple_map::spec_get(proposal.metadata, execution_key) != std::bcs::serialize(false);
+aborts_if if (should_pass) { proposal.yes_votes + num_votes > MAX_U128 } else { proposal.no_votes + num_votes > MAX_U128 };
+aborts_if !std::string::spec_internal_check_utf8(RESOLVABLE_TIME_METADATA_KEY);
+let post post_voting_forum = global<VotingForum<ProposalType>>(voting_forum_address);
+let post post_proposal = table::spec_get(post_voting_forum.proposals, proposal_id);
+ensures if (should_pass) {
+    post_proposal.yes_votes == proposal.yes_votes + num_votes
+} else {
+    post_proposal.no_votes == proposal.no_votes + num_votes
+};
+let timestamp_secs_bytes = std::bcs::serialize(timestamp::spec_now_seconds());
+let key = std::string::spec_utf8(RESOLVABLE_TIME_METADATA_KEY);
+ensures simple_map::spec_get(post_proposal.metadata, key) == timestamp_secs_bytes;
+
+ + + + + +### Function `is_proposal_resolvable` + + +
fun is_proposal_resolvable<ProposalType: store>(voting_forum_address: address, proposal_id: u64)
+
+ + + + +
requires chain_status::is_operating();
+include IsProposalResolvableAbortsIf<ProposalType>;
+
+ + + + + + + +
schema IsProposalResolvableAbortsIf<ProposalType> {
+    voting_forum_address: address;
+    proposal_id: u64;
+    include AbortsIfNotContainProposalID<ProposalType>;
+    let voting_forum = global<VotingForum<ProposalType>>(voting_forum_address);
+    let proposal = table::spec_get(voting_forum.proposals, proposal_id);
+    let voting_closed = spec_is_voting_closed<ProposalType>(voting_forum_address, proposal_id);
+    aborts_if voting_closed && (proposal.yes_votes <= proposal.no_votes || proposal.yes_votes + proposal.no_votes < proposal.min_vote_threshold);
+    aborts_if !voting_closed;
+    aborts_if proposal.is_resolved;
+    aborts_if !std::string::spec_internal_check_utf8(RESOLVABLE_TIME_METADATA_KEY);
+    aborts_if !simple_map::spec_contains_key(proposal.metadata, std::string::spec_utf8(RESOLVABLE_TIME_METADATA_KEY));
+    aborts_if !from_bcs::deserializable<u64>(simple_map::spec_get(proposal.metadata, std::string::spec_utf8(RESOLVABLE_TIME_METADATA_KEY)));
+    aborts_if timestamp::spec_now_seconds() <= from_bcs::deserialize<u64>(simple_map::spec_get(proposal.metadata, std::string::spec_utf8(RESOLVABLE_TIME_METADATA_KEY)));
+    aborts_if transaction_context::spec_get_script_hash() != proposal.execution_hash;
+}
+
+ + + + + +### Function `resolve` + + +
public fun resolve<ProposalType: store>(voting_forum_address: address, proposal_id: u64): ProposalType
+
+ + + + +
requires chain_status::is_operating();
+include IsProposalResolvableAbortsIf<ProposalType>;
+aborts_if !std::string::spec_internal_check_utf8(IS_MULTI_STEP_PROPOSAL_KEY);
+let voting_forum = global<VotingForum<ProposalType>>(voting_forum_address);
+let proposal = table::spec_get(voting_forum.proposals, proposal_id);
+let multi_step_key = std::string::spec_utf8(IS_MULTI_STEP_PROPOSAL_KEY);
+let has_multi_step_key = simple_map::spec_contains_key(proposal.metadata, multi_step_key);
+aborts_if has_multi_step_key && !from_bcs::deserializable<bool>(simple_map::spec_get(proposal.metadata, multi_step_key));
+aborts_if has_multi_step_key && from_bcs::deserialize<bool>(simple_map::spec_get(proposal.metadata, multi_step_key));
+let post post_voting_forum = global<VotingForum<ProposalType>>(voting_forum_address);
+let post post_proposal = table::spec_get(post_voting_forum.proposals, proposal_id);
+aborts_if !exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+// This enforces high-level requirement 3:
+ensures post_proposal.is_resolved == true;
+ensures post_proposal.resolution_time_secs == timestamp::spec_now_seconds();
+aborts_if option::spec_is_none(proposal.execution_content);
+ensures result == option::spec_borrow(proposal.execution_content);
+ensures option::spec_is_none(post_proposal.execution_content);
+
+ + + + + +### Function `resolve_proposal_v2` + + +
public fun resolve_proposal_v2<ProposalType: store>(voting_forum_address: address, proposal_id: u64, next_execution_hash: vector<u8>)
+
+ + + + +
pragma verify_duration_estimate = 300;
+requires chain_status::is_operating();
+include IsProposalResolvableAbortsIf<ProposalType>;
+let voting_forum = global<VotingForum<ProposalType>>(voting_forum_address);
+let proposal = table::spec_get(voting_forum.proposals, proposal_id);
+let post post_voting_forum = global<VotingForum<ProposalType>>(voting_forum_address);
+let post post_proposal = table::spec_get(post_voting_forum.proposals, proposal_id);
+let multi_step_in_execution_key = std::string::spec_utf8(IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY);
+aborts_if !std::string::spec_internal_check_utf8(IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY);
+aborts_if !std::string::spec_internal_check_utf8(IS_MULTI_STEP_PROPOSAL_KEY);
+ensures (simple_map::spec_contains_key(proposal.metadata, multi_step_in_execution_key) && len(next_execution_hash) != 0) ==>
+    simple_map::spec_get(post_proposal.metadata, multi_step_in_execution_key) == std::bcs::serialize(true);
+ensures (simple_map::spec_contains_key(proposal.metadata, multi_step_in_execution_key) &&
+    (len(next_execution_hash) == 0 && !is_multi_step)) ==>
+    simple_map::spec_get(post_proposal.metadata, multi_step_in_execution_key) == std::bcs::serialize(true);
+let multi_step_key = std::string::spec_utf8(IS_MULTI_STEP_PROPOSAL_KEY);
+aborts_if simple_map::spec_contains_key(proposal.metadata, multi_step_key) &&
+    !from_bcs::deserializable<bool>(simple_map::spec_get(proposal.metadata, multi_step_key));
+let is_multi_step = simple_map::spec_contains_key(proposal.metadata, multi_step_key) &&
+    from_bcs::deserialize(simple_map::spec_get(proposal.metadata, multi_step_key));
+aborts_if !is_multi_step && len(next_execution_hash) != 0;
+aborts_if len(next_execution_hash) == 0 && !exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+aborts_if len(next_execution_hash) == 0 && is_multi_step && !simple_map::spec_contains_key(proposal.metadata, multi_step_in_execution_key);
+// This enforces high-level requirement 4:
+ensures len(next_execution_hash) == 0 ==> post_proposal.resolution_time_secs == timestamp::spec_now_seconds();
+ensures len(next_execution_hash) == 0 ==> post_proposal.is_resolved == true;
+ensures (len(next_execution_hash) == 0 && is_multi_step) ==> simple_map::spec_get(post_proposal.metadata, multi_step_in_execution_key) == std::bcs::serialize(false);
+ensures len(next_execution_hash) != 0 ==> post_proposal.execution_hash == next_execution_hash;
+
+ + + + + +### Function `next_proposal_id` + + +
#[view]
+public fun next_proposal_id<ProposalType: store>(voting_forum_address: address): u64
+
+ + + + +
aborts_if !exists<VotingForum<ProposalType>>(voting_forum_address);
+ensures result == global<VotingForum<ProposalType>>(voting_forum_address).next_proposal_id;
+
+ + + + + +### Function `get_proposer` + + +
#[view]
+public fun get_proposer<ProposalType: store>(voting_forum_address: address, proposal_id: u64): address
+
+ + + + +
include AbortsIfNotContainProposalID<ProposalType>;
+let voting_forum = global<VotingForum<ProposalType>>(voting_forum_address);
+let proposal = table::spec_get(voting_forum.proposals, proposal_id);
+ensures result == proposal.proposer;
+
+ + + + + +### Function `is_voting_closed` + + +
#[view]
+public fun is_voting_closed<ProposalType: store>(voting_forum_address: address, proposal_id: u64): bool
+
+ + + + +
requires chain_status::is_operating();
+include AbortsIfNotContainProposalID<ProposalType>;
+aborts_if !exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+ensures result == spec_is_voting_closed<ProposalType>(voting_forum_address, proposal_id);
+
+ + + + + + + +
fun spec_is_voting_closed<ProposalType: store>(voting_forum_address: address, proposal_id: u64): bool {
+   let voting_forum = global<VotingForum<ProposalType>>(voting_forum_address);
+   let proposal = table::spec_get(voting_forum.proposals, proposal_id);
+   spec_can_be_resolved_early<ProposalType>(proposal) || is_voting_period_over(proposal)
+}
+
+ + + + + +### Function `can_be_resolved_early` + + +
public fun can_be_resolved_early<ProposalType: store>(proposal: &voting::Proposal<ProposalType>): bool
+
+ + + + +
aborts_if false;
+ensures result == spec_can_be_resolved_early<ProposalType>(proposal);
+
+ + + + + + + +
fun spec_can_be_resolved_early<ProposalType: store>(proposal: Proposal<ProposalType>): bool {
+   if (option::spec_is_some(proposal.early_resolution_vote_threshold)) {
+       let early_resolution_threshold = option::spec_borrow(proposal.early_resolution_vote_threshold);
+       if (proposal.yes_votes >= early_resolution_threshold || proposal.no_votes >= early_resolution_threshold) {
+           true
+       } else{
+           false
+       }
+   } else {
+       false
+   }
+}
+
+ + + + + + + +
fun spec_get_proposal_state<ProposalType>(
+   voting_forum_address: address,
+   proposal_id: u64,
+   voting_forum: VotingForum<ProposalType>
+): u64 {
+   let proposal = table::spec_get(voting_forum.proposals, proposal_id);
+   let voting_closed = spec_is_voting_closed<ProposalType>(voting_forum_address, proposal_id);
+   let proposal_vote_cond = (proposal.yes_votes > proposal.no_votes && proposal.yes_votes + proposal.no_votes >= proposal.min_vote_threshold);
+   if (voting_closed && proposal_vote_cond) {
+       PROPOSAL_STATE_SUCCEEDED
+   } else if (voting_closed && !proposal_vote_cond) {
+       PROPOSAL_STATE_FAILED
+   } else {
+       PROPOSAL_STATE_PENDING
+   }
+}
+
+ + + + + + + +
fun spec_get_proposal_expiration_secs<ProposalType: store>(
+   voting_forum_address: address,
+   proposal_id: u64,
+): u64 {
+   let voting_forum = global<VotingForum<ProposalType>>(voting_forum_address);
+   let proposal = table::spec_get(voting_forum.proposals, proposal_id);
+   proposal.expiration_secs
+}
+
+ + + + + +### Function `get_proposal_metadata` + + +
#[view]
+public fun get_proposal_metadata<ProposalType: store>(voting_forum_address: address, proposal_id: u64): simple_map::SimpleMap<string::String, vector<u8>>
+
+ + + + +
include AbortsIfNotContainProposalID<ProposalType>;
+let voting_forum = global<VotingForum<ProposalType>>(voting_forum_address);
+let proposal = table::spec_get(voting_forum.proposals, proposal_id);
+ensures result == proposal.metadata;
+
+ + + + + +### Function `get_proposal_metadata_value` + + +
#[view]
+public fun get_proposal_metadata_value<ProposalType: store>(voting_forum_address: address, proposal_id: u64, metadata_key: string::String): vector<u8>
+
+ + + + +
include AbortsIfNotContainProposalID<ProposalType>;
+let voting_forum = global<VotingForum<ProposalType>>(voting_forum_address);
+let proposal = table::spec_get(voting_forum.proposals, proposal_id);
+aborts_if !simple_map::spec_contains_key(proposal.metadata, metadata_key);
+ensures result == simple_map::spec_get(proposal.metadata, metadata_key);
+
+ + + + + +### Function `get_proposal_state` + + +
#[view]
+public fun get_proposal_state<ProposalType: store>(voting_forum_address: address, proposal_id: u64): u64
+
+ + + + +
pragma addition_overflow_unchecked;
+requires chain_status::is_operating();
+include AbortsIfNotContainProposalID<ProposalType>;
+let voting_forum = global<VotingForum<ProposalType>>(voting_forum_address);
+ensures result == spec_get_proposal_state(voting_forum_address, proposal_id, voting_forum);
+
+ + + + + +### Function `get_proposal_creation_secs` + + +
#[view]
+public fun get_proposal_creation_secs<ProposalType: store>(voting_forum_address: address, proposal_id: u64): u64
+
+ + + + +
include AbortsIfNotContainProposalID<ProposalType>;
+let voting_forum = global<VotingForum<ProposalType>>(voting_forum_address);
+let proposal = table::spec_get(voting_forum.proposals, proposal_id);
+ensures result == proposal.creation_time_secs;
+
+ + + + + +### Function `get_proposal_expiration_secs` + + +
#[view]
+public fun get_proposal_expiration_secs<ProposalType: store>(voting_forum_address: address, proposal_id: u64): u64
+
+ + + + +
include AbortsIfNotContainProposalID<ProposalType>;
+ensures result == spec_get_proposal_expiration_secs<ProposalType>(voting_forum_address, proposal_id);
+
+ + + + + +### Function `get_execution_hash` + + +
#[view]
+public fun get_execution_hash<ProposalType: store>(voting_forum_address: address, proposal_id: u64): vector<u8>
+
+ + + + +
include AbortsIfNotContainProposalID<ProposalType>;
+let voting_forum = global<VotingForum<ProposalType>>(voting_forum_address);
+let proposal = table::spec_get(voting_forum.proposals, proposal_id);
+ensures result == proposal.execution_hash;
+
+ + + + + +### Function `get_min_vote_threshold` + + +
#[view]
+public fun get_min_vote_threshold<ProposalType: store>(voting_forum_address: address, proposal_id: u64): u128
+
+ + + + +
include AbortsIfNotContainProposalID<ProposalType>;
+let voting_forum = global<VotingForum<ProposalType>>(voting_forum_address);
+let proposal = table::spec_get(voting_forum.proposals, proposal_id);
+ensures result == proposal.min_vote_threshold;
+
+ + + + + +### Function `get_early_resolution_vote_threshold` + + +
#[view]
+public fun get_early_resolution_vote_threshold<ProposalType: store>(voting_forum_address: address, proposal_id: u64): option::Option<u128>
+
+ + + + +
include AbortsIfNotContainProposalID<ProposalType>;
+let voting_forum = global<VotingForum<ProposalType>>(voting_forum_address);
+let proposal = table::spec_get(voting_forum.proposals, proposal_id);
+ensures result == proposal.early_resolution_vote_threshold;
+
+ + + + + +### Function `get_votes` + + +
#[view]
+public fun get_votes<ProposalType: store>(voting_forum_address: address, proposal_id: u64): (u128, u128)
+
+ + + + +
include AbortsIfNotContainProposalID<ProposalType>;
+let voting_forum = global<VotingForum<ProposalType>>(voting_forum_address);
+let proposal = table::spec_get(voting_forum.proposals, proposal_id);
+ensures result_1 == proposal.yes_votes;
+ensures result_2 == proposal.no_votes;
+
+ + + + + +### Function `is_resolved` + + +
#[view]
+public fun is_resolved<ProposalType: store>(voting_forum_address: address, proposal_id: u64): bool
+
+ + + + +
include AbortsIfNotContainProposalID<ProposalType>;
+let voting_forum = global<VotingForum<ProposalType>>(voting_forum_address);
+let proposal = table::spec_get(voting_forum.proposals, proposal_id);
+ensures result == proposal.is_resolved;
+
+ + + + + + + +
schema AbortsIfNotContainProposalID<ProposalType> {
+    proposal_id: u64;
+    voting_forum_address: address;
+    let voting_forum = global<VotingForum<ProposalType>>(voting_forum_address);
+    aborts_if !table::spec_contains(voting_forum.proposals, proposal_id);
+    aborts_if !exists<VotingForum<ProposalType>>(voting_forum_address);
+}
+
+ + + + + +### Function `get_resolution_time_secs` + + +
#[view]
+public fun get_resolution_time_secs<ProposalType: store>(voting_forum_address: address, proposal_id: u64): u64
+
+ + + + +
include AbortsIfNotContainProposalID<ProposalType>;
+let voting_forum = global<VotingForum<ProposalType>>(voting_forum_address);
+let proposal = table::spec_get(voting_forum.proposals, proposal_id);
+ensures result == proposal.resolution_time_secs;
+
+ + + + + +### Function `is_multi_step_proposal_in_execution` + + +
#[view]
+public fun is_multi_step_proposal_in_execution<ProposalType: store>(voting_forum_address: address, proposal_id: u64): bool
+
+ + + + +
include AbortsIfNotContainProposalID<ProposalType>;
+let voting_forum = global<VotingForum<ProposalType>>(voting_forum_address);
+let proposal = table::spec_get(voting_forum.proposals,proposal_id);
+aborts_if !std::string::spec_internal_check_utf8(IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY);
+let execution_key = std::string::spec_utf8(IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY);
+aborts_if !simple_map::spec_contains_key(proposal.metadata,execution_key);
+let is_multi_step_in_execution_key = simple_map::spec_get(proposal.metadata,execution_key);
+aborts_if !from_bcs::deserializable<bool>(is_multi_step_in_execution_key);
+ensures result == from_bcs::deserialize<bool>(is_multi_step_in_execution_key);
+
+ + + + + +### Function `is_voting_period_over` + + +
fun is_voting_period_over<ProposalType: store>(proposal: &voting::Proposal<ProposalType>): bool
+
+ + + + +
requires chain_status::is_operating();
+aborts_if false;
+ensures result == (timestamp::spec_now_seconds() > proposal.expiration_secs);
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/supra-framework/tests/deflation_token_tests.move b/aptos-move/framework/supra-framework/tests/deflation_token_tests.move index 3178dbdfd4a85..35508a28f407b 100644 --- a/aptos-move/framework/supra-framework/tests/deflation_token_tests.move +++ b/aptos-move/framework/supra-framework/tests/deflation_token_tests.move @@ -152,7 +152,7 @@ module 0xcafe::deflation_token_tests { &creator_ref, option::some(withdraw), option::none(), - option::none(), + option::none() ); } @@ -218,7 +218,7 @@ module 0xcafe::deflation_token_tests { &creator_ref, option::some(withdraw), option::none(), - option::some(withdraw), + option::some(withdraw) ); } @@ -241,7 +241,7 @@ module 0xcafe::deflation_token_tests { &creator_ref, option::some(withdraw), option::none(), - option::none(), + option::none() ); } @@ -263,7 +263,7 @@ module 0xcafe::deflation_token_tests { &creator_ref, option::some(withdraw), option::none(), - option::none(), + option::none() ); } @@ -303,7 +303,7 @@ module 0xcafe::deflation_token_tests { &creator_ref, option::some(withdraw), option::none(), - option::none(), + option::none() ); } diff --git a/aptos-move/framework/supra-framework/tests/native_disaptch_token_tests.move b/aptos-move/framework/supra-framework/tests/native_disaptch_token_tests.move new file mode 100644 index 0000000000000..d53c0d3fa8856 --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/native_disaptch_token_tests.move @@ -0,0 +1,16 @@ +#[test_only] +module supra_framework::native_dispatch_token_tests { + use supra_framework::fungible_asset; + use 0xcafe::native_dispatch_token; + + #[test(creator = @0xcafe)] + #[expected_failure(abort_code=0x10019, location=supra_framework::fungible_asset)] + fun test_native_dispatch_token( + creator: &signer, + ) { + let (creator_ref, _) = fungible_asset::create_test_token(creator); + fungible_asset::init_test_metadata(&creator_ref); + + 
native_dispatch_token::initialize(creator, &creator_ref); + } +} diff --git a/aptos-move/framework/supra-framework/tests/native_dispatch_token.move b/aptos-move/framework/supra-framework/tests/native_dispatch_token.move new file mode 100644 index 0000000000000..2ec6e921fa17f --- /dev/null +++ b/aptos-move/framework/supra-framework/tests/native_dispatch_token.move @@ -0,0 +1,33 @@ +#[test_only] +module 0xcafe::native_dispatch_token { + use supra_framework::fungible_asset::{FungibleAsset, TransferRef}; + use supra_framework::dispatchable_fungible_asset; + use supra_framework::object::{ConstructorRef, Object}; + use supra_framework::function_info; + + use std::option; + use std::signer; + use std::string; + + public fun initialize(account: &signer, constructor_ref: &ConstructorRef) { + assert!(signer::address_of(account) == @0xcafe, 1); + let withdraw = function_info::new_function_info( + account, + string::utf8(b"native_dispatch_token"), + string::utf8(b"withdraw"), + ); + + dispatchable_fungible_asset::register_dispatch_functions( + constructor_ref, + option::some(withdraw), + option::none(), + option::none(), + ); + } + + public native fun withdraw( + store: Object, + _amount: u64, + transfer_ref: &TransferRef, + ): FungibleAsset; +} diff --git a/aptos-move/framework/supra-framework/tests/nil_op_token.move b/aptos-move/framework/supra-framework/tests/nil_op_token.move index 9116432aa82e8..46e8ef886fc1c 100644 --- a/aptos-move/framework/supra-framework/tests/nil_op_token.move +++ b/aptos-move/framework/supra-framework/tests/nil_op_token.move @@ -21,7 +21,7 @@ module 0xcafe::nil_op_token { constructor_ref, option::some(withdraw), option::none(), - option::none() + option::none(), ); } diff --git a/aptos-move/framework/supra-framework/tests/permissioned_token.move b/aptos-move/framework/supra-framework/tests/permissioned_token.move index 2e0c7065ce750..97007a076b06f 100644 --- a/aptos-move/framework/supra-framework/tests/permissioned_token.move +++ 
b/aptos-move/framework/supra-framework/tests/permissioned_token.move @@ -31,7 +31,7 @@ module 0xcafe::permissioned_token { constructor_ref, option::some(withdraw), option::none(), - option::none() + option::none(), ); } diff --git a/aptos-move/framework/supra-framework/tests/simple_dispatchable_token.move b/aptos-move/framework/supra-framework/tests/simple_dispatchable_token.move index 3e13e10154582..cfbcd971c337b 100644 --- a/aptos-move/framework/supra-framework/tests/simple_dispatchable_token.move +++ b/aptos-move/framework/supra-framework/tests/simple_dispatchable_token.move @@ -28,7 +28,7 @@ module 0xcafe::simple_token { constructor_ref, option::some(withdraw), option::some(deposit), - option::none(), + option::none() ); } diff --git a/aptos-move/framework/supra-framework/tests/simple_dispatchable_token_pfs_tests.move b/aptos-move/framework/supra-framework/tests/simple_dispatchable_token_pfs_tests.move index 5bb30e2427cfa..389a11d50b728 100644 --- a/aptos-move/framework/supra-framework/tests/simple_dispatchable_token_pfs_tests.move +++ b/aptos-move/framework/supra-framework/tests/simple_dispatchable_token_pfs_tests.move @@ -28,7 +28,7 @@ module supra_framework::simple_token_pfs_tests { // User 2 burns their primary store but should still be able to transfer afterward. let user_2_primary_store = primary_store(user_2_address, metadata); - object::burn(user_2, user_2_primary_store); + object::burn_object(user_2, user_2_primary_store); assert!(object::is_burnt(user_2_primary_store), 0); // Balance still works assert!(balance(user_2_address, metadata) == 80, 0); @@ -54,7 +54,7 @@ module supra_framework::simple_token_pfs_tests { // User 2 burns their primary store but should still be able to withdraw afterward. 
let user_2_primary_store = primary_store(user_2_address, metadata); - object::burn(user_2, user_2_primary_store); + object::burn_object(user_2, user_2_primary_store); assert!(object::is_burnt(user_2_primary_store), 0); let coins = withdraw(user_2, metadata, 70); assert!(balance(user_2_address, metadata) == 10, 0); diff --git a/aptos-move/framework/supra-framework/tests/ten_x_token.move b/aptos-move/framework/supra-framework/tests/ten_x_token.move index d2304c4d577ec..172ede47c0955 100644 --- a/aptos-move/framework/supra-framework/tests/ten_x_token.move +++ b/aptos-move/framework/supra-framework/tests/ten_x_token.move @@ -6,21 +6,31 @@ module 0xcafe::ten_x_token { use supra_framework::function_info; use std::option; + use std::option::Option; use std::signer; use std::string; public fun initialize(account: &signer, constructor_ref: &ConstructorRef) { assert!(signer::address_of(account) == @0xcafe, 1); - let value = function_info::new_function_info( + let balance_value = function_info::new_function_info( account, string::utf8(b"ten_x_token"), string::utf8(b"derived_balance"), ); + let supply_value = function_info::new_function_info( + account, + string::utf8(b"ten_x_token"), + string::utf8(b"derived_supply"), + ); dispatchable_fungible_asset::register_dispatch_functions( constructor_ref, option::none(), option::none(), - option::some(value) + option::some(balance_value) + ); + dispatchable_fungible_asset::register_derive_supply_dispatch_function( + constructor_ref, + option::some(supply_value) ); } @@ -28,4 +38,12 @@ module 0xcafe::ten_x_token { // Derived value is always 10x! fungible_asset::balance(store) * 10 } + + public fun derived_supply(metadata: Object): Option { + // Derived supply is 10x. 
+ if(option::is_some(&fungible_asset::supply(metadata))) { + return option::some(option::extract(&mut fungible_asset::supply(metadata)) * 10) + }; + option::none() + } } diff --git a/aptos-move/framework/supra-framework/tests/ten_x_token_tests.move b/aptos-move/framework/supra-framework/tests/ten_x_token_tests.move index 40546cf56e930..7dcc1eba548fe 100644 --- a/aptos-move/framework/supra-framework/tests/ten_x_token_tests.move +++ b/aptos-move/framework/supra-framework/tests/ten_x_token_tests.move @@ -19,13 +19,17 @@ module supra_framework::ten_x_token_tests { ten_x_token::initialize(creator, &creator_ref); assert!(fungible_asset::supply(metadata) == option::some(0), 1); + assert!(dispatchable_fungible_asset::derived_supply(metadata) == option::some(0), 2); // Mint let fa = fungible_asset::mint(&mint, 100); - assert!(fungible_asset::supply(metadata) == option::some(100), 2); + assert!(fungible_asset::supply(metadata) == option::some(100), 3); // Deposit will cause an re-entrant call into dispatchable_fungible_asset dispatchable_fungible_asset::deposit(creator_store, fa); // The derived value is 10x - assert!(dispatchable_fungible_asset::derived_balance(creator_store) == 1000, 5); + assert!(dispatchable_fungible_asset::derived_balance(creator_store) == 1000, 4); + + // The derived supply is 10x + assert!(dispatchable_fungible_asset::derived_supply(metadata) == option::some(1000), 5); } } diff --git a/aptos-move/framework/tests/move_prover_tests.rs b/aptos-move/framework/tests/move_prover_tests.rs index 52f323002e43a..9c30ee555ed1d 100644 --- a/aptos-move/framework/tests/move_prover_tests.rs +++ b/aptos-move/framework/tests/move_prover_tests.rs @@ -62,6 +62,7 @@ pub fn run_prover_for_pkg(path_to_pkg: impl Into) { None, skip_attribute_checks, extended_checks::get_all_attribute_names(), + &[], ) .unwrap() } diff --git a/aptos-move/framework/tests/move_unit_test.rs b/aptos-move/framework/tests/move_unit_test.rs index 01a10c23fde07..9c6747e0fcf47 100644 --- 
a/aptos-move/framework/tests/move_unit_test.rs +++ b/aptos-move/framework/tests/move_unit_test.rs @@ -34,9 +34,10 @@ fn run_tests_for_pkg(path_to_pkg: impl Into) { &pkg_path, build_config.clone(), // TODO(Gas): double check if this is correct - UnitTestingConfig::default_with_bound(Some(100_000)), + UnitTestingConfig::default(), aptos_test_natives(), aptos_test_feature_flags_genesis(), + /* gas limit */ Some(100_000), /* cost_table */ None, /* compute_coverage */ false, &mut std::io::stdout(), @@ -52,9 +53,10 @@ fn run_tests_for_pkg(path_to_pkg: impl Into) { ok = run_move_unit_tests( &pkg_path, build_config, - UnitTestingConfig::default_with_bound(Some(100_000)), + UnitTestingConfig::default(), aptos_test_natives(), aptos_test_feature_flags_genesis(), + /* gas_limit */ Some(100_000), /* cost_table */ None, /* compute_coverage */ false, &mut std::io::stdout(), diff --git a/aptos-move/move-examples/README.md b/aptos-move/move-examples/README.md index 66958e4cf6fbe..626576661fdca 100644 --- a/aptos-move/move-examples/README.md +++ b/aptos-move/move-examples/README.md @@ -10,6 +10,11 @@ To play with these examples: **WARNING:** These Move examples have NOT been audited. If you are using them in a production system, proceed at your own risk. Particular care should be taken with Move examples that contain complex cryptographic code (e.g., `drand`, `veiled_coin`). +# Additional Resources + +- [Aptos Learn](https://learn.aptoslabs.com/code-examples/) provides more step-by-step guides on some high-quality examples. +- We also have another repo [move-by-examples](https://github.com/aptos-labs/move-by-examples). It has more newer examples and is actively maintained. 
+ # Contributing ## Writing a Move example diff --git a/aptos-move/move-examples/hello_blockchain/sources/hello_blockchain.move b/aptos-move/move-examples/hello_blockchain/sources/hello_blockchain.move index f32341fb1f386..aa3c6a7bfdc60 100644 --- a/aptos-move/move-examples/hello_blockchain/sources/hello_blockchain.move +++ b/aptos-move/move-examples/hello_blockchain/sources/hello_blockchain.move @@ -3,6 +3,8 @@ module hello_blockchain::message { use std::signer; use std::string; use supra_framework::event; + #[test_only] + use std::debug; //:!:>resource struct MessageHolder has key { @@ -47,6 +49,9 @@ module hello_blockchain::message { #[test(account = @0x1)] public entry fun sender_can_set_message(account: signer) acquires MessageHolder { + let msg: string::String = string::utf8(b"Running test for sender_can_set_message..."); + debug::print(&msg); + let addr = signer::address_of(&account); supra_framework::account::create_account_for_test(addr); set_message(account, string::utf8(b"Hello, Blockchain")); diff --git a/aptos-move/move-examples/large_packages/README.md b/aptos-move/move-examples/large_packages/README.md index e978bdcc17fbc..8bb08b1c655fd 100644 --- a/aptos-move/move-examples/large_packages/README.md +++ b/aptos-move/move-examples/large_packages/README.md @@ -1,14 +1,43 @@ -This package provides an experimental service for uploading very large modules to the Supra network. To publish using this API, you must divide your metadata and modules across multiple calls into `large_packages::stage_code`. 
Specifically: +# Aptos Large Packages Framework -* Make sure LargePackages is deployed to your network of choice, you can currently find it on testnet at `0xd20f305e3090a24c00524604dc2a42925a75c67aa6020d33033d516cf0878c4a` -* Compile your package -* Chunk up the metadata and modules and call `large_packages::stage_code` -* In your last call to `large_packages::stage_code` set `publish` to `true` +This module provides a framework for uploading large packages to the Supra network, under standard +accounts or objects. +To publish using this API, you must divide your metadata and modules across multiple calls +into `large_packages::stage_code_chunk`. +In each pass, the caller pushes more code by calling `stage_code_chunk`. +In the final call, the caller can use `stage_code_chunk_and_publish_to_account`, `stage_code_chunk_and_publish_to_object`, or +`stage_code_chunk_and_upgrade_object_code` to upload the final data chunk and publish or upgrade the package on-chain. -The above logic is currently implemented in the Python SDK: `aptos-core/ecosystem/python/sdk/aptos_sdk/package_publisher.py` +The above logic is currently implemented in the Python +SDK: [`aptos-python-sdk`](https://github.com/aptos-labs/aptos-python-sdk/blob/main/aptos_sdk/package_publisher.py). -For validation purposes, this contains a package, `large_package_example` that exceeds the requirements for publishing in a single transaction. +Aptos CLI supports this as well with `--chunked-publish` flag: +- `aptos move publish [OPTIONS] --chunked-publish` +- `aptos move create-object-and-publish-package [OPTIONS] --address-name --chunked-publish` +- `aptos move upgrade-object-package [OPTIONS] --address-name --chunked-publish` -This framework has some limitations: -* There is no consistency checking until the publishing attempt -* Module code is not split across chunks, so if a single module is too big, it won't work +# Usage + +1. 
**Stage Code Chunks**: + - Call `stage_code_chunk` with the appropriate metadata and code chunks. + - Ensure that `code_indices` are provided from `0` to `last_module_idx`, without any + gaps. + + +2. **Publish or Upgrade**: + - In order to upload the last data chunk and publish the package, call `stage_code_chunk_and_publish_to_account` or `stage_code_chunk_and_publish_to_object`. + + - For object code upgrades, call `stage_code_chunk_and_upgrade_object_code` with the argument `code_object` provided. + +3. **Cleanup**: + - In order to remove `StagingArea` resource from an account, call `cleanup_staging_area`. + +# Notes + +* Make sure LargePackages is deployed to your network of choice, you can currently find it both on + mainnet and testnet at `0xa29df848eebfe5d981f708c2a5b06d31af2be53bbd8ddc94c8523f4b903f7adb` +* Ensure that `code_indices` have no gaps. For example, if code_indices are + provided as [0, 1, 3] (skipping index 2), the inline function `assemble_module_code` will abort + since `StagingArea.last_module_idx` is set as the max value of the provided index + from `code_indices`, and `assemble_module_code` will lookup the `StagingArea.code` SmartTable from + 0 to `StagingArea.last_module_idx` in turn. 
diff --git a/aptos-move/move-examples/large_packages/large_package_example/sources/eight.move b/aptos-move/move-examples/large_packages/large_package_example/sources/eight.move index 8db4e0bc8daa5..f10fbe510aff9 100644 --- a/aptos-move/move-examples/large_packages/large_package_example/sources/eight.move +++ b/aptos-move/move-examples/large_packages/large_package_example/sources/eight.move @@ -9,7 +9,22 @@ /// Long winded text that goes on and on and on /// Long winded text that goes on and on and on module large_package_example::eight { - public fun long_function(_a_very_long_name: u8, _b_very_long_name: u8, _c_very_long_name: u8, _d_very_long_name: u8, _e_very_long_name: u8): address { + + struct State has key { + value: u64 + } + + public entry fun hello(s: &signer, value: u64) { + move_to(s, State { value }) + } + + public fun long_function( + _a_very_long_name: u8, + _b_very_long_name: u8, + _c_very_long_name: u8, + _d_very_long_name: u8, + _e_very_long_name: u8 + ): address { @0x1 } diff --git a/aptos-move/move-examples/large_packages/sources/large_packages.move b/aptos-move/move-examples/large_packages/sources/large_packages.move index 92d61ebcc48c7..b1be125ca88e2 100644 --- a/aptos-move/move-examples/large_packages/sources/large_packages.move +++ b/aptos-move/move-examples/large_packages/sources/large_packages.move @@ -1,30 +1,84 @@ -/// This provides a framework for uploading large packages. In each pass, the caller pushes more -/// code by calling `stage_code`. In the last call, the caller can set the optoinal `publish` and -/// the package will be published inline, saving an extra transaction and additional storage costs. -/// Currently this module does not support modules that are larger than 63KB as that is the maximum -/// that can fit within a transaction and this framework does not split up individual modules. +/// This provides a framework for uploading large packages to standard accounts or objects. 
+/// In each pass, the caller pushes more code by calling `stage_code_chunk`. +/// In the final call, the caller can use `stage_code_chunk_and_publish_to_account`, `stage_code_chunk_and_publish_to_object`, or +/// `stage_code_chunk_and_upgrade_object_code` to upload the final data chunk and publish or upgrade the package on-chain. +/// +/// Note that `code_indices` must not have gaps. For example, if `code_indices` are provided as [0, 1, 3] +/// (skipping index 2), the inline function `assemble_module_code` will abort. This is because `StagingArea.last_module_idx` +/// is set to the maximum value from `code_indices`. When `assemble_module_code` iterates over the range from 0 to +/// `StagingArea.last_module_idx`, it expects each index to be present in the `StagingArea.code` SmartTable. +/// Any missing index in this range will cause the function to fail. module large_packages::large_packages { use std::error; + use std::option::{Self, Option}; use std::signer; use std::vector; + use aptos_std::smart_table::{Self, SmartTable}; - use supra_framework::code; + use supra_framework::code::{Self, PackageRegistry}; + use supra_framework::object::{Object}; + use supra_framework::object_code_deployment; /// code_indices and code_chunks should be the same length. const ECODE_MISMATCH: u64 = 1; + /// Object reference should be provided when upgrading object code. 
+ const EMISSING_OBJECT_REFERENCE: u64 = 2; - struct StagingArea has drop, key { + struct StagingArea has key { metadata_serialized: vector, - code: vector>, + code: SmartTable>, + last_module_idx: u64, } - public entry fun stage_code( + public entry fun stage_code_chunk( owner: &signer, - metadata_serialized: vector, + metadata_chunk: vector, + code_indices: vector, + code_chunks: vector>, + ) acquires StagingArea { + stage_code_chunk_internal(owner, metadata_chunk, code_indices, code_chunks); + } + + public entry fun stage_code_chunk_and_publish_to_account( + owner: &signer, + metadata_chunk: vector, + code_indices: vector, + code_chunks: vector>, + ) acquires StagingArea { + let staging_area = stage_code_chunk_internal(owner, metadata_chunk, code_indices, code_chunks); + publish_to_account(owner, staging_area); + cleanup_staging_area(owner); + } + + public entry fun stage_code_chunk_and_publish_to_object( + owner: &signer, + metadata_chunk: vector, + code_indices: vector, + code_chunks: vector>, + ) acquires StagingArea { + let staging_area = stage_code_chunk_internal(owner, metadata_chunk, code_indices, code_chunks); + publish_to_object(owner, staging_area); + cleanup_staging_area(owner); + } + + public entry fun stage_code_chunk_and_upgrade_object_code( + owner: &signer, + metadata_chunk: vector, code_indices: vector, code_chunks: vector>, - publish: bool, + code_object: Option>, ) acquires StagingArea { + let staging_area = stage_code_chunk_internal(owner, metadata_chunk, code_indices, code_chunks); + upgrade_object_code(owner, staging_area, option::extract(&mut code_object)); + cleanup_staging_area(owner); + } + + inline fun stage_code_chunk_internal( + owner: &signer, + metadata_chunk: vector, + code_indices: vector, + code_chunks: vector>, + ): &mut StagingArea acquires StagingArea { assert!( vector::length(&code_indices) == vector::length(&code_chunks), error::invalid_argument(ECODE_MISMATCH), @@ -34,42 +88,84 @@ module large_packages::large_packages { if 
(!exists(owner_address)) { move_to(owner, StagingArea { - metadata_serialized: vector::empty(), - code: vector::empty(), + metadata_serialized: vector[], + code: smart_table::new(), + last_module_idx: 0, }); }; let staging_area = borrow_global_mut(owner_address); - vector::append(&mut staging_area.metadata_serialized, metadata_serialized); - while (!vector::is_empty(&code_chunks)) { - let inner_code = vector::pop_back(&mut code_chunks); - let idx = (vector::pop_back(&mut code_indices) as u64); - while (vector::length(&staging_area.code) <= idx) { - vector::push_back(&mut staging_area.code, vector::empty()); + if (!vector::is_empty(&metadata_chunk)) { + vector::append(&mut staging_area.metadata_serialized, metadata_chunk); + }; + + let i = 0; + while (i < vector::length(&code_chunks)) { + let inner_code = *vector::borrow(&code_chunks, i); + let idx = (*vector::borrow(&code_indices, i) as u64); + + if (smart_table::contains(&staging_area.code, idx)) { + vector::append(smart_table::borrow_mut(&mut staging_area.code, idx), inner_code); + } else { + smart_table::add(&mut staging_area.code, idx, inner_code); + if (idx > staging_area.last_module_idx) { + staging_area.last_module_idx = idx; + } }; - let source_code = vector::borrow_mut(&mut staging_area.code, idx); - vector::append(source_code, inner_code) + i = i + 1; }; - let _ = staging_area; + staging_area + } - if (publish) { - publish_staged_code(owner, owner_address); - move_from(owner_address); - } + inline fun publish_to_account( + publisher: &signer, + staging_area: &mut StagingArea, + ) { + let code = assemble_module_code(staging_area); + code::publish_package_txn(publisher, staging_area.metadata_serialized, code); } - public entry fun cleanup(owner: &signer) acquires StagingArea { - move_from(signer::address_of(owner)); + inline fun publish_to_object( + publisher: &signer, + staging_area: &mut StagingArea, + ) { + let code = assemble_module_code(staging_area); + object_code_deployment::publish(publisher, 
staging_area.metadata_serialized, code); } - /// Publish code from staging area. - public entry fun publish_staged_code( + inline fun upgrade_object_code( publisher: &signer, - staging_area_address: address, - ) acquires StagingArea { - let staging_area = borrow_global_mut(staging_area_address); - code::publish_package_txn(publisher, staging_area.metadata_serialized, staging_area.code); + staging_area: &mut StagingArea, + code_object: Object, + ) { + let code = assemble_module_code(staging_area); + object_code_deployment::upgrade(publisher, staging_area.metadata_serialized, code, code_object); + } + + inline fun assemble_module_code( + staging_area: &mut StagingArea, + ): vector> { + let last_module_idx = staging_area.last_module_idx; + let code: vector> = vector[]; + let i: u64 = 0; + while (i <= last_module_idx) { + vector::push_back( + &mut code, + *smart_table::borrow(&staging_area.code, i) + ); + i = i + 1; + }; + code + } + + public entry fun cleanup_staging_area(owner: &signer) acquires StagingArea { + let StagingArea { + metadata_serialized: _, + code, + last_module_idx: _, + } = move_from(signer::address_of(owner)); + smart_table::destroy(code); } } diff --git a/aptos-move/move-examples/tests/move_unit_tests.rs b/aptos-move/move-examples/tests/move_unit_tests.rs index a27b93573131d..30459f481dbbc 100644 --- a/aptos-move/move-examples/tests/move_unit_tests.rs +++ b/aptos-move/move-examples/tests/move_unit_tests.rs @@ -41,10 +41,11 @@ pub fn run_tests_for_pkg( }, ..Default::default() }, - UnitTestingConfig::default_with_bound(Some(100_000)), + UnitTestingConfig::default(), // TODO(Gas): we may want to switch to non-zero costs in the future aptos_test_natives(), aptos_test_feature_flags_genesis(), + /* gas limit */ Some(100_000), /* cost_table */ None, /* compute_coverage */ false, &mut std::io::stdout(), diff --git a/aptos-move/mvhashmap/src/types.rs b/aptos-move/mvhashmap/src/types.rs index c5c8b0938e541..62cc81e30eaa6 100644 --- 
a/aptos-move/mvhashmap/src/types.rs +++ b/aptos-move/mvhashmap/src/types.rs @@ -246,13 +246,13 @@ pub(crate) mod test { use super::*; use aptos_aggregator::delta_change_set::serialize; use aptos_types::{ - access_path::AccessPath, executable::ModulePath, state_store::state_value::StateValue, write_set::{TransactionWrite, WriteOpKind}, }; use bytes::Bytes; use claims::{assert_err, assert_ok_eq}; + use move_core_types::{account_address::AccountAddress, identifier::IdentStr}; use std::{fmt::Debug, hash::Hash, sync::Arc}; #[derive(Clone, Eq, Hash, PartialEq, Debug)] @@ -262,8 +262,15 @@ pub(crate) mod test { ); impl ModulePath for KeyType { - fn module_path(&self) -> Option { - None + fn is_module_path(&self) -> bool { + false + } + + fn from_address_and_module_name( + _address: &AccountAddress, + _module_name: &IdentStr, + ) -> Self { + unreachable!("Irrelevant for test") } } diff --git a/aptos-move/mvhashmap/src/unsync_map.rs b/aptos-move/mvhashmap/src/unsync_map.rs index 539b95100a862..f3bbcef5f404d 100644 --- a/aptos-move/mvhashmap/src/unsync_map.rs +++ b/aptos-move/mvhashmap/src/unsync_map.rs @@ -242,7 +242,7 @@ impl< pub fn fetch_module(&self, key: &K) -> Option> { use MVModulesOutput::*; - debug_assert!(key.module_path().is_some()); + debug_assert!(key.is_module_path()); self.module_map.borrow_mut().get_mut(key).map(|entry| { let hash = entry.1.get_or_insert(module_hash(entry.0.as_ref())); diff --git a/aptos-move/vm-genesis/Cargo.toml b/aptos-move/vm-genesis/Cargo.toml index 59927cdd791a7..77e2b1b99108d 100644 --- a/aptos-move/vm-genesis/Cargo.toml +++ b/aptos-move/vm-genesis/Cargo.toml @@ -21,6 +21,7 @@ aptos-types = { workspace = true } aptos-vm = { workspace = true } bcs = { workspace = true } bytes = { workspace = true } +claims = { workspace = true } move-core-types = { workspace = true } move-vm-runtime = { workspace = true } move-vm-types = { workspace = true } diff --git a/aptos-move/vm-genesis/src/lib.rs b/aptos-move/vm-genesis/src/lib.rs index 
abe567af0d969..6276b7dd40b2b 100644 --- a/aptos-move/vm-genesis/src/lib.rs +++ b/aptos-move/vm-genesis/src/lib.rs @@ -43,6 +43,7 @@ use aptos_vm::{ data_cache::AsMoveResolver, move_vm_ext::{GenesisMoveVM, SessionExt}, }; +use claims::assert_ok; use move_core_types::{ account_address::AccountAddress, identifier::Identifier, @@ -189,7 +190,11 @@ pub fn encode_supra_mainnet_genesis_transaction( emit_new_block_and_epoch_event(&mut session); let configs = vm.genesis_change_set_configs(); - let mut change_set = session.finish(&configs).unwrap(); + let (mut change_set, module_write_set) = session.finish(&configs).unwrap(); + assert_ok!( + module_write_set.is_empty_or_invariant_violation(), + "Modules cannot be published in this session" + ); // Publish the framework, using a different session id, in case both scripts create tables. let state_view = GenesisStateView::new(); @@ -199,9 +204,9 @@ pub fn encode_supra_mainnet_genesis_transaction( new_id[31] = 1; let mut session = vm.new_genesis_session(&resolver, HashValue::new(new_id)); publish_framework(&mut session, framework); - let additional_change_set = session.finish(&configs).unwrap(); + let (additional_change_set, module_write_set) = session.finish(&configs).unwrap(); change_set - .squash_additional_change_set(additional_change_set, &configs) + .squash_additional_change_set(additional_change_set) .unwrap(); // Publishing stdlib should not produce any deltas around aggregators and map to write ops and @@ -217,7 +222,7 @@ pub fn encode_supra_mainnet_genesis_transaction( verify_genesis_write_set(change_set.events()); let change_set = change_set - .try_into_storage_change_set() + .try_combine_into_storage_change_set(module_write_set) .expect("Constructing a ChangeSet from VMChangeSet should always succeed at genesis"); Transaction::GenesisTransaction(WriteSetPayload::Direct(change_set)) } @@ -356,7 +361,11 @@ pub fn encode_genesis_change_set_for_testnet( emit_new_block_and_epoch_event(&mut session); let configs = 
vm.genesis_change_set_configs(); - let mut change_set = session.finish(&configs).unwrap(); + let (mut change_set, module_write_set) = session.finish(&configs).unwrap(); + assert_ok!( + module_write_set.is_empty_or_invariant_violation(), + "Modules cannot be published in this session" + ); let state_view = GenesisStateView::new(); let resolver = state_view.as_move_resolver(); @@ -366,9 +375,9 @@ pub fn encode_genesis_change_set_for_testnet( new_id[31] = 1; let mut session = vm.new_genesis_session(&resolver, HashValue::new(new_id)); publish_framework(&mut session, framework); - let additional_change_set = session.finish(&configs).unwrap(); + let (additional_change_set, module_write_set) = session.finish(&configs).unwrap(); change_set - .squash_additional_change_set(additional_change_set, &configs) + .squash_additional_change_set(additional_change_set) .unwrap(); // Publishing stdlib should not produce any deltas around aggregators and map to write ops and @@ -383,8 +392,9 @@ pub fn encode_genesis_change_set_for_testnet( .concrete_write_set_iter() .any(|(_, op)| op.expect("expect only concrete write ops").is_deletion())); verify_genesis_write_set(change_set.events()); + change_set - .try_into_storage_change_set() + .try_combine_into_storage_change_set(module_write_set) .expect("Constructing a ChangeSet from VMChangeSet should always succeed at genesis") } diff --git a/aptos-move/writeset-transaction-generator/Cargo.toml b/aptos-move/writeset-transaction-generator/Cargo.toml deleted file mode 100644 index b35659a241013..0000000000000 --- a/aptos-move/writeset-transaction-generator/Cargo.toml +++ /dev/null @@ -1,22 +0,0 @@ -[package] -name = "aptos-writeset-generator" -description = "Generating writesets used for incident management" -version = "0.1.0" - -# Workspace inherited keys -authors = { workspace = true } -edition = { workspace = true } -homepage = { workspace = true } -license = { workspace = true } -publish = { workspace = true } -repository = { workspace = 
true } -rust-version = { workspace = true } - -[dependencies] -anyhow = { workspace = true } -aptos-crypto = { workspace = true } -aptos-types = { workspace = true } -aptos-vm = { workspace = true } -move-core-types = { workspace = true } -move-vm-runtime = { workspace = true } -move-vm-types = { workspace = true } diff --git a/aptos-move/writeset-transaction-generator/src/lib.rs b/aptos-move/writeset-transaction-generator/src/lib.rs deleted file mode 100644 index e69d8c54014bb..0000000000000 --- a/aptos-move/writeset-transaction-generator/src/lib.rs +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright © Aptos Foundation -// Parts of the project are originally copyright © Meta Platforms, Inc. -// SPDX-License-Identifier: Apache-2.0 - -mod writeset_builder; - -pub use writeset_builder::{build_changeset, GenesisSession}; diff --git a/aptos-move/writeset-transaction-generator/src/writeset_builder.rs b/aptos-move/writeset-transaction-generator/src/writeset_builder.rs deleted file mode 100644 index 8d3640a8547a3..0000000000000 --- a/aptos-move/writeset-transaction-generator/src/writeset_builder.rs +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright © Aptos Foundation -// Parts of the project are originally copyright © Meta Platforms, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -use anyhow::format_err; -use aptos_crypto::HashValue; -use aptos_types::{ - account_address::AccountAddress, - account_config::{self, aptos_test_root_address}, - chain_id::ChainId, - state_store::StateView, - transaction::{ChangeSet, Script, Version}, -}; -use aptos_vm::{ - data_cache::AsMoveResolver, - move_vm_ext::{GenesisMoveVM, SessionExt}, -}; -use move_core_types::{ - identifier::Identifier, - language_storage::{ModuleId, TypeTag}, - transaction_argument::convert_txn_args, - value::{serialize_values, MoveValue}, -}; -use move_vm_runtime::module_traversal::{TraversalContext, TraversalStorage}; -use move_vm_types::gas::UnmeteredGasMeter; - -pub struct GenesisSession<'r, 'l>(SessionExt<'r, 'l>); - -impl<'r, 'l> GenesisSession<'r, 'l> { - pub fn exec_func( - &mut self, - module_name: &str, - function_name: &str, - ty_args: Vec, - args: Vec>, - ) { - let traversal_storage = TraversalStorage::new(); - self.0 - .execute_function_bypass_visibility( - &ModuleId::new( - account_config::CORE_CODE_ADDRESS, - Identifier::new(module_name).unwrap(), - ), - &Identifier::new(function_name).unwrap(), - ty_args, - args, - &mut UnmeteredGasMeter, - &mut TraversalContext::new(&traversal_storage), - ) - .unwrap_or_else(|e| { - panic!( - "Error calling {}.{}: {}", - module_name, - function_name, - e.into_vm_status() - ) - }); - } - - pub fn exec_script(&mut self, sender: AccountAddress, script: &Script) { - let mut temp = vec![sender.to_vec()]; - temp.extend(convert_txn_args(script.args())); - let traversal_storage = TraversalStorage::new(); - self.0 - .execute_script( - script.code().to_vec(), - script.ty_args().to_vec(), - temp, - &mut UnmeteredGasMeter, - &mut TraversalContext::new(&traversal_storage), - ) - .unwrap() - } - - fn disable_reconfiguration(&mut self) { - self.exec_func( - "Reconfiguration", - "disable_reconfiguration", - vec![], - serialize_values(&vec![MoveValue::Signer(aptos_test_root_address())]), - ) - } - - fn 
enable_reconfiguration(&mut self) { - self.exec_func( - "Reconfiguration", - "enable_reconfiguration", - vec![], - serialize_values(&vec![MoveValue::Signer(aptos_test_root_address())]), - ) - } - - pub fn set_aptos_version(&mut self, version: Version) { - self.exec_func( - "AptosVersion", - "set_version", - vec![], - serialize_values(&vec![ - MoveValue::Signer(aptos_test_root_address()), - MoveValue::U64(version), - ]), - ) - } -} - -pub fn build_changeset( - state_view: &S, - procedure: F, - chain_id: ChainId, - genesis_id: HashValue, -) -> ChangeSet -where - F: FnOnce(&mut GenesisSession), -{ - let vm = GenesisMoveVM::new(chain_id); - - let change_set = { - let resolver = state_view.as_move_resolver(); - let mut session = GenesisSession(vm.new_genesis_session(&resolver, genesis_id)); - session.disable_reconfiguration(); - procedure(&mut session); - session.enable_reconfiguration(); - session - .0 - .finish(&vm.genesis_change_set_configs()) - .map_err(|err| format_err!("Unexpected VM Error: {:?}", err)) - .unwrap() - }; - - // Genesis never produces the delta change set. 
- assert!(change_set.aggregator_v1_delta_set().is_empty()); - change_set - .try_into_storage_change_set() - .expect("Conversion from VMChangeSet into ChangeSet should always succeed") -} diff --git a/aptos-node/Cargo.toml b/aptos-node/Cargo.toml index 5281650c79561..033f7a4dadc0b 100644 --- a/aptos-node/Cargo.toml +++ b/aptos-node/Cargo.toml @@ -40,7 +40,7 @@ aptos-indexer-grpc-table-info = { workspace = true } aptos-infallible = { workspace = true } aptos-inspection-service = { workspace = true } aptos-jwk-consensus = { workspace = true } -aptos-logger = { workspace = true } +aptos-logger = { workspace = true, features = ["node-identity"] } aptos-mempool = { workspace = true } aptos-mempool-notifications = { workspace = true } aptos-network = { workspace = true } @@ -51,8 +51,6 @@ aptos-peer-monitoring-service-client = { workspace = true } aptos-peer-monitoring-service-server = { workspace = true } aptos-peer-monitoring-service-types = { workspace = true } aptos-runtimes = { workspace = true } -aptos-safety-rules = { workspace = true } -aptos-schemadb = { workspace = true } aptos-state-sync-driver = { workspace = true } aptos-storage-interface = { workspace = true } aptos-storage-service-client = { workspace = true } diff --git a/aptos-node/src/consensus.rs b/aptos-node/src/consensus.rs index a4d46059f5399..f1b84c307cbba 100644 --- a/aptos-node/src/consensus.rs +++ b/aptos-node/src/consensus.rs @@ -3,10 +3,20 @@ use crate::{network::ApplicationNetworkInterfaces, services}; use aptos_admin_service::AdminService; +use aptos_channels::aptos_channel::Receiver; use aptos_config::config::NodeConfig; use aptos_consensus::{ consensus_observer::{ - network_message::ConsensusObserverMessage, publisher::ConsensusPublisher, + network::{ + network_events::ConsensusObserverNetworkEvents, + network_handler::{ + ConsensusObserverNetworkHandler, ConsensusObserverNetworkMessage, + ConsensusPublisherNetworkMessage, + }, + observer_client::ConsensusObserverClient, + 
observer_message::ConsensusObserverMessage, + }, + publisher::consensus_publisher::ConsensusPublisher, }, consensus_provider::start_consensus_observer, network_interface::ConsensusMsg, @@ -17,87 +27,14 @@ use aptos_event_notifications::{ DbBackedOnChainConfig, EventNotificationListener, ReconfigNotificationListener, }; use aptos_jwk_consensus::{start_jwk_consensus_runtime, types::JWKConsensusMsg}; -use aptos_logger::debug; use aptos_mempool::QuorumStoreRequest; -use aptos_safety_rules::safety_rules_manager::load_consensus_key_from_secure_storage; +use aptos_network::application::interface::{NetworkClient, NetworkServiceEvents}; use aptos_storage_interface::DbReaderWriter; use aptos_validator_transaction_pool::VTxnPoolState; use futures::channel::mpsc::Sender; use std::sync::Arc; use tokio::runtime::Runtime; -/// Creates and returns the consensus observer runtime (if either the -/// observer or publisher is enabled). -pub fn create_consensus_observer_runtime( - node_config: &NodeConfig, - consensus_observer_network_interfaces: Option< - ApplicationNetworkInterfaces, - >, - consensus_publisher: Option>, - consensus_notifier: ConsensusNotifier, - consensus_to_mempool_sender: Sender, - db_rw: DbReaderWriter, - consensus_observer_reconfig_subscription: Option< - ReconfigNotificationListener, - >, -) -> Option { - if node_config - .consensus_observer - .is_observer_or_publisher_enabled() - { - // Fetch the network interfaces and reconfig subscription - let consensus_observer_network_interfaces = consensus_observer_network_interfaces - .expect("Consensus observer is enabled, but network interfaces are missing!"); - - // Start the consensus observer runtime - let consensus_observer_runtime = start_consensus_observer( - node_config, - consensus_observer_network_interfaces.network_client, - consensus_observer_network_interfaces.network_service_events, - consensus_publisher, - Arc::new(consensus_notifier), - consensus_to_mempool_sender, - db_rw, - 
consensus_observer_reconfig_subscription, - ); - Some(consensus_observer_runtime) - } else { - None - } -} - -/// Creates and returns the consensus publisher and runtime (if enabled) -pub fn create_consensus_publisher( - node_config: &NodeConfig, - consensus_observer_network_interfaces: &Option< - ApplicationNetworkInterfaces, - >, -) -> (Option, Option>) { - if node_config.consensus_observer.publisher_enabled { - // Get the network interfaces - let consensus_observer_network_interfaces = consensus_observer_network_interfaces - .as_ref() - .expect("Consensus publisher is enabled, but network interfaces are missing!"); - - // Create the publisher runtime - let runtime = aptos_runtimes::spawn_named_runtime("publisher".into(), None); - - // Create the consensus publisher - let (consensus_publisher, outbound_message_receiver) = ConsensusPublisher::new( - consensus_observer_network_interfaces.network_client.clone(), - node_config.consensus_observer, - ); - - // Start the consensus publisher - runtime.spawn(consensus_publisher.clone().start(outbound_message_receiver)); - - // Return the runtime and publisher - (Some(runtime), Some(Arc::new(consensus_publisher))) - } else { - (None, None) - } -} - /// Creates and starts the consensus runtime (if enabled) pub fn create_consensus_runtime( node_config: &NodeConfig, @@ -136,13 +73,9 @@ pub fn create_dkg_runtime( )>, dkg_network_interfaces: Option>, ) -> (VTxnPoolState, Option) { - let maybe_dkg_dealer_sk = - load_consensus_key_from_secure_storage(&node_config.consensus.safety_rules); - debug!("maybe_dkg_dealer_sk={:?}", maybe_dkg_dealer_sk); - let vtxn_pool = VTxnPoolState::default(); - let dkg_runtime = match (dkg_network_interfaces, maybe_dkg_dealer_sk) { - (Some(interfaces), Ok(dkg_dealer_sk)) => { + let dkg_runtime = match dkg_network_interfaces { + Some(interfaces) => { let ApplicationNetworkInterfaces { network_client, network_service_events, @@ -153,7 +86,7 @@ pub fn create_dkg_runtime( let rb_config = 
node_config.consensus.rand_rb_config.clone(); let dkg_runtime = start_dkg_runtime( my_addr, - dkg_dealer_sk, + &node_config.consensus.safety_rules, network_client, network_service_events, reconfig_events, @@ -180,15 +113,8 @@ pub fn create_jwk_consensus_runtime( jwk_consensus_network_interfaces: Option>, vtxn_pool: &VTxnPoolState, ) -> Option { - let maybe_jwk_consensus_key = - load_consensus_key_from_secure_storage(&node_config.consensus.safety_rules); - debug!( - "jwk_consensus_key_err={:?}", - maybe_jwk_consensus_key.as_ref().err() - ); - - let jwk_consensus_runtime = match (jwk_consensus_network_interfaces, maybe_jwk_consensus_key) { - (Some(interfaces), Ok(consensus_key)) => { + let jwk_consensus_runtime = match jwk_consensus_network_interfaces { + Some(interfaces) => { let ApplicationNetworkInterfaces { network_client, network_service_events, @@ -199,7 +125,7 @@ pub fn create_jwk_consensus_runtime( let my_addr = node_config.validator_network.as_ref().unwrap().peer_id(); let jwk_consensus_runtime = start_jwk_consensus_runtime( my_addr, - consensus_key, + &node_config.consensus.safety_rules, network_client, network_service_events, reconfig_events, @@ -212,3 +138,168 @@ pub fn create_jwk_consensus_runtime( }; jwk_consensus_runtime } + +/// Creates and starts the consensus observer and publisher (if enabled) +pub fn create_consensus_observer_and_publisher( + node_config: &NodeConfig, + consensus_observer_interfaces: Option>, + consensus_notifier: ConsensusNotifier, + consensus_to_mempool_sender: Sender, + db_rw: DbReaderWriter, + consensus_observer_reconfig_subscription: Option< + ReconfigNotificationListener, + >, +) -> ( + Option, + Option, + Option>, +) { + // If none of the consensus observer or publisher are enabled, return early + if !node_config + .consensus_observer + .is_observer_or_publisher_enabled() + { + return (None, None, None); + } + + // Fetch the consensus observer network client and events + let consensus_observer_interfaces = 
consensus_observer_interfaces + .expect("Consensus observer is enabled, but the network interfaces are missing!"); + let consensus_observer_client = consensus_observer_interfaces.network_client; + let consensus_observer_events = consensus_observer_interfaces.network_service_events; + + // Create the consensus observer client and network handler + let consensus_observer_client = + Arc::new(ConsensusObserverClient::new(consensus_observer_client)); + let ( + consensus_observer_runtime, + consensus_observer_message_receiver, + consensus_publisher_message_receiver, + ) = create_observer_network_handler(node_config, consensus_observer_events); + + // Create the consensus publisher (if enabled) + let (consensus_publisher_runtime, consensus_publisher) = create_consensus_publisher( + node_config, + consensus_observer_client.clone(), + consensus_publisher_message_receiver, + ); + + // Create the consensus observer (if enabled) + create_consensus_observer( + node_config, + &consensus_observer_runtime, + consensus_observer_client, + consensus_observer_message_receiver, + consensus_publisher.clone(), + consensus_notifier, + consensus_to_mempool_sender, + db_rw, + consensus_observer_reconfig_subscription, + ); + + ( + Some(consensus_observer_runtime), + consensus_publisher_runtime, + consensus_publisher, + ) +} + +/// Creates and starts the consensus observer (if enabled) +fn create_consensus_observer( + node_config: &NodeConfig, + consensus_observer_runtime: &Runtime, + consensus_observer_client: Arc< + ConsensusObserverClient>, + >, + consensus_observer_message_receiver: Receiver<(), ConsensusObserverNetworkMessage>, + consensus_publisher: Option>, + state_sync_notifier: ConsensusNotifier, + consensus_to_mempool_sender: Sender, + db_rw: DbReaderWriter, + observer_reconfig_subscription: Option>, +) { + // If the observer is not enabled, return early + if !node_config.consensus_observer.observer_enabled { + return; + } + + // Create the consensus observer + 
start_consensus_observer( + node_config, + consensus_observer_runtime, + consensus_observer_client, + consensus_observer_message_receiver, + consensus_publisher, + Arc::new(state_sync_notifier), + consensus_to_mempool_sender, + db_rw, + observer_reconfig_subscription, + ); +} + +/// Creates and returns the consensus publisher and runtime (if enabled) +fn create_consensus_publisher( + node_config: &NodeConfig, + consensus_observer_client: Arc< + ConsensusObserverClient>, + >, + publisher_message_receiver: Receiver<(), ConsensusPublisherNetworkMessage>, +) -> (Option, Option>) { + // If the publisher is not enabled, return early + if !node_config.consensus_observer.publisher_enabled { + return (None, None); + } + + // Create the publisher runtime + let runtime = aptos_runtimes::spawn_named_runtime("publisher".into(), None); + + // Create the consensus publisher + let (consensus_publisher, outbound_message_receiver) = + ConsensusPublisher::new(node_config.consensus_observer, consensus_observer_client); + + // Start the consensus publisher + runtime.spawn( + consensus_publisher + .clone() + .start(outbound_message_receiver, publisher_message_receiver), + ); + + // Return the runtime and publisher + (Some(runtime), Some(Arc::new(consensus_publisher))) +} + +/// Creates the consensus observer network handler, and returns the observer +/// runtime, observer message receiver, and publisher message receiver. 
+fn create_observer_network_handler( + node_config: &NodeConfig, + consensus_observer_events: NetworkServiceEvents, +) -> ( + Runtime, + Receiver<(), ConsensusObserverNetworkMessage>, + Receiver<(), ConsensusPublisherNetworkMessage>, +) { + // Create the consensus observer runtime + let consensus_observer_runtime = aptos_runtimes::spawn_named_runtime("observer".into(), None); + + // Create the consensus observer network events + let consensus_observer_events = ConsensusObserverNetworkEvents::new(consensus_observer_events); + + // Create the consensus observer network handler + let ( + consensus_observer_network_handler, + consensus_observer_message_receiver, + consensus_publisher_message_receiver, + ) = ConsensusObserverNetworkHandler::new( + node_config.consensus_observer, + consensus_observer_events, + ); + + // Start the consensus observer network handler + consensus_observer_runtime.spawn(consensus_observer_network_handler.start()); + + ( + consensus_observer_runtime, + consensus_observer_message_receiver, + consensus_publisher_message_receiver, + ) +} diff --git a/aptos-node/src/lib.rs b/aptos-node/src/lib.rs index a4cab4f7fc792..4634c22c3efb7 100644 --- a/aptos-node/src/lib.rs +++ b/aptos-node/src/lib.rs @@ -20,11 +20,8 @@ use anyhow::anyhow; use aptos_admin_service::AdminService; use aptos_api::bootstrap as bootstrap_api; use aptos_build_info::build_information; -use aptos_config::config::{ - merge_node_config, InitialSafetyRulesConfig, NodeConfig, PersistableConfig, -}; +use aptos_config::config::{merge_node_config, NodeConfig, PersistableConfig}; use aptos_framework::ReleaseBundle; -use aptos_indexer_grpc_table_info::internal_indexer_db_service::InternalIndexerDBService; use aptos_logger::{prelude::*, telemetry_log_writer::TelemetryLog, Level, LoggerFilterUpdater}; use aptos_state_sync_driver::driver_factory::StateSyncRuntimes; use aptos_types::{chain_id::ChainId, on_chain_config::OnChainJWKConsensusConfig}; @@ -513,7 +510,6 @@ where // Configure the 
validator network let validator_network = node_config.validator_network.as_mut().unwrap(); - validator_network.max_concurrent_network_reqs = 1; validator_network.connectivity_check_interval_ms = 10000; validator_network.max_connection_delay_ms = 10000; validator_network.ping_interval_ms = 10000; @@ -521,7 +517,6 @@ where // Configure the fullnode network let fullnode_network = node_config.full_node_networks.get_mut(0).unwrap(); - fullnode_network.max_concurrent_network_reqs = 1; fullnode_network.connectivity_check_interval_ms = 10000; fullnode_network.max_connection_delay_ms = 10000; fullnode_network.ping_interval_ms = 10000; @@ -610,7 +605,7 @@ pub fn setup_environment_and_start_node( let mut admin_service = services::start_admin_service(&node_config); // Set up the storage database and any RocksDB checkpoints - let (db_rw, backup_service, genesis_waypoint) = + let (db_rw, backup_service, genesis_waypoint, indexer_db_opt) = storage::initialize_database_and_checkpoints(&mut node_config)?; admin_service.set_aptos_db(db_rw.clone().into()); @@ -667,8 +662,6 @@ pub fn setup_environment_and_start_node( db_rw.reader.clone(), ); - let internal_indexer_db = InternalIndexerDBService::get_indexer_db(&node_config); - // Start state sync and get the notification endpoints for mempool and consensus let (aptos_data_client, state_sync_runtimes, mempool_listener, consensus_notifier) = state_sync::start_state_sync_and_get_notification_handles( @@ -677,7 +670,6 @@ pub fn setup_environment_and_start_node( genesis_waypoint, event_subscription_service, db_rw.clone(), - internal_indexer_db.clone(), )?; // Start the node inspection service @@ -695,12 +687,7 @@ pub fn setup_environment_and_start_node( indexer_runtime, indexer_grpc_runtime, internal_indexer_db_runtime, - ) = services::bootstrap_api_and_indexer( - &node_config, - db_rw.clone(), - chain_id, - internal_indexer_db, - )?; + ) = services::bootstrap_api_and_indexer(&node_config, db_rw.clone(), chain_id, indexer_db_opt)?; // 
Create mempool and get the consensus to mempool sender let (mempool_runtime, consensus_to_mempool_sender) = @@ -714,17 +701,6 @@ pub fn setup_environment_and_start_node( peers_and_metadata, ); - // Ensure consensus key in secure DB. - if !matches!( - node_config - .consensus - .safety_rules - .initial_safety_rules_config, - InitialSafetyRulesConfig::None - ) { - aptos_safety_rules::safety_rules_manager::storage(&node_config.consensus.safety_rules); - } - // Create the DKG runtime and get the VTxn pool let (vtxn_pool, dkg_runtime) = consensus::create_dkg_runtime(&mut node_config, dkg_subscriptions, dkg_network_interfaces); @@ -742,9 +718,16 @@ pub fn setup_environment_and_start_node( state_sync_runtimes.block_until_initialized(); debug!("State sync initialization complete."); - // Create the consensus observer publisher (if enabled) - let (consensus_publisher_runtime, consensus_publisher) = - consensus::create_consensus_publisher(&node_config, &consensus_observer_network_interfaces); + // Create the consensus observer and publisher (if enabled) + let (consensus_observer_runtime, consensus_publisher_runtime, consensus_publisher) = + consensus::create_consensus_observer_and_publisher( + &node_config, + consensus_observer_network_interfaces, + consensus_notifier.clone(), + consensus_to_mempool_sender.clone(), + db_rw.clone(), + consensus_observer_reconfig_subscription, + ); // Create the consensus runtime (if enabled) let consensus_runtime = consensus::create_consensus_runtime( @@ -759,17 +742,6 @@ pub fn setup_environment_and_start_node( &mut admin_service, ); - // Create the consensus observer runtime (if enabled) - let consensus_observer_runtime = consensus::create_consensus_observer_runtime( - &node_config, - consensus_observer_network_interfaces, - consensus_publisher, - consensus_notifier, - consensus_to_mempool_sender, - db_rw, - consensus_observer_reconfig_subscription, - ); - Ok(AptosHandle { _admin_service: admin_service, _api_runtime: api_runtime, diff --git 
a/aptos-node/src/network.rs b/aptos-node/src/network.rs index 6c2289e225ad8..165c24778bdfb 100644 --- a/aptos-node/src/network.rs +++ b/aptos-node/src/network.rs @@ -8,7 +8,7 @@ use aptos_config::{ network_id::NetworkId, }; use aptos_consensus::{ - consensus_observer, consensus_observer::network_message::ConsensusObserverMessage, + consensus_observer, consensus_observer::network::observer_message::ConsensusObserverMessage, network_interface::ConsensusMsg, }; use aptos_dkg_runtime::DKGMessage; @@ -181,7 +181,9 @@ pub fn consensus_observer_network_configuration( rpc_protocols, aptos_channel::Config::new(max_network_channel_size) .queue_style(QueueStyle::FIFO) - .counters(&consensus_observer::metrics::PENDING_CONSENSUS_OBSERVER_NETWORK_EVENTS), + .counters( + &consensus_observer::common::metrics::PENDING_CONSENSUS_OBSERVER_NETWORK_EVENTS, + ), ); NetworkApplicationConfig::new(network_client_config, network_service_config) } diff --git a/aptos-node/src/services.rs b/aptos-node/src/services.rs index 149897c159925..a6b94bde33bc8 100644 --- a/aptos-node/src/services.rs +++ b/aptos-node/src/services.rs @@ -6,12 +6,13 @@ use aptos_admin_service::AdminService; use aptos_build_info::build_information; use aptos_config::config::NodeConfig; use aptos_consensus::{ - consensus_observer::publisher::ConsensusPublisher, network_interface::ConsensusMsg, - persistent_liveness_storage::StorageWriteProxy, quorum_store::quorum_store_db::QuorumStoreDB, + consensus_observer::publisher::consensus_publisher::ConsensusPublisher, + network_interface::ConsensusMsg, persistent_liveness_storage::StorageWriteProxy, + quorum_store::quorum_store_db::QuorumStoreDB, }; use aptos_consensus_notifications::ConsensusNotifier; use aptos_data_client::client::AptosDataClient; -use aptos_db_indexer::indexer_reader::IndexerReaders; +use aptos_db_indexer::{db_indexer::InternalIndexerDB, indexer_reader::IndexerReaders}; use aptos_event_notifications::{DbBackedOnChainConfig, ReconfigNotificationListener}; use 
aptos_indexer_grpc_fullnode::runtime::bootstrap as bootstrap_indexer_grpc; use aptos_indexer_grpc_table_info::runtime::{ @@ -27,7 +28,6 @@ use aptos_peer_monitoring_service_server::{ PeerMonitoringServiceServer, }; use aptos_peer_monitoring_service_types::PeerMonitoringServiceMessage; -use aptos_schemadb::DB; use aptos_storage_interface::{DbReader, DbReaderWriter}; use aptos_time_service::TimeService; use aptos_types::{chain_id::ChainId, indexer::indexer_db_reader::IndexerReader}; @@ -45,7 +45,7 @@ pub fn bootstrap_api_and_indexer( node_config: &NodeConfig, db_rw: DbReaderWriter, chain_id: ChainId, - internal_indexer_db: Option>, + internal_indexer_db: Option, ) -> anyhow::Result<( Receiver, Option, diff --git a/aptos-node/src/state_sync.rs b/aptos-node/src/state_sync.rs index c9a8d8370272b..d98667977a0cc 100644 --- a/aptos-node/src/state_sync.rs +++ b/aptos-node/src/state_sync.rs @@ -20,7 +20,6 @@ use aptos_network::application::{ interface::{NetworkClient, NetworkClientInterface, NetworkServiceEvents}, storage::PeersAndMetadata, }; -use aptos_schemadb::DB; use aptos_state_sync_driver::{ driver_factory::{DriverFactory, StateSyncRuntimes}, metadata_storage::PersistentMetadataStorage, @@ -132,7 +131,6 @@ pub fn start_state_sync_and_get_notification_handles( waypoint: Waypoint, event_subscription_service: EventSubscriptionService, db_rw: DbReaderWriter, - internal_indexer_db: Option>, ) -> anyhow::Result<( AptosDataClient, StateSyncRuntimes, @@ -197,7 +195,6 @@ pub fn start_state_sync_and_get_notification_handles( aptos_data_client.clone(), streaming_service_client, TimeService::real(), - internal_indexer_db, ); // Create a new state sync runtime handle diff --git a/aptos-node/src/storage.rs b/aptos-node/src/storage.rs index ab196fa0162c9..0089a7961b2ea 100644 --- a/aptos-node/src/storage.rs +++ b/aptos-node/src/storage.rs @@ -5,7 +5,9 @@ use anyhow::{anyhow, Result}; use aptos_backup_service::start_backup_service; use aptos_config::{config::NodeConfig, 
utils::get_genesis_txn}; use aptos_db::{fast_sync_storage_wrapper::FastSyncStorageWrapper, AptosDB}; +use aptos_db_indexer::db_indexer::InternalIndexerDB; use aptos_executor::db_bootstrapper::maybe_bootstrap; +use aptos_indexer_grpc_table_info::internal_indexer_db_service::InternalIndexerDBService; use aptos_logger::{debug, info}; use aptos_storage_interface::{DbReader, DbReaderWriter}; use aptos_types::{ledger_info::LedgerInfoWithSignatures, waypoint::Waypoint}; @@ -38,9 +40,15 @@ pub(crate) fn maybe_apply_genesis( #[cfg(not(feature = "consensus-only-perf-test"))] pub(crate) fn bootstrap_db( node_config: &NodeConfig, -) -> Result<(Arc, DbReaderWriter, Option)> { +) -> Result<( + Arc, + DbReaderWriter, + Option, + Option, +)> { + let internal_indexer_db = InternalIndexerDBService::get_indexer_db(node_config); let (aptos_db_reader, db_rw, backup_service) = - match FastSyncStorageWrapper::initialize_dbs(node_config)? { + match FastSyncStorageWrapper::initialize_dbs(node_config, internal_indexer_db.clone())? { Either::Left(db) => { let (db_arc, db_rw) = DbReaderWriter::wrap(db); let db_backup_service = start_backup_service( @@ -76,8 +84,7 @@ pub(crate) fn bootstrap_db( (db_arc as Arc, db_rw, Some(db_backup_service)) }, }; - - Ok((aptos_db_reader, db_rw, backup_service)) + Ok((aptos_db_reader, db_rw, backup_service, internal_indexer_db)) } /// In consensus-only mode, return a in-memory based [FakeAptosDB] and @@ -145,7 +152,12 @@ fn create_rocksdb_checkpoint_and_change_working_dir( /// the various handles. pub fn initialize_database_and_checkpoints( node_config: &mut NodeConfig, -) -> Result<(DbReaderWriter, Option, Waypoint)> { +) -> Result<( + DbReaderWriter, + Option, + Waypoint, + Option, +)> { // If required, create RocksDB checkpoints and change the working directory. // This is test-only. 
if let Some(working_dir) = node_config.base.working_dir.clone() { @@ -154,7 +166,7 @@ pub fn initialize_database_and_checkpoints( // Open the database let instant = Instant::now(); - let (_aptos_db, db_rw, backup_service) = bootstrap_db(node_config)?; + let (_aptos_db, db_rw, backup_service, indexer_db_opt) = bootstrap_db(node_config)?; // Log the duration to open storage debug!( @@ -166,5 +178,6 @@ pub fn initialize_database_and_checkpoints( db_rw, backup_service, node_config.base.waypoint.genesis_waypoint(), + indexer_db_opt, )) } diff --git a/buildtools/packer/aws-ubuntu.pkr.hcl b/buildtools/packer/aws-ubuntu.pkr.hcl new file mode 100644 index 0000000000000..2a7b64c17a623 --- /dev/null +++ b/buildtools/packer/aws-ubuntu.pkr.hcl @@ -0,0 +1,64 @@ +packer { + required_plugins { + amazon = { + version = ">= 1.2.8" + source = "github.com/hashicorp/amazon" + } + } +} + +variable "region" { + type = string +} + +variable "ami_prefix" { + type = string +} + + +data "amazon-ami" "runs-on-ami-x64" { + filters = { + name = "runs-on-v2.2-ubuntu22-full-x64-*" + root-device-type = "ebs" + virtualization-type = "hvm" + } + most_recent = true + # The Runs-On AMI is in the following account + # ref: https://runs-on.com/guides/building-custom-ami-with-packer/ + owners = ["135269210855"] + region = "${var.region}" +} + +source "amazon-ebs" "build-ebs" { + ami_name = "${var.ami_prefix}-runs-on-${formatdate("YYYY-MM-DD-hhmmss", timestamp())}" + instance_type = "c7a.4xlarge" + region = "${var.region}" + source_ami = "${data.amazon-ami.runs-on-ami-x64.id}" + ssh_username = "ubuntu" + user_data_file = "./user_data.sh" +} + +build { + name = "setup-ubuntu-22.04" + sources = [ + "source.amazon-ebs.build-ebs" + ] + + provisioner "file" { + source = "../../rust-toolchain.toml" + destination = "/tmp/rust-toolchain.toml" + } + + provisioner "file" { + source = "../../scripts" + destination = "/tmp/scripts" + } + + provisioner "shell" { + inline = [ + "whoami", + "chmod +x 
/tmp/scripts/dev_setup.sh", + "sudo -u runner /tmp/scripts/dev_setup.sh -b -r -y -P -J -t -k", + ] + } +} diff --git a/buildtools/packer/user_data.sh b/buildtools/packer/user_data.sh new file mode 100644 index 0000000000000..501b4da455f51 --- /dev/null +++ b/buildtools/packer/user_data.sh @@ -0,0 +1,2 @@ +#!/bin/bash +systemctl start ssh diff --git a/clippy.toml b/clippy.toml index f0b060c599b0c..36f13055e2511 100644 --- a/clippy.toml +++ b/clippy.toml @@ -6,3 +6,5 @@ type-complexity-threshold = 10000 too-many-arguments-threshold = 14 # Reasonably large enum variants are okay enum-variant-size-threshold = 1000 +# Allow unwrap in test +allow-unwrap-in-tests = true diff --git a/config/Cargo.toml b/config/Cargo.toml index b36397d17fb5a..cc9e9b5855cb0 100644 --- a/config/Cargo.toml +++ b/config/Cargo.toml @@ -28,7 +28,6 @@ cfg-if = { workspace = true } get_if_addrs = { workspace = true } maplit = { workspace = true } num_cpus = { workspace = true } -number_range = { workspace = true } poem-openapi = { workspace = true } rand = { workspace = true } serde = { workspace = true } @@ -47,5 +46,6 @@ tempfile = { workspace = true } default = [] failpoints = [] fuzzing = ["aptos-crypto/fuzzing", "aptos-types/fuzzing"] +smoke-test = [] testing = [] tokio-console = [] diff --git a/config/src/config/config_sanitizer.rs b/config/src/config/config_sanitizer.rs index f14428fcd6958..ffc16c3c871f1 100644 --- a/config/src/config/config_sanitizer.rs +++ b/config/src/config/config_sanitizer.rs @@ -1,6 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 +use super::internal_indexer_db_config::InternalIndexerDBConfig; use crate::config::{ node_config_loader::NodeType, utils::{are_failpoints_enabled, get_config_name}, @@ -62,6 +63,7 @@ impl ConfigSanitizer for NodeConfig { NetbenchConfig::sanitize(node_config, node_type, chain_id)?; StateSyncConfig::sanitize(node_config, node_type, chain_id)?; StorageConfig::sanitize(node_config, node_type, chain_id)?; + 
InternalIndexerDBConfig::sanitize(node_config, node_type, chain_id)?; sanitize_validator_network_config(node_config, node_type, chain_id)?; Ok(()) // All configs passed validation diff --git a/config/src/config/consensus_config.rs b/config/src/config/consensus_config.rs index 501a80c73f852..90526afc77510 100644 --- a/config/src/config/consensus_config.rs +++ b/config/src/config/consensus_config.rs @@ -2,6 +2,7 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 +use super::DEFEAULT_MAX_BATCH_TXNS; use crate::config::{ config_sanitizer::ConfigSanitizer, node_config_loader::NodeType, Error, NodeConfig, QuorumStoreConfig, ReliableBroadcastConfig, SafetyRulesConfig, BATCH_PADDING_BYTES, @@ -13,11 +14,14 @@ use serde::{Deserialize, Serialize}; use std::path::PathBuf; // NOTE: when changing, make sure to update QuorumStoreBackPressureConfig::backlog_txn_limit_count as well. -const MAX_SENDING_BLOCK_TXNS: u64 = 1900; +const MAX_SENDING_BLOCK_TXNS_AFTER_FILTERING: u64 = 3000; +const MAX_SENDING_BLOCK_TXNS: u64 = 7000; pub(crate) static MAX_RECEIVING_BLOCK_TXNS: Lazy = Lazy::new(|| 10000.max(2 * MAX_SENDING_BLOCK_TXNS)); // stop reducing size at this point, so 1MB transactions can still go through const MIN_BLOCK_BYTES_OVERRIDE: u64 = 1024 * 1024 + BATCH_PADDING_BYTES as u64; +// We should reduce block size only until two QS batch sizes. 
+const MIN_BLOCK_TXNS_AFTER_FILTERING: u64 = DEFEAULT_MAX_BATCH_TXNS as u64 * 2; #[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] #[serde(default, deny_unknown_fields)] @@ -25,6 +29,7 @@ pub struct ConsensusConfig { // length of inbound queue of messages pub max_network_channel_size: usize, pub max_sending_block_txns: u64, + pub max_sending_block_txns_after_filtering: u64, pub max_sending_block_bytes: u64, pub max_sending_inline_txns: u64, pub max_sending_inline_bytes: u64, @@ -63,6 +68,11 @@ pub struct ConsensusConfig { pub intra_consensus_channel_buffer_size: usize, pub quorum_store: QuorumStoreConfig, pub vote_back_pressure_limit: u64, + /// If backpressure target block size is below it, update `max_txns_to_execute` instead. + /// Applied to execution, pipeline and chain health backpressure. + /// Needed as we cannot subsplit QS batches. + pub min_max_txns_in_block_after_filtering_from_backpressure: u64, + pub execution_backpressure: Option, pub pipeline_backpressure: Vec, // Used to decide if backoff is needed. // must match one of the CHAIN_HEALTH_WINDOW_SIZES values. @@ -77,6 +87,10 @@ pub struct ConsensusConfig { pub broadcast_vote: bool, pub proof_cache_capacity: u64, pub rand_rb_config: ReliableBroadcastConfig, + pub num_bounded_executor_tasks: u64, + pub enable_pre_commit: bool, + + pub max_pending_rounds_in_commit_vote_cache: u64, } #[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)] @@ -123,10 +137,40 @@ impl Default for DelayedQcAggregatorConfig { } } +/// Execution backpressure which handles gas/s variance, +/// and adjusts block sizes to "recalibrate it" to wanted range. 
+#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] +pub struct ExecutionBackpressureConfig { + /// Look at execution time for this many last blocks + pub num_blocks_to_look_at: usize, + + /// Only blocks above this threshold are treated as potentially needed recalibration + /// This is needed as small blocks have overheads that are irrelevant to the transactions + /// being executed. + pub min_block_time_ms_to_activate: usize, + + /// Backpressure has a second check, where it only activates if + /// at least `min_blocks_to_activate` are above `min_block_time_ms_to_activate` + pub min_blocks_to_activate: usize, + + /// Out of blocks in the window, take this percentile (from 0-1 range), to use for calibration. + /// i.e. 0.5 means take a median of last `num_blocks_to_look_at` blocks. + pub percentile: f64, + /// Recalibrating max block size, to target blocks taking this long. + pub target_block_time_ms: usize, + /// A minimal number of transactions per block, even if calibration suggests otherwise + /// To make sure backpressure doesn't become too aggressive. + pub min_calibrated_txns_per_block: u64, + // We compute re-calibrated block size, and use that for `max_txns_in_block`. 
+ // But after execution pool and cost of overpacking being minimal - we should + // change so that backpressure sets `max_txns_to_execute` instead +} + #[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)] pub struct PipelineBackpressureValues { + // At what latency does this backpressure level activate pub back_pressure_pipeline_latency_limit_ms: u64, - pub max_sending_block_txns_override: u64, + pub max_sending_block_txns_after_filtering_override: u64, pub max_sending_block_bytes_override: u64, // If there is backpressure, giving some more breathing room to go through the backlog, // and making sure rounds don't go extremely fast (even if they are smaller blocks) @@ -134,18 +178,16 @@ pub struct PipelineBackpressureValues { // If we want to dynamically increase it beyond quorum_store_poll_time, // we need to adjust timeouts other nodes use for the backpressured round. pub backpressure_proposal_delay_ms: u64, - pub max_txns_from_block_to_execute: Option, } #[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)] pub struct ChainHealthBackoffValues { pub backoff_if_below_participating_voting_power_percentage: usize, - pub max_sending_block_txns_override: u64, + pub max_sending_block_txns_after_filtering_override: u64, pub max_sending_block_bytes_override: u64, pub backoff_proposal_delay_ms: u64, - pub max_txns_from_block_to_execute: Option, } impl Default for ConsensusConfig { @@ -153,6 +195,7 @@ impl Default for ConsensusConfig { ConsensusConfig { max_network_channel_size: 1024, max_sending_block_txns: MAX_SENDING_BLOCK_TXNS, + max_sending_block_txns_after_filtering: MAX_SENDING_BLOCK_TXNS_AFTER_FILTERING, max_sending_block_bytes: 3 * 1024 * 1024, // 3MB max_receiving_block_txns: *MAX_RECEIVING_BLOCK_TXNS, max_sending_inline_txns: 100, @@ -184,6 +227,16 @@ impl Default for ConsensusConfig { // Considering block gas limit and pipeline backpressure should keep number of blocks // in the pipline very low, we can keep this limit pretty low, too. 
vote_back_pressure_limit: 7, + min_max_txns_in_block_after_filtering_from_backpressure: MIN_BLOCK_TXNS_AFTER_FILTERING, + execution_backpressure: Some(ExecutionBackpressureConfig { + num_blocks_to_look_at: 12, + min_blocks_to_activate: 4, + percentile: 0.5, + target_block_time_ms: 250, + min_block_time_ms_to_activate: 100, + // allow at least two spreading group from reordering in a single block, to utilize paralellism + min_calibrated_txns_per_block: 8, + }), pipeline_backpressure: vec![ PipelineBackpressureValues { // pipeline_latency looks how long has the oldest block still in pipeline @@ -191,120 +244,98 @@ impl Default for ConsensusConfig { // Block enters the pipeline after consensus orders it, and leaves the // pipeline once quorum on execution result among validators has been reached // (so-(badly)-called "commit certificate"), meaning 2f+1 validators have finished execution. - back_pressure_pipeline_latency_limit_ms: 800, - max_sending_block_txns_override: MAX_SENDING_BLOCK_TXNS, + back_pressure_pipeline_latency_limit_ms: 1200, + max_sending_block_txns_after_filtering_override: + MAX_SENDING_BLOCK_TXNS_AFTER_FILTERING, + max_sending_block_bytes_override: 5 * 1024 * 1024, + backpressure_proposal_delay_ms: 50, + }, + PipelineBackpressureValues { + back_pressure_pipeline_latency_limit_ms: 1500, + max_sending_block_txns_after_filtering_override: + MAX_SENDING_BLOCK_TXNS_AFTER_FILTERING, max_sending_block_bytes_override: 5 * 1024 * 1024, backpressure_proposal_delay_ms: 100, - max_txns_from_block_to_execute: None, }, PipelineBackpressureValues { - back_pressure_pipeline_latency_limit_ms: 1100, - max_sending_block_txns_override: MAX_SENDING_BLOCK_TXNS, + back_pressure_pipeline_latency_limit_ms: 1900, + max_sending_block_txns_after_filtering_override: + MAX_SENDING_BLOCK_TXNS_AFTER_FILTERING, max_sending_block_bytes_override: 5 * 1024 * 1024, backpressure_proposal_delay_ms: 200, - max_txns_from_block_to_execute: None, }, + // with execution backpressure, only 
later start reducing block size PipelineBackpressureValues { - back_pressure_pipeline_latency_limit_ms: 1400, - max_sending_block_txns_override: 2000, + back_pressure_pipeline_latency_limit_ms: 2500, + max_sending_block_txns_after_filtering_override: 1000, max_sending_block_bytes_override: MIN_BLOCK_BYTES_OVERRIDE, backpressure_proposal_delay_ms: 300, - max_txns_from_block_to_execute: None, - }, - PipelineBackpressureValues { - back_pressure_pipeline_latency_limit_ms: 1700, - max_sending_block_txns_override: 1000, - max_sending_block_bytes_override: MIN_BLOCK_BYTES_OVERRIDE, - backpressure_proposal_delay_ms: 400, - max_txns_from_block_to_execute: None, }, PipelineBackpressureValues { - back_pressure_pipeline_latency_limit_ms: 2000, - max_sending_block_txns_override: 1000, - max_sending_block_bytes_override: MIN_BLOCK_BYTES_OVERRIDE, - backpressure_proposal_delay_ms: 500, - max_txns_from_block_to_execute: Some(400), - }, - PipelineBackpressureValues { - back_pressure_pipeline_latency_limit_ms: 2300, - max_sending_block_txns_override: 1000, - max_sending_block_bytes_override: MIN_BLOCK_BYTES_OVERRIDE, - backpressure_proposal_delay_ms: 500, - max_txns_from_block_to_execute: Some(150), - }, - PipelineBackpressureValues { - back_pressure_pipeline_latency_limit_ms: 2700, - max_sending_block_txns_override: 1000, + back_pressure_pipeline_latency_limit_ms: 3500, + max_sending_block_txns_after_filtering_override: 200, max_sending_block_bytes_override: MIN_BLOCK_BYTES_OVERRIDE, - backpressure_proposal_delay_ms: 500, - max_txns_from_block_to_execute: Some(50), + backpressure_proposal_delay_ms: 300, }, PipelineBackpressureValues { - back_pressure_pipeline_latency_limit_ms: 3100, - max_sending_block_txns_override: 1000, + back_pressure_pipeline_latency_limit_ms: 4500, + max_sending_block_txns_after_filtering_override: 30, max_sending_block_bytes_override: MIN_BLOCK_BYTES_OVERRIDE, - backpressure_proposal_delay_ms: 500, - max_txns_from_block_to_execute: Some(20), + 
backpressure_proposal_delay_ms: 300, }, PipelineBackpressureValues { - back_pressure_pipeline_latency_limit_ms: 3500, - max_sending_block_txns_override: 1000, - max_sending_block_bytes_override: MIN_BLOCK_BYTES_OVERRIDE, - backpressure_proposal_delay_ms: 500, + back_pressure_pipeline_latency_limit_ms: 6000, // in practice, latencies and delay make it such that ~2 blocks/s is max, // meaning that most aggressively we limit to ~10 TPS // For transactions that are more expensive than that, we should // instead rely on max gas per block to limit latency. - max_txns_from_block_to_execute: Some(5), + max_sending_block_txns_after_filtering_override: 5, + max_sending_block_bytes_override: MIN_BLOCK_BYTES_OVERRIDE, + backpressure_proposal_delay_ms: 300, }, ], window_for_chain_health: 100, chain_health_backoff: vec![ ChainHealthBackoffValues { backoff_if_below_participating_voting_power_percentage: 80, - max_sending_block_txns_override: 10000, + max_sending_block_txns_after_filtering_override: + MAX_SENDING_BLOCK_TXNS_AFTER_FILTERING, max_sending_block_bytes_override: 5 * 1024 * 1024, backoff_proposal_delay_ms: 150, - max_txns_from_block_to_execute: None, }, ChainHealthBackoffValues { backoff_if_below_participating_voting_power_percentage: 78, - max_sending_block_txns_override: 2000, + max_sending_block_txns_after_filtering_override: 2000, max_sending_block_bytes_override: MIN_BLOCK_BYTES_OVERRIDE, backoff_proposal_delay_ms: 300, - max_txns_from_block_to_execute: None, }, ChainHealthBackoffValues { backoff_if_below_participating_voting_power_percentage: 76, - max_sending_block_txns_override: 500, + max_sending_block_txns_after_filtering_override: 500, max_sending_block_bytes_override: MIN_BLOCK_BYTES_OVERRIDE, backoff_proposal_delay_ms: 300, - max_txns_from_block_to_execute: None, }, ChainHealthBackoffValues { backoff_if_below_participating_voting_power_percentage: 74, - max_sending_block_txns_override: 500, + max_sending_block_txns_after_filtering_override: 100, 
max_sending_block_bytes_override: MIN_BLOCK_BYTES_OVERRIDE, - backoff_proposal_delay_ms: 500, - max_txns_from_block_to_execute: Some(100), + backoff_proposal_delay_ms: 300, }, ChainHealthBackoffValues { backoff_if_below_participating_voting_power_percentage: 72, - max_sending_block_txns_override: 500, + max_sending_block_txns_after_filtering_override: 25, max_sending_block_bytes_override: MIN_BLOCK_BYTES_OVERRIDE, - backoff_proposal_delay_ms: 500, - max_txns_from_block_to_execute: Some(25), + backoff_proposal_delay_ms: 300, }, ChainHealthBackoffValues { backoff_if_below_participating_voting_power_percentage: 70, - max_sending_block_txns_override: 500, - max_sending_block_bytes_override: MIN_BLOCK_BYTES_OVERRIDE, - backoff_proposal_delay_ms: 500, // in practice, latencies and delay make it such that ~2 blocks/s is max, // meaning that most aggressively we limit to ~10 TPS // For transactions that are more expensive than that, we should // instead rely on max gas per block to limit latency. 
- max_txns_from_block_to_execute: Some(5), + max_sending_block_txns_after_filtering_override: 5, + max_sending_block_bytes_override: MIN_BLOCK_BYTES_OVERRIDE, + backoff_proposal_delay_ms: 300, }, ], @@ -323,6 +354,9 @@ impl Default for ConsensusConfig { backoff_policy_max_delay_ms: 10000, rpc_timeout_ms: 10000, }, + num_bounded_executor_tasks: 16, + enable_pre_commit: true, + max_pending_rounds_in_commit_vote_cache: 100, } } } @@ -360,12 +394,12 @@ impl ConsensusConfig { ( config.max_sending_block_txns, config.max_receiving_block_txns, - "txns", + "send < recv for txns", ), ( config.max_sending_block_bytes, config.max_receiving_block_bytes, - "bytes", + "send < recv for bytes", ), ]; for (send, recv, label) in &send_recv_pairs { @@ -388,46 +422,41 @@ impl ConsensusConfig { ( config.quorum_store.receiver_max_batch_txns as u64, config.max_sending_block_txns, - "txns".to_string(), + "QS recv batch txns < max_sending_block_txns".to_string(), + ), + ( + config.quorum_store.receiver_max_batch_txns as u64, + config.max_sending_block_txns_after_filtering, + "QS recv batch txns < max_sending_block_txns_after_filtering ".to_string(), + ), + ( + config.quorum_store.receiver_max_batch_txns as u64, + config.min_max_txns_in_block_after_filtering_from_backpressure, + "QS recv batch txns < min_max_txns_in_block_after_filtering_from_backpressure" + .to_string(), ), ( config.quorum_store.receiver_max_batch_bytes as u64, config.max_sending_block_bytes, - "bytes".to_string(), + "QS recv batch bytes < max_sending_block_bytes".to_string(), ), ]; for backpressure_values in &config.pipeline_backpressure { - recv_batch_send_block_pairs.push(( - config.quorum_store.receiver_max_batch_txns as u64, - backpressure_values.max_sending_block_txns_override, - format!( - "backpressure {} ms: txns", - backpressure_values.back_pressure_pipeline_latency_limit_ms, - ), - )); recv_batch_send_block_pairs.push(( config.quorum_store.receiver_max_batch_bytes as u64, 
backpressure_values.max_sending_block_bytes_override, format!( - "backpressure {} ms: bytes", + "backpressure {} ms: QS recv batch bytes < max_sending_block_bytes_override", backpressure_values.back_pressure_pipeline_latency_limit_ms, ), )); } for backoff_values in &config.chain_health_backoff { - recv_batch_send_block_pairs.push(( - config.quorum_store.receiver_max_batch_txns as u64, - backoff_values.max_sending_block_txns_override, - format!( - "backoff {} %: txns", - backoff_values.backoff_if_below_participating_voting_power_percentage, - ), - )); recv_batch_send_block_pairs.push(( config.quorum_store.receiver_max_batch_bytes as u64, backoff_values.max_sending_block_bytes_override, format!( - "backoff {} %: bytes", + "backoff {} %: bytes: QS recv batch bytes < max_sending_block_bytes_override", backoff_values.backoff_if_below_participating_voting_power_percentage, ), )); @@ -637,10 +666,9 @@ mod test { consensus: ConsensusConfig { pipeline_backpressure: vec![PipelineBackpressureValues { back_pressure_pipeline_latency_limit_ms: 0, - max_sending_block_txns_override: 350, + max_sending_block_txns_after_filtering_override: 350, max_sending_block_bytes_override: 0, backpressure_proposal_delay_ms: 0, - max_txns_from_block_to_execute: None, }], quorum_store: QuorumStoreConfig { receiver_max_batch_txns: 250, @@ -668,10 +696,9 @@ mod test { consensus: ConsensusConfig { pipeline_backpressure: vec![PipelineBackpressureValues { back_pressure_pipeline_latency_limit_ms: 0, - max_sending_block_txns_override: 251, + max_sending_block_txns_after_filtering_override: 251, max_sending_block_bytes_override: 100, backpressure_proposal_delay_ms: 0, - max_txns_from_block_to_execute: None, }], quorum_store: QuorumStoreConfig { receiver_max_batch_bytes: 2_000_000, @@ -695,10 +722,9 @@ mod test { consensus: ConsensusConfig { chain_health_backoff: vec![ChainHealthBackoffValues { backoff_if_below_participating_voting_power_percentage: 0, - max_sending_block_txns_override: 100, + 
max_sending_block_txns_after_filtering_override: 100, max_sending_block_bytes_override: 0, backoff_proposal_delay_ms: 0, - max_txns_from_block_to_execute: None, }], quorum_store: QuorumStoreConfig { receiver_max_batch_txns: 251, @@ -722,10 +748,9 @@ mod test { consensus: ConsensusConfig { chain_health_backoff: vec![ChainHealthBackoffValues { backoff_if_below_participating_voting_power_percentage: 0, - max_sending_block_txns_override: 0, + max_sending_block_txns_after_filtering_override: 0, max_sending_block_bytes_override: 100, backoff_proposal_delay_ms: 0, - max_txns_from_block_to_execute: None, }], quorum_store: QuorumStoreConfig { receiver_max_batch_bytes: 2_000_000, diff --git a/config/src/config/consensus_observer_config.rs b/config/src/config/consensus_observer_config.rs index 091b22a8105f2..0ca55c31d50e9 100644 --- a/config/src/config/consensus_observer_config.rs +++ b/config/src/config/consensus_observer_config.rs @@ -30,15 +30,21 @@ pub struct ConsensusObserverConfig { /// Interval (in milliseconds) to garbage collect peer state pub garbage_collection_interval_ms: u64, + /// The maximum number of concurrent subscriptions + pub max_concurrent_subscriptions: u64, + /// Maximum number of blocks to keep in memory (e.g., pending blocks, ordered blocks, etc.) + pub max_num_pending_blocks: u64, /// Maximum timeout (in milliseconds) for active subscriptions pub max_subscription_timeout_ms: u64, /// Maximum timeout (in milliseconds) we'll wait for the synced version to /// increase before terminating the active subscription. 
pub max_synced_version_timeout_ms: u64, - /// Interval (in milliseconds) to check the optimality of the subscribed peers - pub peer_optimality_check_interval_ms: u64, /// Interval (in milliseconds) to check progress of the consensus observer pub progress_check_interval_ms: u64, + /// Interval (in milliseconds) to check for subscription related peer changes + pub subscription_peer_change_interval_ms: u64, + /// Interval (in milliseconds) to refresh the subscription + pub subscription_refresh_interval_ms: u64, } impl Default for ConsensusObserverConfig { @@ -48,12 +54,15 @@ impl Default for ConsensusObserverConfig { publisher_enabled: false, max_network_channel_size: 1000, max_parallel_serialization_tasks: num_cpus::get(), // Default to the number of CPUs - network_request_timeout_ms: 10_000, // 10 seconds + network_request_timeout_ms: 5_000, // 5 seconds garbage_collection_interval_ms: 60_000, // 60 seconds + max_concurrent_subscriptions: 2, // 2 streams should be sufficient + max_num_pending_blocks: 100, // 100 blocks max_subscription_timeout_ms: 30_000, // 30 seconds max_synced_version_timeout_ms: 60_000, // 60 seconds - peer_optimality_check_interval_ms: 60_000, // 60 seconds progress_check_interval_ms: 5_000, // 5 seconds + subscription_peer_change_interval_ms: 60_000, // 1 minute + subscription_refresh_interval_ms: 300_000, // 5 minutes } } } @@ -102,9 +111,10 @@ impl ConfigOptimizer for ConsensusObserverConfig { } }, NodeType::PublicFullnode => { - if ENABLE_ON_PUBLIC_FULLNODES && !observer_manually_set { - // Only enable the observer for PFNs + if ENABLE_ON_PUBLIC_FULLNODES && !observer_manually_set && !publisher_manually_set { + // Enable both the observer and the publisher for PFNs consensus_observer_config.observer_enabled = true; + consensus_observer_config.publisher_enabled = true; modified_config = true; } }, diff --git a/config/src/config/gas_estimation_config.rs b/config/src/config/gas_estimation_config.rs index 763ac1b947486..00d943e9c1668 100644 --- 
a/config/src/config/gas_estimation_config.rs +++ b/config/src/config/gas_estimation_config.rs @@ -32,6 +32,8 @@ pub struct GasEstimationConfig { pub aggressive_block_history: usize, /// Time after write when previous value is returned without recomputing pub cache_expiration_ms: u64, + /// Whether to account which TransactionShufflerType is used onchain, and how it affects gas estimation + pub incorporate_reordering_effects: bool, } impl Default for GasEstimationConfig { @@ -44,6 +46,7 @@ impl Default for GasEstimationConfig { market_block_history: 30, aggressive_block_history: 120, cache_expiration_ms: 500, + incorporate_reordering_effects: true, } } } diff --git a/config/src/config/indexer_grpc_config.rs b/config/src/config/indexer_grpc_config.rs index 7b275a6fc15f4..6dcb566194ff4 100644 --- a/config/src/config/indexer_grpc_config.rs +++ b/config/src/config/indexer_grpc_config.rs @@ -90,10 +90,15 @@ impl ConfigSanitizer for IndexerGrpcConfig { return Ok(()); } - if !node_config.storage.enable_indexer && !node_config.indexer_table_info.enabled { + if !node_config.storage.enable_indexer + && !node_config + .indexer_table_info + .table_info_service_mode + .is_enabled() + { return Err(Error::ConfigSanitizerFailed( sanitizer_name, - "storage.enable_indexer or indexer_table_info.enabled must be true if indexer_grpc.enabled is true".to_string(), + "storage.enable_indexer must be true or indexer_table_info.table_info_service_mode must be IndexingOnly if indexer_grpc.enabled is true".to_string(), )); } Ok(()) @@ -120,7 +125,7 @@ impl ConfigOptimizer for IndexerGrpcConfig { #[cfg(test)] mod tests { use super::*; - use crate::config::{IndexerTableInfoConfig, StorageConfig}; + use crate::config::{IndexerTableInfoConfig, StorageConfig, TableInfoServiceMode}; #[test] fn test_sanitize_enable_indexer() { @@ -128,7 +133,7 @@ mod tests { let mut storage_config = StorageConfig::default(); let mut table_info_config = IndexerTableInfoConfig::default(); storage_config.enable_indexer = 
false; - table_info_config.enabled = false; + table_info_config.table_info_service_mode = TableInfoServiceMode::Disabled; // Create a node config with the indexer enabled, but the storage indexer disabled let mut node_config = NodeConfig { @@ -170,7 +175,7 @@ mod tests { assert!(matches!(error, Error::ConfigSanitizerFailed(_, _))); // Enable the table info service - node_config.indexer_table_info.enabled = true; + node_config.indexer_table_info.table_info_service_mode = TableInfoServiceMode::IndexingOnly; // Sanitize the config and verify that it now succeeds IndexerGrpcConfig::sanitize(&node_config, NodeType::Validator, Some(ChainId::mainnet())) diff --git a/config/src/config/indexer_table_info_config.rs b/config/src/config/indexer_table_info_config.rs index b5dca102d02ab..363c2d81a2b7e 100644 --- a/config/src/config/indexer_table_info_config.rs +++ b/config/src/config/indexer_table_info_config.rs @@ -6,20 +6,33 @@ use serde::{Deserialize, Serialize}; // Useful defaults pub const DEFAULT_PARSER_TASK_COUNT: u16 = 20; pub const DEFAULT_PARSER_BATCH_SIZE: u16 = 1000; +pub const DEFAULT_TABLE_INFO_BUCKET: &str = "default-table-info"; + +#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)] +pub enum TableInfoServiceMode { + /// Backup service mode with GCS bucket name. + Backup(String), + /// Restore service mode with GCS bucket name. 
+ Restore(String), + IndexingOnly, + Disabled, +} + +impl TableInfoServiceMode { + pub fn is_enabled(&self) -> bool { + !matches!(self, TableInfoServiceMode::Disabled) + } +} #[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)] #[serde(default, deny_unknown_fields)] pub struct IndexerTableInfoConfig { - /// Enable table info parsing - pub enabled: bool, - /// Number of processor tasks to fan out pub parser_task_count: u16, /// Number of transactions each parser will process pub parser_batch_size: u16, - - pub enable_expensive_logging: bool, + pub table_info_service_mode: TableInfoServiceMode, } // Reminder, #[serde(default)] on IndexerTableInfoConfig means that the default values for @@ -28,10 +41,9 @@ pub struct IndexerTableInfoConfig { impl Default for IndexerTableInfoConfig { fn default() -> Self { Self { - enabled: false, parser_task_count: DEFAULT_PARSER_TASK_COUNT, parser_batch_size: DEFAULT_PARSER_BATCH_SIZE, - enable_expensive_logging: false, + table_info_service_mode: TableInfoServiceMode::Disabled, } } } diff --git a/config/src/config/internal_indexer_db_config.rs b/config/src/config/internal_indexer_db_config.rs index b4f415bcc9751..323d02c090b45 100644 --- a/config/src/config/internal_indexer_db_config.rs +++ b/config/src/config/internal_indexer_db_config.rs @@ -1,6 +1,10 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 +use crate::config::{ + config_sanitizer::ConfigSanitizer, node_config_loader::NodeType, Error, NodeConfig, +}; +use aptos_types::chain_id::ChainId; use serde::{Deserialize, Serialize}; #[derive(Clone, Copy, Debug, Deserialize, PartialEq, Eq, Serialize)] @@ -58,3 +62,26 @@ impl Default for InternalIndexerDBConfig { } } } + +impl ConfigSanitizer for InternalIndexerDBConfig { + fn sanitize( + node_config: &NodeConfig, + _node_type: NodeType, + _chain_id: Option, + ) -> Result<(), Error> { + let sanitizer_name = Self::get_sanitizer_name(); + let config = node_config.indexer_db_config; + + // Shouldn't turn 
on internal indexer for db without sharding + if !node_config.storage.rocksdb_configs.enable_storage_sharding + && config.is_internal_indexer_db_enabled() + { + return Err(Error::ConfigSanitizerFailed( + sanitizer_name, + "Don't turn on internal indexer db if DB sharding is off".into(), + )); + } + + Ok(()) + } +} diff --git a/config/src/config/mempool_config.rs b/config/src/config/mempool_config.rs index 8cabf8ba828b8..4aa0f3824aa07 100644 --- a/config/src/config/mempool_config.rs +++ b/config/src/config/mempool_config.rs @@ -11,6 +11,32 @@ use aptos_types::chain_id::ChainId; use serde::{Deserialize, Serialize}; use serde_yaml::Value; +#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)] +#[serde(default, deny_unknown_fields)] +pub struct LoadBalancingThresholdConfig { + /// PFN load balances the traffic to multiple upstream FNs. The PFN calculates the average mempool traffic in TPS received since + /// the last peer udpate. If the average received mempool traffic is greater than this threshold, then the below limits are used + /// to decide the number of upstream peers to forward the mempool traffic. + pub avg_mempool_traffic_threshold_in_tps: u64, + /// Suppose the smallest ping latency amongst the connected upstream peers is `x`. If the average received mempool traffic is + /// greater than `avg_mempool_traffic_threshold_in_tps`, then the PFN will forward mempool traffic to only those upstream peers + /// with ping latency less than `x + latency_slack_between_top_upstream_peers`. + pub latency_slack_between_top_upstream_peers: u64, + /// If the average received mempool traffic is greater than avg_mempool_traffic_threshold_in_tps, then PFNs will forward to at most + /// `max_number_of_upstream_peers` upstream FNs. 
+ pub max_number_of_upstream_peers: u8, +} + +impl Default for LoadBalancingThresholdConfig { + fn default() -> LoadBalancingThresholdConfig { + LoadBalancingThresholdConfig { + avg_mempool_traffic_threshold_in_tps: 0, + latency_slack_between_top_upstream_peers: 50, + max_number_of_upstream_peers: 1, + } + } +} + #[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)] #[serde(default, deny_unknown_fields)] pub struct MempoolConfig { @@ -60,6 +86,20 @@ pub struct MempoolConfig { pub broadcast_buckets: Vec, pub eager_expire_threshold_ms: Option, pub eager_expire_time_ms: u64, + /// Uses the BroadcastTransactionsRequestWithReadyTime instead of BroadcastTransactionsRequest when sending + /// mempool transactions to upstream nodes. + pub include_ready_time_in_broadcast: bool, + pub usecase_stats_num_blocks_to_track: usize, + pub usecase_stats_num_top_to_track: usize, + /// We divide the transactions into buckets based on hash of the sender address. + /// This is the number of sender buckets we use. + pub num_sender_buckets: u8, + /// Load balancing configuration for the mempool. This is used only by PFNs. + pub load_balancing_thresholds: Vec, + /// When the load is low, PFNs send all the mempool traffic to only one upstream FN. When the load increases suddenly, PFNs will take + /// up to 10 minutes (shared_mempool_priority_update_interval_secs) to enable the load balancing. If this flag is enabled, + /// then the PFNs will always do load balancing irrespective of the load. 
+ pub enable_max_load_balancing_at_any_load: bool, } impl Default for MempoolConfig { @@ -87,6 +127,43 @@ impl Default for MempoolConfig { broadcast_buckets: DEFAULT_BUCKETS.to_vec(), eager_expire_threshold_ms: Some(15_000), eager_expire_time_ms: 6_000, + include_ready_time_in_broadcast: false, + usecase_stats_num_blocks_to_track: 40, + usecase_stats_num_top_to_track: 5, + num_sender_buckets: 4, + load_balancing_thresholds: vec![ + LoadBalancingThresholdConfig { + avg_mempool_traffic_threshold_in_tps: 500, + latency_slack_between_top_upstream_peers: 50, + max_number_of_upstream_peers: 2, + }, + LoadBalancingThresholdConfig { + avg_mempool_traffic_threshold_in_tps: 1000, + latency_slack_between_top_upstream_peers: 50, + max_number_of_upstream_peers: 3, + }, + LoadBalancingThresholdConfig { + avg_mempool_traffic_threshold_in_tps: 1500, + latency_slack_between_top_upstream_peers: 75, + max_number_of_upstream_peers: 4, + }, + LoadBalancingThresholdConfig { + avg_mempool_traffic_threshold_in_tps: 2500, + latency_slack_between_top_upstream_peers: 100, + max_number_of_upstream_peers: 5, + }, + LoadBalancingThresholdConfig { + avg_mempool_traffic_threshold_in_tps: 3500, + latency_slack_between_top_upstream_peers: 125, + max_number_of_upstream_peers: 6, + }, + LoadBalancingThresholdConfig { + avg_mempool_traffic_threshold_in_tps: 4500, + latency_slack_between_top_upstream_peers: 150, + max_number_of_upstream_peers: 7, + }, + ], + enable_max_load_balancing_at_any_load: false, } } } @@ -124,6 +201,11 @@ impl ConfigOptimizer for MempoolConfig { mempool_config.shared_mempool_batch_size = 200; modified_config = true; } + // Set the number of sender buckets for load balancing to 1 (default is 4) + if local_mempool_config_yaml["num_sender_buckets"].is_null() { + mempool_config.num_sender_buckets = 1; + modified_config = true; + } } if node_type.is_validator_fullnode() { // Set the shared_mempool_max_concurrent_inbound_syncs to 16 (default is 4) @@ -137,6 +219,18 @@ impl 
ConfigOptimizer for MempoolConfig { mempool_config.default_failovers = 0; modified_config = true; } + + // Set the number of sender buckets for load balancing to 1 (default is 4) + if local_mempool_config_yaml["num_sender_buckets"].is_null() { + mempool_config.num_sender_buckets = 1; + modified_config = true; + } + + // Set the include_ready_time_in_broadcast to true (default is false) + if local_mempool_config_yaml["include_ready_time_in_broadcast"].is_null() { + mempool_config.include_ready_time_in_broadcast = true; + modified_config = true; + } } Ok(modified_config) diff --git a/config/src/config/mod.rs b/config/src/config/mod.rs index 6d133ade1c3fe..bc67df4381f09 100644 --- a/config/src/config/mod.rs +++ b/config/src/config/mod.rs @@ -60,7 +60,7 @@ pub use mempool_config::*; pub use netbench_config::*; pub use network_config::*; pub use node_config::*; -pub use node_config_loader::sanitize_node_config; +pub use node_config_loader::{sanitize_node_config, NodeType}; pub use override_node_config::*; pub use peer_monitoring_config::*; pub use persistable_config::*; diff --git a/config/src/config/network_config.rs b/config/src/config/network_config.rs index aa69c13453b88..8ecd0964a6723 100644 --- a/config/src/config/network_config.rs +++ b/config/src/config/network_config.rs @@ -40,7 +40,6 @@ pub const PING_INTERVAL_MS: u64 = 10_000; pub const PING_TIMEOUT_MS: u64 = 20_000; pub const PING_FAILURES_TOLERATED: u64 = 3; pub const CONNECTIVITY_CHECK_INTERVAL_MS: u64 = 5000; -pub const MAX_CONCURRENT_NETWORK_REQS: usize = 100; pub const MAX_CONNECTION_DELAY_MS: u64 = 60_000; /* 1 minute */ pub const MAX_FULLNODE_OUTBOUND_CONNECTIONS: usize = 6; pub const MAX_INBOUND_CONNECTIONS: usize = 100; @@ -65,8 +64,6 @@ pub struct NetworkConfig { pub connectivity_check_interval_ms: u64, /// Size of all network channels pub network_channel_size: usize, - /// Maximum number of concurrent network requests - pub max_concurrent_network_reqs: usize, /// Choose a protocol to discover and 
dial out to other peers on this network. /// `DiscoveryMethod::None` disables discovery and dialing out (unless you have /// seed peers configured). @@ -153,7 +150,6 @@ impl NetworkConfig { max_connection_delay_ms: MAX_CONNECTION_DELAY_MS, connectivity_check_interval_ms: CONNECTIVITY_CHECK_INTERVAL_MS, network_channel_size: NETWORK_CHANNEL_SIZE, - max_concurrent_network_reqs: MAX_CONCURRENT_NETWORK_REQS, connection_backoff_base: CONNECTION_BACKOFF_BASE, ping_interval_ms: PING_INTERVAL_MS, ping_timeout_ms: PING_TIMEOUT_MS, diff --git a/config/src/config/quorum_store_config.rs b/config/src/config/quorum_store_config.rs index 6e442023b1ffa..c17ab0690ea41 100644 --- a/config/src/config/quorum_store_config.rs +++ b/config/src/config/quorum_store_config.rs @@ -10,6 +10,7 @@ use serde::{Deserialize, Serialize}; use std::time::Duration; pub const BATCH_PADDING_BYTES: usize = 160; +pub const DEFEAULT_MAX_BATCH_TXNS: usize = 250; const DEFAULT_MAX_NUM_BATCHES: usize = 20; #[derive(Clone, Copy, Debug, Deserialize, PartialEq, Serialize)] @@ -22,6 +23,7 @@ pub struct QuorumStoreBackPressureConfig { pub decrease_fraction: f64, pub dynamic_min_txn_per_s: u64, pub dynamic_max_txn_per_s: u64, + pub additive_increase_when_no_backpressure: u64, } impl Default for QuorumStoreBackPressureConfig { @@ -37,6 +39,9 @@ impl Default for QuorumStoreBackPressureConfig { decrease_fraction: 0.5, dynamic_min_txn_per_s: 160, dynamic_max_txn_per_s: 12000, + // When the QS is no longer backpressured, we increase number of txns to be pulled from mempool + // by this amount every second until we reach dynamic_max_txn_per_s + additive_increase_when_no_backpressure: 2000, } } } @@ -91,6 +96,7 @@ pub struct QuorumStoreConfig { pub num_workers_for_remote_batches: usize, pub batch_buckets: Vec, pub allow_batches_without_pos_in_proposal: bool, + pub enable_opt_quorum_store: bool, } impl Default for QuorumStoreConfig { @@ -99,9 +105,9 @@ impl Default for QuorumStoreConfig { channel_size: 1000, 
proof_timeout_ms: 10000, batch_generation_poll_interval_ms: 25, - batch_generation_min_non_empty_interval_ms: 200, + batch_generation_min_non_empty_interval_ms: 100, batch_generation_max_interval_ms: 250, - sender_max_batch_txns: 250, + sender_max_batch_txns: DEFEAULT_MAX_BATCH_TXNS, // TODO: on next release, remove BATCH_PADDING_BYTES sender_max_batch_bytes: 1024 * 1024 - BATCH_PADDING_BYTES, sender_max_num_batches: DEFAULT_MAX_NUM_BATCHES, @@ -129,6 +135,7 @@ impl Default for QuorumStoreConfig { num_workers_for_remote_batches: 10, batch_buckets: DEFAULT_BUCKETS.to_vec(), allow_batches_without_pos_in_proposal: true, + enable_opt_quorum_store: false, } } } diff --git a/config/src/config/safety_rules_config.rs b/config/src/config/safety_rules_config.rs index ca675e3e15ee5..3c7edcfde3197 100644 --- a/config/src/config/safety_rules_config.rs +++ b/config/src/config/safety_rules_config.rs @@ -123,15 +123,22 @@ impl ConfigSanitizer for SafetyRulesConfig { pub enum InitialSafetyRulesConfig { FromFile { identity_blob_path: PathBuf, + #[serde(skip_serializing_if = "Vec::is_empty", default)] + overriding_identity_paths: Vec, waypoint: WaypointConfig, }, None, } impl InitialSafetyRulesConfig { - pub fn from_file(identity_blob_path: PathBuf, waypoint: WaypointConfig) -> Self { + pub fn from_file( + identity_blob_path: PathBuf, + overriding_identity_paths: Vec, + waypoint: WaypointConfig, + ) -> Self { Self::FromFile { identity_blob_path, + overriding_identity_paths, waypoint, } } @@ -160,6 +167,38 @@ impl InitialSafetyRulesConfig { }, } } + + pub fn overriding_identity_blobs(&self) -> anyhow::Result> { + match self { + InitialSafetyRulesConfig::FromFile { + overriding_identity_paths, + .. 
+ } => { + let mut blobs = vec![]; + for path in overriding_identity_paths { + let blob = IdentityBlob::from_file(path)?; + blobs.push(blob); + } + Ok(blobs) + }, + InitialSafetyRulesConfig::None => { + bail!("loading overriding identity blobs failed with missing initial safety rules config") + }, + } + } + + #[cfg(feature = "smoke-test")] + pub fn overriding_identity_blob_paths_mut(&mut self) -> &mut Vec { + match self { + InitialSafetyRulesConfig::FromFile { + overriding_identity_paths, + .. + } => overriding_identity_paths, + InitialSafetyRulesConfig::None => { + unreachable!() + }, + } + } } /// Defines how safety rules should be executed diff --git a/config/src/config/state_sync_config.rs b/config/src/config/state_sync_config.rs index 7dc5b296f1678..2966a42e2e10f 100644 --- a/config/src/config/state_sync_config.rs +++ b/config/src/config/state_sync_config.rs @@ -11,13 +11,13 @@ use serde::{Deserialize, Serialize}; use serde_yaml::Value; // The maximum message size per state sync message -const MAX_MESSAGE_SIZE: usize = 4 * 1024 * 1024; /* 4 MiB */ +const MAX_MESSAGE_SIZE: usize = 8 * 1024 * 1024; /* 8 MiB */ // The maximum chunk sizes for data client requests and response const MAX_EPOCH_CHUNK_SIZE: u64 = 200; const MAX_STATE_CHUNK_SIZE: u64 = 4000; -const MAX_TRANSACTION_CHUNK_SIZE: u64 = 2000; -const MAX_TRANSACTION_OUTPUT_CHUNK_SIZE: u64 = 1000; +const MAX_TRANSACTION_CHUNK_SIZE: u64 = 3000; +const MAX_TRANSACTION_OUTPUT_CHUNK_SIZE: u64 = 3000; // The maximum number of concurrent requests to send const MAX_CONCURRENT_REQUESTS: u64 = 6; @@ -139,7 +139,7 @@ impl Default for StateSyncDriverConfig { max_pending_data_chunks: 50, max_pending_mempool_notifications: 100, max_stream_wait_time_ms: 5000, - num_versions_to_skip_snapshot_sync: 100_000_000, // At 5k TPS, this allows a node to fail for about 6 hours. + num_versions_to_skip_snapshot_sync: 400_000_000, // At 5k TPS, this allows a node to fail for about 24 hours. 
} } } diff --git a/config/src/config/storage_config.rs b/config/src/config/storage_config.rs index 0234d868489c5..493a012240abb 100644 --- a/config/src/config/storage_config.rs +++ b/config/src/config/storage_config.rs @@ -6,22 +6,23 @@ use crate::{ config::{config_sanitizer::ConfigSanitizer, node_config_loader::NodeType, Error, NodeConfig}, utils, }; -use anyhow::{ensure, Result}; +use anyhow::{bail, ensure, Result}; use aptos_logger::warn; use aptos_types::chain_id::ChainId; use arr_macro::arr; -use number_range::NumberRangeOptions; use serde::{Deserialize, Serialize}; use std::{ collections::HashMap, net::{IpAddr, Ipv4Addr, SocketAddr}, path::{Path, PathBuf}, + str::FromStr, }; // Lru cache will consume about 2G RAM based on this default value. pub const DEFAULT_MAX_NUM_NODES_PER_LRU_CACHE_SHARD: usize = 1 << 13; pub const BUFFERED_STATE_TARGET_ITEMS: usize = 100_000; +pub const BUFFERED_STATE_TARGET_ITEMS_FOR_TEST: usize = 10; #[derive(Clone, Debug, Default, Deserialize, PartialEq, Eq, Serialize)] #[serde(default, deny_unknown_fields)] @@ -49,11 +50,7 @@ impl ShardedDbPathConfig { pub fn get_shard_paths(&self) -> Result> { let mut result = HashMap::new(); for shard_path in &self.shard_paths { - let shard_ids = NumberRangeOptions::::new() - .with_list_sep(',') - .with_range_sep('-') - .parse(shard_path.shards.as_str())? 
- .collect::>(); + let shard_ids = Self::parse(shard_path.shards.as_str())?; let path = &shard_path.path; ensure!( path.is_absolute(), @@ -74,6 +71,31 @@ impl ShardedDbPathConfig { Ok(result) } + + fn parse(path: &str) -> Result> { + let mut shard_ids = vec![]; + for p in path.split(',') { + let num_or_range: Vec<&str> = p.split('-').collect(); + match num_or_range.len() { + 1 => { + let num = u8::from_str(num_or_range[0])?; + ensure!(num < 16); + shard_ids.push(num); + }, + 2 => { + let range_start = u8::from_str(num_or_range[0])?; + let range_end = u8::from_str(num_or_range[1])?; + ensure!(range_start <= range_end && range_end < 16); + for num in range_start..=range_end { + shard_ids.push(num); + } + }, + _ => bail!("Invalid path: {path}."), + } + } + + Ok(shard_ids) + } } /// Port selected RocksDB options for tuning underlying rocksdb instance of AptosDB. diff --git a/consensus/Cargo.toml b/consensus/Cargo.toml index 4d6f00ed82159..2aa1ee436769c 100644 --- a/consensus/Cargo.toml +++ b/consensus/Cargo.toml @@ -92,6 +92,7 @@ aptos-consensus-types = { workspace = true, features = ["fuzzing"] } aptos-executor-test-helpers = { workspace = true } aptos-keygen = { workspace = true } aptos-mempool = { workspace = true, features = ["fuzzing"] } +aptos-netcore = { workspace = true } aptos-network = { workspace = true, features = ["fuzzing"] } aptos-safety-rules = { workspace = true, features = ["testing"] } aptos-vm = { workspace = true, features = ["fuzzing"] } @@ -100,6 +101,7 @@ claims = { workspace = true } mockall = { workspace = true } move-core-types = { workspace = true } proptest = { workspace = true } +proptest-derive = { workspace = true } tempfile = { workspace = true } [features] @@ -116,3 +118,6 @@ failpoints = ["fail/failpoints"] [package.metadata.cargo-machete] ignored = ["serde_bytes"] + +[lints.clippy] +unwrap_used = "deny" diff --git a/consensus/consensus-types/Cargo.toml b/consensus/consensus-types/Cargo.toml index 097da10ad0369..d671a74cf0a6f 100644 
--- a/consensus/consensus-types/Cargo.toml +++ b/consensus/consensus-types/Cargo.toml @@ -23,6 +23,7 @@ aptos-logger = { workspace = true } aptos-short-hex-str = { workspace = true } aptos-types = { workspace = true } bcs = { workspace = true } +derivative = { workspace = true } fail = { workspace = true } futures = { workspace = true } itertools = { workspace = true } diff --git a/consensus/consensus-types/src/block.rs b/consensus/consensus-types/src/block.rs index 0ea1d38b023a1..cadc7957066ee 100644 --- a/consensus/consensus-types/src/block.rs +++ b/consensus/consensus-types/src/block.rs @@ -116,6 +116,9 @@ impl Block { Payload::QuorumStoreInlineHybrid(inline_batches, proof_with_data, _) => { inline_batches.len() + proof_with_data.proofs.len() }, + Payload::OptQuorumStore(opt_quorum_store_payload) => { + opt_quorum_store_payload.num_txns() + }, }, } } @@ -495,7 +498,7 @@ impl Block { ) }) }) - .map(|index| u32::try_from(index).unwrap()) + .map(|index| u32::try_from(index).expect("Index is out of bounds for u32")) .collect() } } diff --git a/consensus/consensus-types/src/block_retrieval.rs b/consensus/consensus-types/src/block_retrieval.rs index 89e9ca0121fc9..f773a92d9e9a0 100644 --- a/consensus/consensus-types/src/block_retrieval.rs +++ b/consensus/consensus-types/src/block_retrieval.rs @@ -117,8 +117,10 @@ impl BlockRetrievalResponse { ); ensure!( self.status != BlockRetrievalStatus::SucceededWithTarget - || (!self.blocks.is_empty() - && retrieval_request.match_target_id(self.blocks.last().unwrap().id())), + || self + .blocks + .last() + .map_or(false, |block| retrieval_request.match_target_id(block.id())), "target not found in blocks returned, expect {:?}", retrieval_request.target_block_id(), ); diff --git a/consensus/consensus-types/src/common.rs b/consensus/consensus-types/src/common.rs index 4d0be0f31de79..7dbc1888b7203 100644 --- a/consensus/consensus-types/src/common.rs +++ b/consensus/consensus-types/src/common.rs @@ -2,7 +2,11 @@ // Parts of the 
project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::proof_of_store::{BatchInfo, ProofCache, ProofOfStore}; +use crate::{ + payload::{OptQuorumStorePayload, PayloadExecutionLimit}, + proof_of_store::{BatchInfo, ProofCache, ProofOfStore}, +}; +use anyhow::bail; use aptos_crypto::{ hash::{CryptoHash, CryptoHasher}, HashValue, @@ -18,7 +22,12 @@ use aptos_types::{ use once_cell::sync::OnceCell; use rayon::prelude::*; use serde::{Deserialize, Serialize}; -use std::{collections::HashSet, fmt, fmt::Write, sync::Arc}; +use std::{ + collections::HashSet, + fmt::{self, Write}, + sync::Arc, + u64, +}; use tokio::sync::oneshot; /// The round of a block is a consensus-internal counter, which starts with 0 and increases @@ -51,6 +60,36 @@ impl fmt::Display for TransactionSummary { } } +#[derive(Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize, Hash, Ord, PartialOrd)] +pub struct TxnSummaryWithExpiration { + pub sender: AccountAddress, + pub sequence_number: u64, + pub expiration_timestamp_secs: u64, + pub hash: HashValue, +} + +impl TxnSummaryWithExpiration { + pub fn new( + sender: AccountAddress, + sequence_number: u64, + expiration_timestamp_secs: u64, + hash: HashValue, + ) -> Self { + Self { + sender, + sequence_number, + expiration_timestamp_secs, + hash, + } + } +} + +impl fmt::Display for TxnSummaryWithExpiration { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}:{}", self.sender, self.sequence_number,) + } +} + #[derive(Clone)] pub struct TransactionInProgress { pub gas_unit_price: u64, @@ -135,6 +174,11 @@ impl ProofWithData { } } + pub fn empty() -> Self { + Self::new(vec![]) + } + + #[allow(clippy::unwrap_used)] pub fn extend(&mut self, other: ProofWithData) { let other_data_status = other.status.lock().as_mut().unwrap().take(); self.proofs.extend(other.proofs); @@ -168,7 +212,7 @@ impl ProofWithData { #[derive(Deserialize, Serialize, Clone, Debug)] pub struct 
ProofWithDataWithTxnLimit { pub proof_with_data: ProofWithData, - pub max_txns_to_execute: Option, + pub max_txns_to_execute: Option, } impl PartialEq for ProofWithDataWithTxnLimit { @@ -181,7 +225,7 @@ impl PartialEq for ProofWithDataWithTxnLimit { impl Eq for ProofWithDataWithTxnLimit {} impl ProofWithDataWithTxnLimit { - pub fn new(proof_with_data: ProofWithData, max_txns_to_execute: Option) -> Self { + pub fn new(proof_with_data: ProofWithData, max_txns_to_execute: Option) -> Self { Self { proof_with_data, max_txns_to_execute, @@ -197,7 +241,7 @@ impl ProofWithDataWithTxnLimit { } } -fn sum_max_txns_to_execute(m1: Option, m2: Option) -> Option { +fn sum_max_txns_to_execute(m1: Option, m2: Option) -> Option { match (m1, m2) { (None, _) => m2, (_, None) => m1, @@ -214,12 +258,13 @@ pub enum Payload { QuorumStoreInlineHybrid( Vec<(BatchInfo, Vec)>, ProofWithData, - Option, + Option, ), + OptQuorumStore(OptQuorumStorePayload), } impl Payload { - pub fn transform_to_quorum_store_v2(self, max_txns_to_execute: Option) -> Self { + pub fn transform_to_quorum_store_v2(self, max_txns_to_execute: Option) -> Self { match self { Payload::InQuorumStore(proof_with_status) => Payload::InQuorumStoreWithLimit( ProofWithDataWithTxnLimit::new(proof_with_status, max_txns_to_execute), @@ -237,6 +282,12 @@ impl Payload { Payload::DirectMempool(_) => { panic!("Payload is in direct mempool format"); }, + Payload::OptQuorumStore(mut opt_qs_payload) => { + opt_qs_payload.set_execution_limit(PayloadExecutionLimit::max_txns_to_execute( + max_txns_to_execute, + )); + Payload::OptQuorumStore(opt_qs_payload) + }, } } @@ -268,6 +319,33 @@ impl Payload { .map(|(_, txns)| txns.len()) .sum::() }, + Payload::OptQuorumStore(opt_qs_payload) => opt_qs_payload.num_txns(), + } + } + + pub fn len_for_execution(&self) -> u64 { + match self { + Payload::DirectMempool(txns) => txns.len() as u64, + Payload::InQuorumStore(proof_with_status) => proof_with_status.len() as u64, + 
Payload::InQuorumStoreWithLimit(proof_with_status) => { + // here we return the actual length of the payload; limit is considered at the stage + // where we prepare the block from the payload + (proof_with_status.proof_with_data.len() as u64) + .min(proof_with_status.max_txns_to_execute.unwrap_or(u64::MAX)) + }, + Payload::QuorumStoreInlineHybrid( + inline_batches, + proof_with_data, + max_txns_to_execute, + ) => ((proof_with_data.len() + + inline_batches + .iter() + .map(|(_, txns)| txns.len()) + .sum::()) as u64) + .min(max_txns_to_execute.unwrap_or(u64::MAX)), + Payload::OptQuorumStore(opt_qs_payload) => { + opt_qs_payload.max_txns_to_execute().unwrap_or(u64::MAX) + }, } } @@ -281,6 +359,7 @@ impl Payload { Payload::QuorumStoreInlineHybrid(inline_batches, proof_with_data, _) => { proof_with_data.proofs.is_empty() && inline_batches.is_empty() }, + Payload::OptQuorumStore(opt_qs_payload) => opt_qs_payload.is_empty(), } } @@ -338,6 +417,22 @@ impl Payload { p3.extend(p2); Payload::QuorumStoreInlineHybrid(b2, p3, m3) }, + ( + Payload::QuorumStoreInlineHybrid(_inline_batches, _proofs, _limit), + Payload::OptQuorumStore(_opt_qs), + ) + | ( + Payload::OptQuorumStore(_opt_qs), + Payload::QuorumStoreInlineHybrid(_inline_batches, _proofs, _limit), + ) => { + unimplemented!( + "Cannot extend OptQuorumStore with QuorumStoreInlineHybrid or viceversa" + ) + }, + (Payload::OptQuorumStore(opt_qs1), Payload::OptQuorumStore(opt_qs2)) => { + let opt_qs3 = opt_qs1.extend(opt_qs2); + Payload::OptQuorumStore(opt_qs3) + }, (_, _) => unreachable!(), } } @@ -346,7 +441,11 @@ impl Payload { matches!(self, Payload::DirectMempool(_)) } - /// This is computationally expensive on the first call + pub fn is_quorum_store(&self) -> bool { + !matches!(self, Payload::DirectMempool(_)) + } + + /// This is potentially computationally expensive pub fn size(&self) -> usize { match self { Payload::DirectMempool(txns) => txns @@ -365,6 +464,7 @@ impl Payload { .map(|(batch_info, _)| 
batch_info.num_bytes() as usize) .sum::() }, + Payload::OptQuorumStore(opt_qs_payload) => opt_qs_payload.num_bytes(), } } @@ -417,6 +517,12 @@ impl Payload { } Ok(()) }, + (true, Payload::OptQuorumStore(opt_quorum_store)) => { + let proof_with_data = opt_quorum_store.proof_with_data(); + Self::verify_with_cache(&proof_with_data.batch_summary, validator, proof_cache)?; + // TODO(ibalajiarun): Remove this log when OptQS is enabled. + bail!("OptQuorumStore Payload is not expected yet"); + }, (_, _) => Err(anyhow::anyhow!( "Wrong payload type. Expected Payload::InQuorumStore {} got {} ", quorum_store_enabled, @@ -453,6 +559,9 @@ impl fmt::Display for Payload { proof_with_data.proofs.len() ) }, + Payload::OptQuorumStore(opt_quorum_store) => { + write!(f, "{}", opt_quorum_store) + }, } } } @@ -563,6 +672,14 @@ impl From<&Vec<&Payload>> for PayloadFilter { Payload::DirectMempool(_) => { error!("DirectMempool payload in InQuorumStore filter"); }, + Payload::OptQuorumStore(opt_qs_payload) => { + for batch_info in &opt_qs_payload.opt_batches().batch_summary { + exclude_proofs.insert(batch_info.clone()); + } + for proof in &opt_qs_payload.proof_with_data().batch_summary { + exclude_proofs.insert(proof.info().clone()); + } + }, } } PayloadFilter::InQuorumStore(exclude_proofs) diff --git a/consensus/consensus-types/src/lib.rs b/consensus/consensus-types/src/lib.rs index e398e34ecc15d..c9e555da27a7d 100644 --- a/consensus/consensus-types/src/lib.rs +++ b/consensus/consensus-types/src/lib.rs @@ -13,7 +13,9 @@ pub mod epoch_retrieval; pub mod order_vote; pub mod order_vote_msg; pub mod order_vote_proposal; +pub mod payload; pub mod pipeline; +pub mod pipeline_execution_result; pub mod pipelined_block; pub mod proof_of_store; pub mod proposal_ext; @@ -24,6 +26,7 @@ pub mod request_response; pub mod safety_data; pub mod sync_info; pub mod timeout_2chain; +pub mod utils; pub mod vote; pub mod vote_data; pub mod vote_msg; diff --git a/consensus/consensus-types/src/order_vote_msg.rs 
b/consensus/consensus-types/src/order_vote_msg.rs index 42af12eea7081..1fae85cc25a60 100644 --- a/consensus/consensus-types/src/order_vote_msg.rs +++ b/consensus/consensus-types/src/order_vote_msg.rs @@ -44,7 +44,9 @@ impl OrderVoteMsg { self.order_vote.epoch() } - pub fn verify(&self, validator: &ValidatorVerifier) -> anyhow::Result<()> { + /// This function verifies the order_vote component in the order_vote_msg. + /// The quorum cert is verified in the round manager when the quorum certificate is used. + pub fn verify_order_vote(&self, validator: &ValidatorVerifier) -> anyhow::Result<()> { ensure!( self.quorum_cert().certified_block() == self.order_vote().ledger_info().commit_info(), "QuorumCert and OrderVote do not match" @@ -52,12 +54,6 @@ impl OrderVoteMsg { self.order_vote .verify(validator) .context("[OrderVoteMsg] OrderVote verification failed")?; - - // TODO: As we receive many order votes with the same quroum cert, we could cache it - // without verifying it every time. - self.quorum_cert - .verify(validator) - .context("[OrderVoteMsg QuorumCert verification failed")?; Ok(()) } } diff --git a/consensus/consensus-types/src/payload.rs b/consensus/consensus-types/src/payload.rs new file mode 100644 index 0000000000000..4ce95df5e95e6 --- /dev/null +++ b/consensus/consensus-types/src/payload.rs @@ -0,0 +1,375 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::proof_of_store::{BatchInfo, ProofOfStore}; +use aptos_executor_types::ExecutorResult; +use aptos_infallible::Mutex; +use aptos_types::{transaction::SignedTransaction, PeerId}; +use core::fmt; +use futures::{ + future::{BoxFuture, Shared}, + FutureExt, +}; +use serde::{Deserialize, Serialize}; +use std::{ + fmt::Debug, + ops::{Deref, DerefMut}, + sync::Arc, +}; + +pub trait TDataInfo { + fn num_txns(&self) -> u64; + + fn num_bytes(&self) -> u64; + + fn info(&self) -> &BatchInfo; + + fn signers(&self, ordered_authors: &[PeerId]) -> Vec; +} + +pub struct DataFetchFut 
{ + pub iteration: u32, + pub fut: Shared>>>, +} + +impl fmt::Debug for DataFetchFut { + fn fmt(&self, _: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + Ok(()) + } +} + +impl DataFetchFut { + pub fn extend(&mut self, other: DataFetchFut) { + let self_fut = self.fut.clone(); + self.fut = async move { + let result1 = self_fut.await?; + let result2 = other.fut.await?; + let result = [result1, result2].concat(); + Ok(result) + } + .boxed() + .shared(); + } +} + +#[derive(Deserialize, Serialize, Clone, Debug)] +pub struct BatchPointer { + pub batch_summary: Vec, + #[serde(skip)] + pub data_fut: Arc>>, +} + +impl BatchPointer +where + T: TDataInfo, +{ + pub fn new(metadata: Vec) -> Self { + Self { + batch_summary: metadata, + data_fut: Arc::new(Mutex::new(None)), + } + } + + pub fn extend(&mut self, other: BatchPointer) { + let other_data_status = other.data_fut.lock().take().expect("must be initialized"); + self.batch_summary.extend(other.batch_summary); + let mut status = self.data_fut.lock(); + *status = match &mut *status { + None => Some(other_data_status), + Some(status) => { + status.extend(other_data_status); + return; + }, + }; + } + + pub fn num_txns(&self) -> usize { + self.batch_summary + .iter() + .map(|info| info.num_txns() as usize) + .sum() + } + + pub fn num_bytes(&self) -> usize { + self.batch_summary + .iter() + .map(|info| info.num_bytes() as usize) + .sum() + } + + pub fn is_empty(&self) -> bool { + self.batch_summary.is_empty() + } +} + +impl From> for BatchPointer +where + T: TDataInfo, +{ + fn from(value: Vec) -> Self { + Self { + batch_summary: value, + data_fut: Arc::new(Mutex::new(None)), + } + } +} + +impl PartialEq for BatchPointer { + fn eq(&self, other: &Self) -> bool { + self.batch_summary == other.batch_summary + && Arc::as_ptr(&self.data_fut) == Arc::as_ptr(&other.data_fut) + } +} + +impl Eq for BatchPointer {} + +impl Deref for BatchPointer { + type Target = Vec; + + fn deref(&self) -> &Self::Target { + &self.batch_summary + } +} 
+ +impl IntoIterator for BatchPointer { + type IntoIter = std::vec::IntoIter; + type Item = T; + + fn into_iter(self) -> Self::IntoIter { + self.batch_summary.into_iter() + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +pub enum PayloadExecutionLimit { + None, + MaxTransactionsToExecute(u64), +} + +impl PayloadExecutionLimit { + pub(crate) fn extend(&mut self, other: PayloadExecutionLimit) { + *self = match (&self, &other) { + (PayloadExecutionLimit::None, _) => other, + (_, PayloadExecutionLimit::None) => return, + ( + PayloadExecutionLimit::MaxTransactionsToExecute(limit1), + PayloadExecutionLimit::MaxTransactionsToExecute(limit2), + ) => PayloadExecutionLimit::MaxTransactionsToExecute(*limit1 + *limit2), + }; + } + + pub(crate) fn max_txns_to_execute(limit: Option) -> Self { + limit.map_or(PayloadExecutionLimit::None, |val| { + PayloadExecutionLimit::MaxTransactionsToExecute(val) + }) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +pub struct InlineBatch { + batch_info: BatchInfo, + transactions: Vec, +} + +impl InlineBatch { + pub fn new(batch_info: BatchInfo, transactions: Vec) -> Self { + Self { + batch_info, + transactions, + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +pub struct InlineBatches(Vec); + +impl InlineBatches { + fn num_txns(&self) -> usize { + self.0 + .iter() + .map(|batch| batch.batch_info.num_txns() as usize) + .sum() + } + + fn num_bytes(&self) -> usize { + self.0 + .iter() + .map(|batch| batch.batch_info.num_bytes() as usize) + .sum() + } + + fn is_empty(&self) -> bool { + self.0.is_empty() + } + + pub fn transactions(&self) -> Vec { + self.0 + .iter() + .flat_map(|inline_batch| inline_batch.transactions.clone()) + .collect() + } + + pub fn batch_infos(&self) -> Vec { + self.0 + .iter() + .map(|inline_batch| inline_batch.batch_info.clone()) + .collect() + } +} + +impl From> for InlineBatches { + fn from(value: Vec) -> Self { + Self(value) + } +} + +impl 
From)>> for InlineBatches { + fn from(value: Vec<(BatchInfo, Vec)>) -> Self { + value + .into_iter() + .map(|(batch_info, transactions)| InlineBatch::new(batch_info, transactions)) + .collect::>() + .into() + } +} + +impl Deref for InlineBatches { + type Target = Vec; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for InlineBatches { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +pub struct OptQuorumStorePayloadV1 { + inline_batches: InlineBatches, + opt_batches: BatchPointer, + proofs: BatchPointer, + execution_limits: PayloadExecutionLimit, +} + +impl OptQuorumStorePayloadV1 { + pub fn get_all_batch_infos(self) -> Vec { + let Self { + inline_batches, + opt_batches, + proofs, + execution_limits: _, + } = self; + inline_batches + .0 + .into_iter() + .map(|batch| batch.batch_info) + .chain(opt_batches) + .chain(proofs.into_iter().map(|proof| proof.info().clone())) + .collect() + } + + pub fn max_txns_to_execute(&self) -> Option { + match self.execution_limits { + PayloadExecutionLimit::None => None, + PayloadExecutionLimit::MaxTransactionsToExecute(max) => Some(max), + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +pub enum OptQuorumStorePayload { + V1(OptQuorumStorePayloadV1), +} + +impl OptQuorumStorePayload { + pub fn new( + inline_batches: InlineBatches, + opt_batches: BatchPointer, + proofs: BatchPointer, + execution_limits: PayloadExecutionLimit, + ) -> Self { + Self::V1(OptQuorumStorePayloadV1 { + inline_batches, + opt_batches, + proofs, + execution_limits, + }) + } + + pub(crate) fn num_txns(&self) -> usize { + self.opt_batches.num_txns() + self.proofs.num_txns() + self.inline_batches.num_txns() + } + + pub(crate) fn is_empty(&self) -> bool { + self.opt_batches.is_empty() && self.proofs.is_empty() && self.inline_batches.is_empty() + } + + pub(crate) fn extend(mut self, other: Self) -> Self { + let other: 
OptQuorumStorePayloadV1 = other.into_inner(); + self.inline_batches.extend(other.inline_batches.0); + self.opt_batches.extend(other.opt_batches); + self.proofs.extend(other.proofs); + self.execution_limits.extend(other.execution_limits); + self + } + + pub(crate) fn num_bytes(&self) -> usize { + self.opt_batches.num_bytes() + self.proofs.num_bytes() + self.inline_batches.num_bytes() + } + + pub fn into_inner(self) -> OptQuorumStorePayloadV1 { + match self { + OptQuorumStorePayload::V1(opt_qs_payload) => opt_qs_payload, + } + } + + pub fn inline_batches(&self) -> &InlineBatches { + &self.inline_batches + } + + pub fn proof_with_data(&self) -> &BatchPointer { + &self.proofs + } + + pub fn opt_batches(&self) -> &BatchPointer { + &self.opt_batches + } + + pub fn set_execution_limit(&mut self, execution_limits: PayloadExecutionLimit) { + self.execution_limits = execution_limits; + } +} + +impl Deref for OptQuorumStorePayload { + type Target = OptQuorumStorePayloadV1; + + fn deref(&self) -> &Self::Target { + match self { + OptQuorumStorePayload::V1(opt_qs_payload) => opt_qs_payload, + } + } +} + +impl DerefMut for OptQuorumStorePayload { + fn deref_mut(&mut self) -> &mut Self::Target { + match self { + OptQuorumStorePayload::V1(opt_qs_payload) => opt_qs_payload, + } + } +} + +impl fmt::Display for OptQuorumStorePayload { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "OptQuorumStorePayload(opt_batches: {}, proofs: {}, limits: {:?})", + self.opt_batches.num_txns(), + self.proofs.num_txns(), + self.execution_limits, + ) + } +} diff --git a/consensus/consensus-types/src/pipeline/commit_decision.rs b/consensus/consensus-types/src/pipeline/commit_decision.rs index 102d4e0d3c5ff..8eaae44e8646c 100644 --- a/consensus/consensus-types/src/pipeline/commit_decision.rs +++ b/consensus/consensus-types/src/pipeline/commit_decision.rs @@ -58,4 +58,8 @@ impl CommitDecision { .verify_signatures(validator) .context("Failed to verify Commit Decision") } + + 
pub fn into_inner(self) -> LedgerInfoWithSignatures { + self.ledger_info + } } diff --git a/consensus/consensus-types/src/pipeline_execution_result.rs b/consensus/consensus-types/src/pipeline_execution_result.rs new file mode 100644 index 0000000000000..03b59ee25c283 --- /dev/null +++ b/consensus/consensus-types/src/pipeline_execution_result.rs @@ -0,0 +1,34 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use aptos_executor_types::{ExecutorResult, StateComputeResult}; +use aptos_types::transaction::SignedTransaction; +use derivative::Derivative; +use futures::future::BoxFuture; +use std::time::Duration; + +#[derive(Derivative)] +#[derivative(Debug)] +pub struct PipelineExecutionResult { + pub input_txns: Vec, + pub result: StateComputeResult, + pub execution_time: Duration, + #[derivative(Debug = "ignore")] + pub pre_commit_fut: BoxFuture<'static, ExecutorResult<()>>, +} + +impl PipelineExecutionResult { + pub fn new( + input_txns: Vec, + result: StateComputeResult, + execution_time: Duration, + pre_commit_fut: BoxFuture<'static, ExecutorResult<()>>, + ) -> Self { + Self { + input_txns, + result, + execution_time, + pre_commit_fut, + } + } +} diff --git a/consensus/consensus-types/src/pipelined_block.rs b/consensus/consensus-types/src/pipelined_block.rs index 2935cba7dedd5..375dd93a0a1cf 100644 --- a/consensus/consensus-types/src/pipelined_block.rs +++ b/consensus/consensus-types/src/pipelined_block.rs @@ -6,15 +6,23 @@ use crate::{ block::Block, common::{Payload, Round}, order_vote_proposal::OrderVoteProposal, + pipeline_execution_result::PipelineExecutionResult, quorum_cert::QuorumCert, vote_proposal::VoteProposal, }; -use aptos_crypto::hash::HashValue; -use aptos_executor_types::StateComputeResult; +use aptos_crypto::hash::{HashValue, ACCUMULATOR_PLACEHOLDER_HASH}; +use aptos_executor_types::{ExecutorResult, StateComputeResult}; +use aptos_infallible::Mutex; +use aptos_logger::{error, warn}; use aptos_types::{ - 
block_info::BlockInfo, contract_event::ContractEvent, randomness::Randomness, - transaction::SignedTransaction, validator_txn::ValidatorTransaction, + block_info::BlockInfo, + contract_event::ContractEvent, + randomness::Randomness, + transaction::{SignedTransaction, TransactionStatus}, + validator_txn::ValidatorTransaction, }; +use derivative::Derivative; +use futures::future::BoxFuture; use once_cell::sync::OnceCell; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use std::{ @@ -26,7 +34,8 @@ use std::{ /// A representation of a block that has been added to the execution pipeline. It might either be in ordered /// or in executed state. In the ordered state, the block is waiting to be executed. In the executed state, /// the block has been executed and the output is available. -#[derive(Clone, Eq, PartialEq)] +#[derive(Derivative, Clone)] +#[derivative(Eq, PartialEq)] pub struct PipelinedBlock { /// Block data that cannot be regenerated. block: Block, @@ -38,6 +47,9 @@ pub struct PipelinedBlock { state_compute_result: StateComputeResult, randomness: OnceCell, pipeline_insertion_time: OnceCell, + execution_summary: Arc>, + #[derivative(PartialEq = "ignore")] + pre_commit_fut: Arc>>>>, } impl Serialize for PipelinedBlock { @@ -91,6 +103,8 @@ impl<'de> Deserialize<'de> for PipelinedBlock { state_compute_result, randomness: OnceCell::new(), pipeline_insertion_time: OnceCell::new(), + execution_summary: Arc::new(OnceCell::new()), + pre_commit_fut: Arc::new(Mutex::new(None)), }; if let Some(r) = randomness { block.set_randomness(r); @@ -102,14 +116,69 @@ impl<'de> Deserialize<'de> for PipelinedBlock { impl PipelinedBlock { pub fn set_execution_result( mut self, - input_transactions: Vec, - result: StateComputeResult, + pipeline_execution_result: PipelineExecutionResult, ) -> Self { + let PipelineExecutionResult { + input_txns, + result, + execution_time, + pre_commit_fut, + } = pipeline_execution_result; + self.state_compute_result = result; - 
self.input_transactions = input_transactions; + self.input_transactions = input_txns; + self.pre_commit_fut = Arc::new(Mutex::new(Some(pre_commit_fut))); + + let mut to_commit = 0; + let mut to_retry = 0; + for txn in self.state_compute_result.compute_status_for_input_txns() { + match txn { + TransactionStatus::Keep(_) => to_commit += 1, + TransactionStatus::Retry => to_retry += 1, + _ => {}, + } + } + + let execution_summary = ExecutionSummary { + payload_len: self + .block + .payload() + .map_or(0, |payload| payload.len_for_execution()), + to_commit, + to_retry, + execution_time, + root_hash: self.state_compute_result.root_hash(), + }; + + // We might be retrying execution, so it might have already been set. + // Because we use this for statistics, it's ok that we drop the newer value. + if let Some(previous) = self.execution_summary.get() { + if previous.root_hash == execution_summary.root_hash + || previous.root_hash == *ACCUMULATOR_PLACEHOLDER_HASH + { + warn!( + "Skipping re-inserting execution result, from {:?} to {:?}", + previous, execution_summary + ); + } else { + error!( + "Re-inserting execution result with different root hash: from {:?} to {:?}", + previous, execution_summary + ); + } + } else { + self.execution_summary + .set(execution_summary) + .expect("inserting into empty execution summary"); + } self } + #[cfg(any(test, feature = "fuzzing"))] + pub fn mark_successful_pre_commit_for_test(&self) { + *self.pre_commit_fut.lock() = Some(Box::pin(async { Ok(()) })); + } + pub fn set_randomness(&self, randomness: Randomness) { assert!(self.randomness.set(randomness).is_ok()); } @@ -117,6 +186,13 @@ impl PipelinedBlock { pub fn set_insertion_time(&self) { assert!(self.pipeline_insertion_time.set(Instant::now()).is_ok()); } + + pub fn take_pre_commit_fut(&self) -> BoxFuture<'static, ExecutorResult<()>> { + self.pre_commit_fut + .lock() + .take() + .expect("pre_commit_result_rx missing.") + } } impl Debug for PipelinedBlock { @@ -143,6 +219,8 @@ impl 
PipelinedBlock { state_compute_result, randomness: OnceCell::new(), pipeline_insertion_time: OnceCell::new(), + execution_summary: Arc::new(OnceCell::new()), + pre_commit_fut: Arc::new(Mutex::new(None)), } } @@ -153,6 +231,8 @@ impl PipelinedBlock { state_compute_result: StateComputeResult::new_dummy(), randomness: OnceCell::new(), pipeline_insertion_time: OnceCell::new(), + execution_summary: Arc::new(OnceCell::new()), + pre_commit_fut: Arc::new(Mutex::new(None)), } } @@ -250,4 +330,17 @@ impl PipelinedBlock { pub fn elapsed_in_pipeline(&self) -> Option { self.pipeline_insertion_time.get().map(|t| t.elapsed()) } + + pub fn get_execution_summary(&self) -> Option { + self.execution_summary.get().cloned() + } +} + +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct ExecutionSummary { + pub payload_len: u64, + pub to_commit: u64, + pub to_retry: u64, + pub execution_time: Duration, + pub root_hash: HashValue, } diff --git a/consensus/consensus-types/src/proof_of_store.rs b/consensus/consensus-types/src/proof_of_store.rs index 13aaee0181fbc..2110b72c508de 100644 --- a/consensus/consensus-types/src/proof_of_store.rs +++ b/consensus/consensus-types/src/proof_of_store.rs @@ -1,6 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 +use crate::{payload::TDataInfo, utils::PayloadTxnsSize}; use anyhow::{bail, ensure, Context}; use aptos_crypto::{ed25519, CryptoMaterialError, HashValue}; use aptos_crypto_derive::{BCSCryptoHash, CryptoHasher}; @@ -130,12 +131,12 @@ impl BatchInfo { self.num_bytes } - pub fn gas_bucket_start(&self) -> u64 { - self.gas_bucket_start + pub fn size(&self) -> PayloadTxnsSize { + PayloadTxnsSize::new(self.num_txns, self.num_bytes) } - pub fn is_expired(&self) -> bool { - self.expiration() < aptos_infallible::duration_since_epoch().as_micros() as u64 + pub fn gas_bucket_start(&self) -> u64 { + self.gas_bucket_start } } @@ -145,6 +146,24 @@ impl Display for BatchInfo { } } +impl TDataInfo for BatchInfo { + fn num_txns(&self) 
-> u64 { + self.num_txns() + } + + fn num_bytes(&self) -> u64 { + self.num_bytes() + } + + fn info(&self) -> &BatchInfo { + self + } + + fn signers(&self, _ordered_authors: &[PeerId]) -> Vec { + vec![self.author()] + } +} + #[derive(Clone, Debug, Deserialize, Serialize)] pub struct SignedBatchInfoMsg { signed_infos: Vec, @@ -269,6 +288,7 @@ pub enum SignedBatchInfoError { InvalidAuthor, NotFound, AlreadyCommitted, + NoTimeStamps, } #[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq)] @@ -350,10 +370,8 @@ impl ProofOfStore { result } - pub fn shuffled_signers(&self, validator: &ValidatorVerifier) -> Vec { - let mut ret: Vec = self - .multi_signature - .get_signers_addresses(&validator.get_ordered_account_addresses()); + pub fn shuffled_signers(&self, ordered_authors: &[PeerId]) -> Vec { + let mut ret: Vec = self.multi_signature.get_signers_addresses(ordered_authors); ret.shuffle(&mut thread_rng()); ret } @@ -374,3 +392,21 @@ impl Deref for ProofOfStore { &self.info } } + +impl TDataInfo for ProofOfStore { + fn num_txns(&self) -> u64 { + self.num_txns + } + + fn num_bytes(&self) -> u64 { + self.num_bytes + } + + fn info(&self) -> &BatchInfo { + self.info() + } + + fn signers(&self, ordered_authors: &[PeerId]) -> Vec { + self.shuffled_signers(ordered_authors) + } +} diff --git a/consensus/consensus-types/src/request_response.rs b/consensus/consensus-types/src/request_response.rs index a28082b6c0c7f..c650141e7878a 100644 --- a/consensus/consensus-types/src/request_response.rs +++ b/consensus/consensus-types/src/request_response.rs @@ -1,47 +1,47 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::common::{Payload, PayloadFilter}; +use crate::{ + common::{Payload, PayloadFilter}, + utils::PayloadTxnsSize, +}; use anyhow::Result; use futures::channel::oneshot; -use std::{fmt, fmt::Formatter}; +use std::{fmt, fmt::Formatter, time::Duration}; + +pub struct GetPayloadRequest { + // max number of transactions in the block + pub 
max_txns: PayloadTxnsSize, + // max number of transactions after filtering in the block + pub max_txns_after_filtering: u64, + // soft max number of transactions after filtering in the block (i.e. include one that crosses it) + pub soft_max_txns_after_filtering: u64, + // target txns with opt batches in max_txns as pct + pub opt_batch_txns_pct: u8, + // max number of inline transactions (transactions without a proof of store) + pub max_inline_txns: PayloadTxnsSize, + // return non full + pub return_non_full: bool, + // block payloads to exclude from the requested block + pub filter: PayloadFilter, + // callback to respond to + pub callback: oneshot::Sender>, + // block timestamp + pub block_timestamp: Duration, +} pub enum GetPayloadCommand { /// Request to pull block to submit to consensus. - GetPayloadRequest( - // max block size - u64, - // max byte size - u64, - // max number of inline transactions (transactions without a proof of store) - u64, - // max byte size of inline transactions (transactions without a proof of store) - u64, - // return non full - bool, - // block payloads to exclude from the requested block - PayloadFilter, - // callback to respond to - oneshot::Sender>, - ), + GetPayloadRequest(GetPayloadRequest), } impl fmt::Display for GetPayloadCommand { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { - GetPayloadCommand::GetPayloadRequest( - max_txns, - max_bytes, - max_inline_txns, - max_inline_bytes, - return_non_full, - excluded, - _, - ) => { - write!( - f, - "GetPayloadRequest [max_txns: {}, max_bytes: {}, max_inline_txns: {}, max_inline_bytes:{}, return_non_full: {}, excluded: {}]", - max_txns, max_bytes, max_inline_txns, max_inline_bytes, return_non_full, excluded + GetPayloadCommand::GetPayloadRequest(request) => { + write!(f, + "GetPayloadRequest [max_txns: {}, max_txns_after_filtering: {}, soft_max_txns_after_filtering: {}, max_inline_txns: {}, return_non_full: {}, block_timestamp: {:?}]", + request.max_txns, 
request.max_txns_after_filtering, request.soft_max_txns_after_filtering, request.max_inline_txns, request.return_non_full, request.block_timestamp ) }, } diff --git a/consensus/consensus-types/src/timeout_2chain.rs b/consensus/consensus-types/src/timeout_2chain.rs index e69c7cc8e9d0c..07a4a524ce36e 100644 --- a/consensus/consensus-types/src/timeout_2chain.rs +++ b/consensus/consensus-types/src/timeout_2chain.rs @@ -167,7 +167,7 @@ impl TwoChainTimeoutCertificate { .rounds() .iter() .max() - .expect("Empty rounds"); + .ok_or_else(|| anyhow::anyhow!("Empty rounds"))?; ensure!( hqc_round == *signed_hqc, "Inconsistent hqc round, qc has round {}, highest signed round {}", diff --git a/consensus/consensus-types/src/utils.rs b/consensus/consensus-types/src/utils.rs new file mode 100644 index 0000000000000..091ea0bdaca1c --- /dev/null +++ b/consensus/consensus-types/src/utils.rs @@ -0,0 +1,266 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use anyhow::ensure; +use aptos_logger::warn; +use core::fmt; +use serde::Serialize; +use std::cmp::{max, Ordering}; + +/// This struct always ensures the following invariants: +/// * count <= bytes +/// * (count > 0 && bytes > 0) || (count == 0 && bytes == 0) +#[derive(Debug, Clone, Copy, Serialize, Default)] +pub struct PayloadTxnsSize { + count: u64, + bytes: u64, +} + +impl PayloadTxnsSize { + pub fn new(count: u64, bytes: u64) -> Self { + match Self::try_new(count, bytes) { + Ok(txns_size) => txns_size, + Err(err) => { + warn!( + "Invalid input for PayloadTxnsSize. Normalizing. 
Count: {}, Bytes: {}, Err: {}", + count, bytes, err + ); + Self::new_normalized(count, bytes) + }, + } + } + + fn new_normalized(count: u64, bytes: u64) -> Self { + let mut count = count; + let mut bytes = bytes; + if count > bytes { + bytes = count; + } + if count == 0 || bytes == 0 { + count = 0; + bytes = 0; + } + Self { count, bytes } + } + + fn try_new(count: u64, bytes: u64) -> anyhow::Result { + ensure!(count <= bytes); + ensure!((count > 0 && bytes > 0) || (count == 0 && bytes == 0)); + + Ok(Self { count, bytes }) + } + + pub fn zero() -> Self { + Self { count: 0, bytes: 0 } + } + + pub fn count(&self) -> u64 { + self.count + } + + pub fn size_in_bytes(&self) -> u64 { + self.bytes + } + + pub fn compute_pct(self, pct: u8) -> Self { + Self::new_normalized(self.count * pct as u64 / 100, self.bytes * pct as u64 / 100) + } + + pub fn saturating_sub(self, rhs: Self) -> Self { + Self::new_normalized( + self.count.saturating_sub(rhs.count), + self.bytes.saturating_sub(rhs.bytes), + ) + } + + pub fn set_count(&mut self, new_count: u64) { + if let Err(e) = self.try_set_count(new_count) { + warn!( + "Invalid set count. Resetting bytes. new_count: {}, Self: {}, Error: {}", + new_count, self, e + ); + *self = Self::new_normalized(new_count, new_count); + } + } + + pub fn try_set_count(&mut self, new_count: u64) -> anyhow::Result<()> { + *self = Self::try_new(new_count, self.bytes)?; + Ok(()) + } + + /// Computes a new [PayloadTxnsSize] whose size in bytes is the passed-in value and the + /// count is calculated proportional to bytes. If the existing PayloadTxnsSize is zero + /// then the new size replaces both the count and size in bytes. + pub fn compute_with_bytes(&self, new_size_in_bytes: u64) -> PayloadTxnsSize { + let new_count = if self.bytes > 0 { + let factor = new_size_in_bytes as f64 / self.bytes as f64; + max((self.count as f64 * factor) as u64, 1u64) + } else { + // If bytes is zero, then count is zero. 
In this case, set the new + // count to be the same as bytes. + new_size_in_bytes + }; + PayloadTxnsSize::new_normalized(new_count, new_size_in_bytes) + } + + pub fn minimum(self, other: Self) -> Self { + let count = self.count.min(other.count); + let bytes = self.bytes.min(other.bytes); + PayloadTxnsSize::new_normalized(count, bytes) + } + + pub fn maximum(self, other: Self) -> Self { + let count = self.count.max(other.count); + let bytes = self.bytes.max(other.bytes); + PayloadTxnsSize::new_normalized(count, bytes) + } +} + +impl std::ops::Add for PayloadTxnsSize { + type Output = Self; + + fn add(self, rhs: Self) -> Self::Output { + Self::new_normalized(self.count + rhs.count, self.bytes + rhs.bytes) + } +} + +impl std::ops::AddAssign for PayloadTxnsSize { + fn add_assign(&mut self, rhs: Self) { + *self = Self::new_normalized(self.count + rhs.count, self.bytes + rhs.bytes); + } +} + +impl std::ops::Sub for PayloadTxnsSize { + type Output = Self; + + fn sub(self, rhs: Self) -> Self::Output { + Self::new_normalized(self.count - rhs.count, self.bytes - rhs.bytes) + } +} + +impl std::ops::SubAssign for PayloadTxnsSize { + fn sub_assign(&mut self, rhs: Self) { + *self = Self::new_normalized(self.count - rhs.count, self.bytes - rhs.bytes); + } +} + +impl PartialEq for PayloadTxnsSize { + fn eq(&self, other: &Self) -> bool { + self.count == other.count && self.bytes == other.bytes + } +} + +impl PartialOrd for PayloadTxnsSize { + fn partial_cmp(&self, other: &Self) -> Option { + if self.count == other.count && self.bytes == other.bytes { + return Some(Ordering::Equal); + } + + if self.count > other.count || self.bytes > other.bytes { + return Some(Ordering::Greater); + } + + if self.count < other.count && self.bytes < other.bytes { + return Some(Ordering::Less); + } + + None + } +} + +impl Eq for PayloadTxnsSize {} + +impl fmt::Display for PayloadTxnsSize { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "PayloadTxnsSize[count: 
{}, bytes: {}]", + self.count, self.bytes + ) + } +} + +#[cfg(test)] +mod tests { + use super::PayloadTxnsSize; + + #[test] + fn test_payload_txns_size_operations() { + let txns_size = PayloadTxnsSize::new(100, 100); + assert_eq!(txns_size.compute_pct(90), PayloadTxnsSize::new(90, 90)); + assert_eq!(txns_size.compute_pct(50), PayloadTxnsSize::new(50, 50)); + + let mut txns_size2 = txns_size; + txns_size2.set_count(50); + assert_eq!(txns_size2, PayloadTxnsSize::new(50, 100)); + txns_size2.set_count(200); + assert_eq!(txns_size2, PayloadTxnsSize::new(200, 200)); + + let txns_size3 = txns_size; + let txns_size4 = txns_size; + assert_eq!(txns_size3 + txns_size4, PayloadTxnsSize::new(200, 200)); + assert_eq!(txns_size3 - txns_size4, PayloadTxnsSize::zero()); + + let mut txns_size5 = txns_size; + txns_size5 += txns_size3; + assert_eq!(txns_size5, PayloadTxnsSize::new(200, 200)); + txns_size5 -= txns_size3; + assert_eq!(txns_size5, PayloadTxnsSize::new(100, 100)); + + assert_eq!( + txns_size.compute_with_bytes(200), + PayloadTxnsSize::new(200, 200) + ); + assert_eq!( + txns_size.compute_with_bytes(50), + PayloadTxnsSize::new(50, 50) + ); + + assert_eq!( + txns_size.saturating_sub(txns_size2), + PayloadTxnsSize::zero() + ); + assert_eq!( + txns_size2.saturating_sub(txns_size), + PayloadTxnsSize::new(100, 100) + ); + + let txns_size5 = PayloadTxnsSize::zero(); + assert_eq!( + txns_size5.compute_with_bytes(100), + PayloadTxnsSize::new(100, 100) + ); + + let txns_size6 = PayloadTxnsSize::new(10, 30); + let txns_size7 = PayloadTxnsSize::new(20, 20); + assert_eq!(txns_size6.minimum(txns_size7), PayloadTxnsSize::new(10, 20)); + assert_eq!(txns_size6.maximum(txns_size7), PayloadTxnsSize::new(20, 30)); + + assert_eq!( + txns_size6.saturating_sub(txns_size7), + PayloadTxnsSize::zero() + ); + + assert_eq!( + PayloadTxnsSize::try_new(100, 0).unwrap_err().to_string(), + "Condition failed: `count <= bytes` (100 vs 0)" + ); + assert_eq!( + PayloadTxnsSize::try_new(100, 
10).unwrap_err().to_string(), + "Condition failed: `count <= bytes` (100 vs 10)" + ); + + let mut txns_size8 = txns_size; + assert_eq!( + txns_size8.try_set_count(200).unwrap_err().to_string(), + "Condition failed: `count <= bytes` (200 vs 100)" + ); + txns_size8.set_count(200); + assert_eq!(txns_size8, PayloadTxnsSize::new(200, 200)); + + let txns_size9 = PayloadTxnsSize::new(3, 3000); + let txns_size10 = PayloadTxnsSize::new(2, 100); + let txns_size11 = PayloadTxnsSize::new(2, 200); + assert!(txns_size10 + txns_size11 > txns_size9); + } +} diff --git a/consensus/consensus-types/src/wrapped_ledger_info.rs b/consensus/consensus-types/src/wrapped_ledger_info.rs index 6125f85ca2c94..ee254af17304b 100644 --- a/consensus/consensus-types/src/wrapped_ledger_info.rs +++ b/consensus/consensus-types/src/wrapped_ledger_info.rs @@ -77,6 +77,10 @@ impl WrappedLedgerInfo { &self.signed_ledger_info } + pub fn epoch(&self) -> u64 { + self.ledger_info().ledger_info().epoch() + } + pub fn commit_info(&self) -> &BlockInfo { self.ledger_info().ledger_info().commit_info() } diff --git a/consensus/safety-rules/Cargo.toml b/consensus/safety-rules/Cargo.toml index 3132a07dba70d..962b14c1ceabc 100644 --- a/consensus/safety-rules/Cargo.toml +++ b/consensus/safety-rules/Cargo.toml @@ -13,7 +13,6 @@ repository = { workspace = true } rust-version = { workspace = true } [dependencies] -anyhow = { workspace = true } aptos-config = { workspace = true } aptos-consensus-types = { workspace = true } aptos-crypto = { workspace = true } @@ -25,6 +24,7 @@ aptos-secure-net = { workspace = true } aptos-secure-storage = { workspace = true } aptos-types = { workspace = true } aptos-vault-client = { workspace = true } +hex = { workspace = true } once_cell = { workspace = true } proptest = { workspace = true, optional = true } rand = { workspace = true } diff --git a/consensus/safety-rules/benches/safety_rules.rs b/consensus/safety-rules/benches/safety_rules.rs index 772df9c01747b..3ea5bb82ffc5f 100644 --- 
a/consensus/safety-rules/benches/safety_rules.rs +++ b/consensus/safety-rules/benches/safety_rules.rs @@ -2,6 +2,8 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::unwrap_used)] + use aptos_consensus_types::block::block_test_utils; use aptos_safety_rules::{test_utils, PersistentSafetyStorage, SafetyRulesManager, TSafetyRules}; use aptos_secure_storage::{InMemoryStorage, KVStorage, OnDiskStorage, Storage, VaultStorage}; diff --git a/consensus/safety-rules/src/counters.rs b/consensus/safety-rules/src/counters.rs index 026370998196a..91ed83c7a6f1e 100644 --- a/consensus/safety-rules/src/counters.rs +++ b/consensus/safety-rules/src/counters.rs @@ -2,6 +2,8 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::unwrap_used)] + use aptos_metrics_core::{ register_histogram_vec, register_int_counter_vec, register_int_gauge_vec, HistogramTimer, HistogramVec, IntCounterVec, IntGaugeVec, diff --git a/consensus/safety-rules/src/error.rs b/consensus/safety-rules/src/error.rs index 19f71a101df7f..980a3269d30c8 100644 --- a/consensus/safety-rules/src/error.rs +++ b/consensus/safety-rules/src/error.rs @@ -70,6 +70,7 @@ impl From for Error { } impl From for Error { + #[allow(clippy::fallible_impl_from)] fn from(error: aptos_secure_net::Error) -> Self { Self::InternalError(error.to_string()) } diff --git a/consensus/safety-rules/src/fuzzing_utils.rs b/consensus/safety-rules/src/fuzzing_utils.rs index 22b9d5ca5ee7c..c9438d27617fa 100644 --- a/consensus/safety-rules/src/fuzzing_utils.rs +++ b/consensus/safety-rules/src/fuzzing_utils.rs @@ -2,7 +2,7 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. 
// SPDX-License-Identifier: Apache-2.0 -#![allow(clippy::arc_with_non_send_sync)] +#![allow(clippy::arc_with_non_send_sync, clippy::unwrap_used)] use crate::serializer::SafetyRulesInput; #[cfg(any(test, feature = "fuzzing"))] diff --git a/consensus/safety-rules/src/persistent_safety_storage.rs b/consensus/safety-rules/src/persistent_safety_storage.rs index 570267a584338..77dab6ae44e4f 100644 --- a/consensus/safety-rules/src/persistent_safety_storage.rs +++ b/consensus/safety-rules/src/persistent_safety_storage.rs @@ -96,16 +96,32 @@ impl PersistentSafetyStorage { Ok(self.internal_store.get(OWNER_ACCOUNT).map(|v| v.value)?) } - pub fn consensus_key_for_version( + pub fn consensus_sk_by_pk( &self, - version: ed25519::PublicKey, + pk: ed25519::PublicKey, ) -> Result { let _timer = counters::start_timer("get", CONSENSUS_KEY); - let key: ed25519::PrivateKey = self.internal_store.get(CONSENSUS_KEY).map(|v| v.value)?; - if key.public_key() != version { + let pk_hex = hex::encode(pk.to_bytes()); + let explicit_storage_key = format!("{}_{}", CONSENSUS_KEY, pk_hex); + let explicit_sk = self + .internal_store + .get::(explicit_storage_key.as_str()) + .map(|v| v.value); + let default_sk = self + .internal_store + .get::(CONSENSUS_KEY) + .map(|v| v.value); + let key = match (explicit_sk, default_sk) { + (Ok(sk_0), _) => sk_0, + (Err(_), Ok(sk_1)) => sk_1, + (Err(_), Err(_)) => { + return Err(Error::ValidatorKeyNotFound("not found!".to_string())); + }, + }; + if key.public_key() != pk { return Err(Error::SecureStorageMissingDataError(format!( - "PrivateKey for {:?} not found", - version + "Incorrect sk saved for {:?} the expected pk", + pk ))); } Ok(key) @@ -164,7 +180,6 @@ impl PersistentSafetyStorage { Ok(()) } - #[cfg(any(test, feature = "testing"))] pub fn internal_store(&mut self) -> &mut Storage { &mut self.internal_store } diff --git a/consensus/safety-rules/src/safety_rules.rs b/consensus/safety-rules/src/safety_rules.rs index 285c04a15aa37..a367f0c8d2fbc 100644 --- 
a/consensus/safety-rules/src/safety_rules.rs +++ b/consensus/safety-rules/src/safety_rules.rs @@ -32,7 +32,7 @@ use aptos_types::{ waypoint::Waypoint, }; use serde::Serialize; -use std::cmp::Ordering; +use std::{cmp::Ordering, sync::Arc}; pub(crate) fn next_round(round: Round) -> Result { u64::checked_add(round, 1).ok_or(Error::IncorrectRound(round)) @@ -316,13 +316,10 @@ impl SafetyRules { Ok(()) } else { // Try to export the consensus key directly from storage. - match self - .persistent_storage - .consensus_key_for_version(expected_key) - { + match self.persistent_storage.consensus_sk_by_pk(expected_key) { Ok(consensus_key) => { self.validator_signer = - Some(ValidatorSigner::new(author, consensus_key)); + Some(ValidatorSigner::new(author, Arc::new(consensus_key))); Ok(()) }, Err(Error::SecureStorageMissingDataError(error)) => { @@ -377,7 +374,17 @@ impl SafetyRules { let old_ledger_info = ledger_info.ledger_info(); - if !old_ledger_info.commit_info().is_ordered_only() { + if !old_ledger_info.commit_info().is_ordered_only() + // When doing fast forward sync, we pull the latest blocks and quorum certs from peers + // and store them in storage. We then compute the root ordered cert and root commit cert + // from storage and start the consensus from there. But given that we are not storing the + // ordered cert obtained from order votes in storage, instead of obtaining the root ordered cert + // from storage, we set root ordered cert to commit certificate. + // This means, the root ordered cert will not have a dummy executed_state_id in this case. + // To handle this, we do not raise error if the old_ledger_info.commit_info() matches with + // new_ledger_info.commit_info(). 
+ && old_ledger_info.commit_info() != new_ledger_info.commit_info() + { return Err(Error::InvalidOrderedLedgerInfo(old_ledger_info.to_string())); } diff --git a/consensus/safety-rules/src/safety_rules_manager.rs b/consensus/safety-rules/src/safety_rules_manager.rs index 5652f40b159e3..418f1e0511b22 100644 --- a/consensus/safety-rules/src/safety_rules_manager.rs +++ b/consensus/safety-rules/src/safety_rules_manager.rs @@ -11,13 +11,13 @@ use crate::{ thread::ThreadService, SafetyRules, TSafetyRules, }; -use anyhow::anyhow; use aptos_config::config::{InitialSafetyRulesConfig, SafetyRulesConfig, SafetyRulesService}; -use aptos_crypto::ed25519::PrivateKey; +use aptos_crypto::ed25519::PublicKey; use aptos_global_constants::CONSENSUS_KEY; use aptos_infallible::RwLock; +use aptos_logger::{info, warn}; use aptos_secure_storage::{KVStorage, Storage}; -use std::{net::SocketAddr, sync::Arc}; +use std::{net::SocketAddr, sync::Arc, time::Instant}; pub fn storage(config: &SafetyRulesConfig) -> PersistentSafetyStorage { let backend = &config.backend; @@ -45,14 +45,17 @@ pub fn storage(config: &SafetyRulesConfig) -> PersistentSafetyStorage { } else { let storage = PersistentSafetyStorage::new(internal_storage, config.enable_cached_safety_data); - // If it's initialized, then we can continue - if storage.author().is_ok() { + + let mut storage = if storage.author().is_ok() { storage } else if !matches!( config.initial_safety_rules_config, InitialSafetyRulesConfig::None ) { - let identity_blob = config.initial_safety_rules_config.identity_blob().unwrap(); + let identity_blob = config + .initial_safety_rules_config + .identity_blob() + .expect("No identity blob in initial safety rules config"); let waypoint = config.initial_safety_rules_config.waypoint(); let backend = &config.backend; @@ -72,19 +75,32 @@ pub fn storage(config: &SafetyRulesConfig) -> PersistentSafetyStorage { panic!( "Safety rules storage is not initialized, provide an initial safety rules config" ) + }; + + // 
Ensuring all the overriding consensus keys are in the storage. + let timer = Instant::now(); + for blob in config + .initial_safety_rules_config + .overriding_identity_blobs() + .unwrap_or_default() + { + if let Some(sk) = blob.consensus_private_key { + let pk_hex = hex::encode(PublicKey::from(&sk).to_bytes()); + let storage_key = format!("{}_{}", CONSENSUS_KEY, pk_hex); + match storage.internal_store().set(storage_key.as_str(), sk) { + Ok(_) => { + info!("Setting {storage_key} succeeded."); + }, + Err(e) => { + warn!("Setting {storage_key} failed with internal store set error: {e}"); + }, + } + } } - } -} + info!("Overriding key work time: {:?}", timer.elapsed()); -pub fn load_consensus_key_from_secure_storage( - config: &SafetyRulesConfig, -) -> anyhow::Result { - let storage: Storage = (&config.backend).into(); - let storage = Box::new(storage); - let response = storage.get::(CONSENSUS_KEY).map_err(|e| { - anyhow!("load_consensus_key_from_secure_storage failed with storage read error: {e}") - })?; - Ok(response.value) + storage + } } enum SafetyRulesWrapper { diff --git a/consensus/src/block_preparer.rs b/consensus/src/block_preparer.rs index 1c6a88965fc29..aa3d82706f32d 100644 --- a/consensus/src/block_preparer.rs +++ b/consensus/src/block_preparer.rs @@ -2,8 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - counters::{MAX_TXNS_FROM_BLOCK_TO_EXECUTE, TXN_SHUFFLE_SECONDS}, - payload_manager::PayloadManager, + counters::{self, MAX_TXNS_FROM_BLOCK_TO_EXECUTE, TXN_SHUFFLE_SECONDS}, + payload_manager::TPayloadManager, transaction_deduper::TransactionDeduper, transaction_filter::TransactionFilter, transaction_shuffler::TransactionShuffler, @@ -12,10 +12,10 @@ use aptos_consensus_types::block::Block; use aptos_executor_types::ExecutorResult; use aptos_types::transaction::SignedTransaction; use fail::fail_point; -use std::sync::Arc; +use std::{sync::Arc, time::Instant}; pub struct BlockPreparer { - payload_manager: Arc, + payload_manager: Arc, txn_filter: 
Arc, txn_deduper: Arc, txn_shuffler: Arc, @@ -23,7 +23,7 @@ pub struct BlockPreparer { impl BlockPreparer { pub fn new( - payload_manager: Arc, + payload_manager: Arc, txn_filter: Arc, txn_deduper: Arc, txn_shuffler: Arc, @@ -43,6 +43,7 @@ impl BlockPreparer { thread::sleep(Duration::from_millis(10)); Err(ExecutorError::CouldNotGetData) }); + let start_time = Instant::now(); let (txns, max_txns_from_block_to_execute) = self.payload_manager.get_transactions(block).await?; let txn_filter = self.txn_filter.clone(); @@ -51,7 +52,7 @@ impl BlockPreparer { let block_id = block.id(); let block_timestamp_usecs = block.timestamp_usecs(); // Transaction filtering, deduplication and shuffling are CPU intensive tasks, so we run them in a blocking task. - tokio::task::spawn_blocking(move || { + let result = tokio::task::spawn_blocking(move || { let filtered_txns = txn_filter.filter(block_id, block_timestamp_usecs, txns); let deduped_txns = txn_deduper.dedup(filtered_txns); let mut shuffled_txns = { @@ -61,12 +62,14 @@ impl BlockPreparer { }; if let Some(max_txns_from_block_to_execute) = max_txns_from_block_to_execute { - shuffled_txns.truncate(max_txns_from_block_to_execute); + shuffled_txns.truncate(max_txns_from_block_to_execute as usize); } MAX_TXNS_FROM_BLOCK_TO_EXECUTE.observe(shuffled_txns.len() as f64); Ok(shuffled_txns) }) .await - .expect("Failed to spawn blocking task for transaction generation") + .expect("Failed to spawn blocking task for transaction generation"); + counters::BLOCK_PREPARER_LATENCY.observe_duration(start_time.elapsed()); + result } } diff --git a/consensus/src/block_storage/block_store.rs b/consensus/src/block_storage/block_store.rs index 7651076b15d8e..9c4fbd48f6997 100644 --- a/consensus/src/block_storage/block_store.rs +++ b/consensus/src/block_storage/block_store.rs @@ -10,7 +10,7 @@ use crate::{ BlockReader, }, counters, - payload_manager::PayloadManager, + payload_manager::TPayloadManager, persistent_liveness_storage::{ 
PersistentLivenessStorage, RecoveryData, RootInfo, RootMetadata, }, @@ -19,8 +19,12 @@ use crate::{ }; use anyhow::{bail, ensure, format_err, Context}; use aptos_consensus_types::{ - block::Block, common::Round, pipelined_block::PipelinedBlock, quorum_cert::QuorumCert, - sync_info::SyncInfo, timeout_2chain::TwoChainTimeoutCertificate, + block::Block, + common::Round, + pipelined_block::{ExecutionSummary, PipelinedBlock}, + quorum_cert::QuorumCert, + sync_info::SyncInfo, + timeout_2chain::TwoChainTimeoutCertificate, wrapped_ledger_info::WrappedLedgerInfo, }; use aptos_crypto::{hash::ACCUMULATOR_PLACEHOLDER_HASH, HashValue}; @@ -32,7 +36,9 @@ use futures::executor::block_on; #[cfg(test)] use std::collections::VecDeque; #[cfg(any(test, feature = "fuzzing"))] -use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::atomic::AtomicBool; +#[cfg(any(test, feature = "fuzzing"))] +use std::sync::atomic::Ordering; use std::{sync::Arc, time::Duration}; #[cfg(test)] @@ -74,7 +80,7 @@ pub struct BlockStore { time_service: Arc, // consistent with round type vote_back_pressure_limit: Round, - payload_manager: Arc, + payload_manager: Arc, #[cfg(any(test, feature = "fuzzing"))] back_pressure_for_test: AtomicBool, order_vote_enabled: bool, @@ -89,7 +95,7 @@ impl BlockStore { max_pruned_blocks_in_mem: usize, time_service: Arc, vote_back_pressure_limit: Round, - payload_manager: Arc, + payload_manager: Arc, order_vote_enabled: bool, pending_blocks: Arc>, ) -> Self { @@ -144,7 +150,7 @@ impl BlockStore { max_pruned_blocks_in_mem: usize, time_service: Arc, vote_back_pressure_limit: Round, - payload_manager: Arc, + payload_manager: Arc, order_vote_enabled: bool, pending_blocks: Arc>, ) -> Self { @@ -290,7 +296,6 @@ impl BlockStore { root_metadata: RootMetadata, blocks: Vec, quorum_certs: Vec, - order_vote_enabled: bool, ) { info!( "Rebuilding block tree. 
root {:?}, blocks {:?}, qcs {:?}", @@ -318,7 +323,7 @@ impl BlockStore { Arc::clone(&self.time_service), self.vote_back_pressure_limit, self.payload_manager.clone(), - order_vote_enabled, + self.order_vote_enabled, self.pending_blocks.clone(), ) .await; @@ -457,10 +462,90 @@ impl BlockStore { #[cfg(any(test, feature = "fuzzing"))] pub fn set_back_pressure_for_test(&self, back_pressure: bool) { + use std::sync::atomic::Ordering; + self.back_pressure_for_test .store(back_pressure, Ordering::Relaxed) } + pub fn pending_blocks(&self) -> Arc> { + self.pending_blocks.clone() + } + + pub async fn wait_for_payload(&self, block: &Block) -> anyhow::Result<()> { + tokio::time::timeout( + Duration::from_secs(1), + self.payload_manager.get_transactions(block), + ) + .await??; + Ok(()) + } + + pub fn check_payload(&self, proposal: &Block) -> bool { + self.payload_manager.check_payload_availability(proposal) + } +} + +impl BlockReader for BlockStore { + fn block_exists(&self, block_id: HashValue) -> bool { + self.inner.read().block_exists(&block_id) + } + + fn get_block(&self, block_id: HashValue) -> Option> { + self.inner.read().get_block(&block_id) + } + + fn ordered_root(&self) -> Arc { + self.inner.read().ordered_root() + } + + fn commit_root(&self) -> Arc { + self.inner.read().commit_root() + } + + fn get_quorum_cert_for_block(&self, block_id: HashValue) -> Option> { + self.inner.read().get_quorum_cert_for_block(&block_id) + } + + fn path_from_ordered_root(&self, block_id: HashValue) -> Option>> { + self.inner.read().path_from_ordered_root(block_id) + } + + fn path_from_commit_root(&self, block_id: HashValue) -> Option>> { + self.inner.read().path_from_commit_root(block_id) + } + + #[cfg(test)] + fn highest_certified_block(&self) -> Arc { + self.inner.read().highest_certified_block() + } + + fn highest_quorum_cert(&self) -> Arc { + self.inner.read().highest_quorum_cert() + } + + fn highest_ordered_cert(&self) -> Arc { + self.inner.read().highest_ordered_cert() + } + + fn 
highest_commit_cert(&self) -> Arc { + self.inner.read().highest_commit_cert() + } + + fn highest_2chain_timeout_cert(&self) -> Option> { + self.inner.read().highest_2chain_timeout_cert() + } + + fn sync_info(&self) -> SyncInfo { + SyncInfo::new_decoupled( + self.highest_quorum_cert().as_ref().clone(), + self.highest_ordered_cert().as_ref().clone(), + self.highest_commit_cert().as_ref().clone(), + self.highest_2chain_timeout_cert() + .map(|tc| tc.as_ref().clone()), + ) + } + /// Return if the consensus is backpressured fn vote_back_pressure(&self) -> bool { #[cfg(any(test, feature = "fuzzing"))] @@ -477,11 +562,7 @@ impl BlockStore { ordered_round > self.vote_back_pressure_limit + commit_round } - pub fn pending_blocks(&self) -> Arc> { - self.pending_blocks.clone() - } - - pub fn pipeline_pending_latency(&self, proposal_timestamp: Duration) -> Duration { + fn pipeline_pending_latency(&self, proposal_timestamp: Duration) -> Duration { let ordered_root = self.ordered_root(); let commit_root = self.commit_root(); let pending_path = self @@ -512,7 +593,7 @@ impl BlockStore { // latency not known without non-genesis blocks Duration::ZERO } else { - proposal_timestamp.checked_sub(timestamp).unwrap() + proposal_timestamp.saturating_sub(timestamp) } } @@ -552,73 +633,31 @@ impl BlockStore { Duration::ZERO } } -} -impl BlockReader for BlockStore { - fn block_exists(&self, block_id: HashValue) -> bool { - self.inner.read().block_exists(&block_id) - } - - fn get_block(&self, block_id: HashValue) -> Option> { - self.inner.read().get_block(&block_id) - } - - fn ordered_root(&self) -> Arc { - self.inner.read().ordered_root() - } - - fn commit_root(&self) -> Arc { - self.inner.read().commit_root() - } - - fn get_quorum_cert_for_block(&self, block_id: HashValue) -> Option> { - self.inner.read().get_quorum_cert_for_block(&block_id) - } - - fn path_from_ordered_root(&self, block_id: HashValue) -> Option>> { - self.inner.read().path_from_ordered_root(block_id) - } - - fn 
path_from_commit_root(&self, block_id: HashValue) -> Option>> { - self.inner.read().path_from_commit_root(block_id) - } - - fn highest_certified_block(&self) -> Arc { - self.inner.read().highest_certified_block() - } - - fn highest_quorum_cert(&self) -> Arc { - self.inner.read().highest_quorum_cert() - } - - fn highest_ordered_cert(&self) -> Arc { - self.inner.read().highest_ordered_cert() - } - - fn highest_commit_cert(&self) -> Arc { - self.inner.read().highest_commit_cert() - } - - fn highest_2chain_timeout_cert(&self) -> Option> { - self.inner.read().highest_2chain_timeout_cert() - } - - fn sync_info(&self) -> SyncInfo { - SyncInfo::new_decoupled( - self.highest_quorum_cert().as_ref().clone(), - self.highest_ordered_cert().as_ref().clone(), - self.highest_commit_cert().as_ref().clone(), - self.highest_2chain_timeout_cert() - .map(|tc| tc.as_ref().clone()), - ) - } - - fn vote_back_pressure(&self) -> bool { - self.vote_back_pressure() - } - - fn pipeline_pending_latency(&self, proposal_timestamp: Duration) -> Duration { - self.pipeline_pending_latency(proposal_timestamp) + fn get_recent_block_execution_times(&self, num_blocks: usize) -> Vec { + let mut res = vec![]; + let mut cur_block = Some(self.ordered_root()); + loop { + match cur_block { + Some(block) => { + if let Some(execution_time_and_size) = block.get_execution_summary() { + info!( + "Found execution time for {}, {:?}", + block.id(), + execution_time_and_size + ); + res.push(execution_time_and_size); + if res.len() >= num_blocks { + return res; + } + } else { + info!("Couldn't find execution time for {}", block.id()); + } + cur_block = self.get_block(block.parent_id()); + }, + None => return res, + } + } } } diff --git a/consensus/src/block_storage/block_tree.rs b/consensus/src/block_storage/block_tree.rs index 5a9fc47f14d0a..0edb607579c72 100644 --- a/consensus/src/block_storage/block_tree.rs +++ b/consensus/src/block_storage/block_tree.rs @@ -432,7 +432,7 @@ impl BlockTree { 
.create_merged_with_executed_state(commit_decision) .expect("Inconsistent commit proof and evaluation decision, cannot commit block"); - let block_to_commit = blocks_to_commit.last().unwrap().clone(); + let block_to_commit = blocks_to_commit.last().expect("pipeline is empty").clone(); update_counters_for_committed_blocks(blocks_to_commit); let current_round = self.commit_root().round(); let committed_round = block_to_commit.round(); diff --git a/consensus/src/block_storage/mod.rs b/consensus/src/block_storage/mod.rs index 83fe9fd0450d7..51e6520f3d05e 100644 --- a/consensus/src/block_storage/mod.rs +++ b/consensus/src/block_storage/mod.rs @@ -3,11 +3,17 @@ // SPDX-License-Identifier: Apache-2.0 use aptos_consensus_types::{ - pipelined_block::PipelinedBlock, quorum_cert::QuorumCert, sync_info::SyncInfo, - timeout_2chain::TwoChainTimeoutCertificate, wrapped_ledger_info::WrappedLedgerInfo, + pipelined_block::{ExecutionSummary, PipelinedBlock}, + quorum_cert::QuorumCert, + sync_info::SyncInfo, + timeout_2chain::TwoChainTimeoutCertificate, + wrapped_ledger_info::WrappedLedgerInfo, }; use aptos_crypto::HashValue; -pub use block_store::{sync_manager::BlockRetriever, BlockStore}; +pub use block_store::{ + sync_manager::{BlockRetriever, NeedFetchResult}, + BlockStore, +}; use std::{sync::Arc, time::Duration}; mod block_store; @@ -42,6 +48,7 @@ pub trait BlockReader: Send + Sync { fn path_from_commit_root(&self, block_id: HashValue) -> Option>>; /// Return the certified block with the highest round. 
+ #[cfg(test)] fn highest_certified_block(&self) -> Arc; /// Return the quorum certificate with the highest round @@ -64,4 +71,6 @@ pub trait BlockReader: Send + Sync { // Return time difference between last committed block and new proposal fn pipeline_pending_latency(&self, proposal_timestamp: Duration) -> Duration; + + fn get_recent_block_execution_times(&self, num_blocks: usize) -> Vec; } diff --git a/consensus/src/block_storage/sync_manager.rs b/consensus/src/block_storage/sync_manager.rs index d1e1842b28d52..7fad1188d4b05 100644 --- a/consensus/src/block_storage/sync_manager.rs +++ b/consensus/src/block_storage/sync_manager.rs @@ -19,7 +19,7 @@ use crate::{ monitor, network::{IncomingBlockRetrievalRequest, NetworkSender}, network_interface::ConsensusMsg, - payload_manager::PayloadManager, + payload_manager::TPayloadManager, persistent_liveness_storage::{LedgerRecoveryData, PersistentLivenessStorage, RecoveryData}, pipeline::execution_client::TExecutionClient, }; @@ -62,7 +62,8 @@ impl BlockStore { /// Check if we're far away from this ledger info and need to sync. /// This ensures that the block referred by the ledger info is not in buffer manager. pub fn need_sync_for_ledger_info(&self, li: &LedgerInfoWithSignatures) -> bool { - // TODO move min gap to fallback (30) to config. 
+ // TODO move min gap to fallback (30) to config, and if configurable make sure the value is + // larger than buffer manager MAX_BACKLOG (20) (self.ordered_root().round() < li.commit_info().round() && !self.block_exists(li.commit_info().id())) || self.commit_root().round() + 30.max(2 * self.vote_back_pressure_limit) @@ -274,14 +275,8 @@ impl BlockStore { committed_round = root.0.round(), block_id = root.0.id(), ); - self.rebuild( - root, - root_metadata, - blocks, - quorum_certs, - self.order_vote_enabled, - ) - .await; + self.rebuild(root, root_metadata, blocks, quorum_certs) + .await; if highest_commit_cert.ledger_info().ledger_info().ends_epoch() { retriever @@ -301,7 +296,7 @@ impl BlockStore { retriever: &'a mut BlockRetriever, storage: Arc, execution_client: Arc, - payload_manager: Arc, + payload_manager: Arc, order_vote_enabled: bool, ) -> anyhow::Result { info!( @@ -692,7 +687,7 @@ impl BlockRetriever { // extend the result blocks let batch = result.blocks().clone(); progress += batch.len() as u64; - last_block_id = batch.last().unwrap().parent_id(); + last_block_id = batch.last().expect("Batch should not be empty").parent_id(); result_blocks.extend(batch); }, Ok(result) @@ -712,7 +707,13 @@ impl BlockRetriever { }, } } - assert_eq!(result_blocks.last().unwrap().id(), target_block_id); + assert_eq!( + result_blocks + .last() + .expect("Expected at least a result_block") + .id(), + target_block_id + ); Ok(result_blocks) } diff --git a/consensus/src/block_storage/tracing.rs b/consensus/src/block_storage/tracing.rs index 4552b19bd49c2..7e8f819ac546a 100644 --- a/consensus/src/block_storage/tracing.rs +++ b/consensus/src/block_storage/tracing.rs @@ -14,6 +14,7 @@ impl BlockStage { pub const EPOCH_MANAGER_RECEIVED: &'static str = "epoch_manager_received"; pub const EPOCH_MANAGER_VERIFIED: &'static str = "epoch_manager_verified"; pub const EXECUTED: &'static str = "executed"; + pub const EXECUTION_PIPELINE_INSERTED: &'static str = "execution_pipeline_inserted"; 
pub const NETWORK_RECEIVED: &'static str = "network_received"; pub const OC_ADDED: &'static str = "ordered_cert_created"; pub const ORDERED: &'static str = "ordered"; diff --git a/consensus/src/consensus_observer/error.rs b/consensus/src/consensus_observer/common/error.rs similarity index 84% rename from consensus/src/consensus_observer/error.rs rename to consensus/src/consensus_observer/common/error.rs index 43a496e8f30de..7fc6a78785a96 100644 --- a/consensus/src/consensus_observer/error.rs +++ b/consensus/src/consensus_observer/common/error.rs @@ -6,6 +6,9 @@ use thiserror::Error; #[derive(Debug, Error)] pub enum Error { + #[error("Invalid message error: {0}")] + InvalidMessageError(String), + #[error("Network error: {0}")] NetworkError(String), @@ -18,6 +21,9 @@ pub enum Error { #[error("Subscription progress stopped: {0}")] SubscriptionProgressStopped(String), + #[error("Subscriptions reset: {0}")] + SubscriptionsReset(String), + #[error("Subscription suboptimal: {0}")] SubscriptionSuboptimal(String), @@ -32,10 +38,12 @@ impl Error { /// Returns a summary label for the error pub fn get_label(&self) -> &'static str { match self { + Self::InvalidMessageError(_) => "invalid_message_error", Self::NetworkError(_) => "network_error", Self::RpcError(_) => "rpc_error", Self::SubscriptionDisconnected(_) => "subscription_disconnected", Self::SubscriptionProgressStopped(_) => "subscription_progress_stopped", + Self::SubscriptionsReset(_) => "subscriptions_reset", Self::SubscriptionSuboptimal(_) => "subscription_suboptimal", Self::SubscriptionTimeout(_) => "subscription_timeout", Self::UnexpectedError(_) => "unexpected_error", diff --git a/consensus/src/consensus_observer/logging.rs b/consensus/src/consensus_observer/common/logging.rs similarity index 95% rename from consensus/src/consensus_observer/logging.rs rename to consensus/src/consensus_observer/common/logging.rs index bdadaf84d8eda..413ca81e3cd53 100644 --- a/consensus/src/consensus_observer/logging.rs +++ 
b/consensus/src/consensus_observer/common/logging.rs @@ -1,7 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::consensus_observer::error::Error; +use crate::consensus_observer::common::error::Error; use aptos_config::network_id::PeerNetworkId; use aptos_logger::Schema; use serde::Serialize; diff --git a/consensus/src/consensus_observer/metrics.rs b/consensus/src/consensus_observer/common/metrics.rs similarity index 62% rename from consensus/src/consensus_observer/metrics.rs rename to consensus/src/consensus_observer/common/metrics.rs index 0425a1e799d48..5888bbfcaca26 100644 --- a/consensus/src/consensus_observer/metrics.rs +++ b/consensus/src/consensus_observer/common/metrics.rs @@ -1,15 +1,25 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::unwrap_used)] + use aptos_config::network_id::{NetworkId, PeerNetworkId}; use aptos_metrics_core::{ - register_histogram_vec, register_int_counter_vec, register_int_gauge_vec, HistogramVec, - IntCounterVec, IntGaugeVec, + register_histogram_vec, register_int_counter, register_int_counter_vec, register_int_gauge_vec, + HistogramVec, IntCounter, IntCounterVec, IntGaugeVec, }; use once_cell::sync::Lazy; // Useful metric labels +pub const BLOCK_PAYLOAD_LABEL: &str = "block_payload"; +pub const COMMIT_DECISION_LABEL: &str = "commit_decision"; +pub const COMMITTED_BLOCKS_LABEL: &str = "committed_blocks"; pub const CREATED_SUBSCRIPTION_LABEL: &str = "created_subscription"; +pub const ORDERED_BLOCK_ENTRIES_LABEL: &str = "ordered_block_entries"; +pub const ORDERED_BLOCK_LABEL: &str = "ordered_block"; +pub const PENDING_BLOCK_ENTRIES_LABEL: &str = "pending_block_entries"; +pub const PENDING_BLOCKS_LABEL: &str = "pending_blocks"; +pub const STORED_PAYLOADS_LABEL: &str = "stored_payloads"; /// Counter for tracking created subscriptions for the consensus observer pub static OBSERVER_CREATED_SUBSCRIPTIONS: Lazy = Lazy::new(|| { @@ -21,7 +31,35 @@ pub static 
OBSERVER_CREATED_SUBSCRIPTIONS: Lazy = Lazy::new(|| { .unwrap() }); -/// Counter for tracking the number of active subscriptions for the consensus observer +/// Counter for tracking the number of times the block state was cleared by the consensus observer +pub static OBSERVER_CLEARED_BLOCK_STATE: Lazy = Lazy::new(|| { + register_int_counter!( + "consensus_observer_cleared_block_state", + "Counter for tracking the number of times the block state was cleared by the consensus observer", + ).unwrap() +}); + +/// Counter for tracking dropped (direct send) messages by the consensus observer +pub static OBSERVER_DROPPED_MESSAGES: Lazy = Lazy::new(|| { + register_int_counter_vec!( + "consensus_observer_dropped_messages", + "Counters related to dropped (direct send) messages by the consensus observer", + &["message_type", "network_id"] + ) + .unwrap() +}); + +/// Counter for tracking rejected (direct send) messages by the consensus observer +pub static OBSERVER_REJECTED_MESSAGES: Lazy = Lazy::new(|| { + register_int_counter_vec!( + "consensus_observer_rejected_messages", + "Counters related to rejected (direct send) messages by the consensus observer", + &["message_type", "network_id"] + ) + .unwrap() +}); + +/// Gauge for tracking the number of active subscriptions for the consensus observer pub static OBSERVER_NUM_ACTIVE_SUBSCRIPTIONS: Lazy = Lazy::new(|| { register_int_gauge_vec!( "consensus_observer_num_active_subscriptions", @@ -31,6 +69,26 @@ pub static OBSERVER_NUM_ACTIVE_SUBSCRIPTIONS: Lazy = Lazy::new(|| { .unwrap() }); +/// Gauge for tracking the number of processed blocks by the consensus observer +pub static OBSERVER_NUM_PROCESSED_BLOCKS: Lazy = Lazy::new(|| { + register_int_gauge_vec!( + "consensus_observer_num_processed_blocks", + "Gauge for tracking the number of processed blocks by the consensus observer", + &["processed_type"] + ) + .unwrap() +}); + +/// Gauge for tracking the processed block rounds by the consensus observer +pub static 
OBSERVER_PROCESSED_BLOCK_ROUNDS: Lazy = Lazy::new(|| { + register_int_gauge_vec!( + "consensus_observer_processed_block_rounds", + "Gauge for tracking the processed block rounds by the consensus observer", + &["processed_type"] + ) + .unwrap() +}); + /// Counter for tracking successful RPC responses received by the consensus observer pub static OBSERVER_RECEIVED_MESSAGE_RESPONSES: Lazy = Lazy::new(|| { register_int_counter_vec!( @@ -51,6 +109,16 @@ pub static OBSERVER_RECEIVED_MESSAGES: Lazy = Lazy::new(|| { .unwrap() }); +/// Gauge for tracking the rounds of received messages by the consensus observer +pub static OBSERVER_RECEIVED_MESSAGE_ROUNDS: Lazy = Lazy::new(|| { + register_int_gauge_vec!( + "consensus_observer_received_message_rounds", + "Gauge for tracking the rounds of received messages by the consensus observer", + &["message_type"] + ) + .unwrap() +}); + /// Counter for tracking RPC request latencies sent by the consensus observer pub static OBSERVER_REQUEST_LATENCIES: Lazy = Lazy::new(|| { register_histogram_vec!( @@ -101,7 +169,7 @@ pub static PENDING_CONSENSUS_OBSERVER_NETWORK_EVENTS: Lazy = Lazy .unwrap() }); -/// Counter for tracking the number of active subscribers for the consensus publisher +/// Gauge for tracking the number of active subscribers for the consensus publisher pub static PUBLISHER_NUM_ACTIVE_SUBSCRIBERS: Lazy = Lazy::new(|| { register_int_gauge_vec!( "consensus_publisher_num_active_subscribers", @@ -141,8 +209,8 @@ pub static PUBLISHER_SENT_MESSAGES: Lazy = Lazy::new(|| { .unwrap() }); -/// Increments the given request counter with the provided values -pub fn increment_request_counter( +/// Increments the given counter with the provided values +pub fn increment_counter( counter: &Lazy, label: &str, peer_network_id: &PeerNetworkId, @@ -153,6 +221,11 @@ pub fn increment_request_counter( .inc(); } +/// Increments the given counter without labels +pub fn increment_counter_without_labels(counter: &Lazy) { + counter.inc(); +} + /// 
Observes the value for the provided histogram and label pub fn observe_value_with_label( histogram: &Lazy, @@ -166,7 +239,12 @@ pub fn observe_value_with_label( .observe(value) } -/// Sets the gauge with the specific label and value +/// Sets the gauge with the specific network ID and value pub fn set_gauge(counter: &Lazy, network_id: &NetworkId, value: i64) { counter.with_label_values(&[network_id.as_str()]).set(value); } + +/// Sets the gauge with the specific label and value +pub fn set_gauge_with_label(counter: &Lazy, label: &str, value: u64) { + counter.with_label_values(&[label]).set(value as i64); +} diff --git a/consensus/src/consensus_observer/common/mod.rs b/consensus/src/consensus_observer/common/mod.rs new file mode 100644 index 0000000000000..a0ae24bb160e2 --- /dev/null +++ b/consensus/src/consensus_observer/common/mod.rs @@ -0,0 +1,6 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +pub mod error; +pub mod logging; +pub mod metrics; diff --git a/consensus/src/consensus_observer/mod.rs b/consensus/src/consensus_observer/mod.rs index 47822c104d47e..a12c4113358f3 100644 --- a/consensus/src/consensus_observer/mod.rs +++ b/consensus/src/consensus_observer/mod.rs @@ -1,12 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -pub mod error; -pub mod logging; -pub mod metrics; -pub mod network_client; -pub mod network_events; -pub mod network_message; +pub mod common; +pub mod network; pub mod observer; pub mod publisher; -mod subscription; diff --git a/consensus/src/consensus_observer/network/mod.rs b/consensus/src/consensus_observer/network/mod.rs new file mode 100644 index 0000000000000..1a7ad85635ff9 --- /dev/null +++ b/consensus/src/consensus_observer/network/mod.rs @@ -0,0 +1,7 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +pub mod network_events; +pub mod network_handler; +pub mod observer_client; +pub mod observer_message; diff --git 
a/consensus/src/consensus_observer/network_events.rs b/consensus/src/consensus_observer/network/network_events.rs similarity index 98% rename from consensus/src/consensus_observer/network_events.rs rename to consensus/src/consensus_observer/network/network_events.rs index 6f865f1568ae1..6aebb345425ba 100644 --- a/consensus/src/consensus_observer/network_events.rs +++ b/consensus/src/consensus_observer/network/network_events.rs @@ -1,7 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::consensus_observer::network_message::{ +use crate::consensus_observer::network::observer_message::{ ConsensusObserverMessage, ConsensusObserverResponse, }; use aptos_config::network_id::{NetworkId, PeerNetworkId}; diff --git a/consensus/src/consensus_observer/network/network_handler.rs b/consensus/src/consensus_observer/network/network_handler.rs new file mode 100644 index 0000000000000..bbaeca0dc4843 --- /dev/null +++ b/consensus/src/consensus_observer/network/network_handler.rs @@ -0,0 +1,836 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::consensus_observer::{ + common::logging::{LogEntry, LogSchema}, + network::{ + network_events::{ConsensusObserverNetworkEvents, NetworkMessage, ResponseSender}, + observer_message::{ + ConsensusObserverDirectSend, ConsensusObserverMessage, ConsensusObserverRequest, + }, + }, +}; +use aptos_channels::{ + aptos_channel, + aptos_channel::{Receiver, Sender}, + message_queues::QueueStyle, +}; +use aptos_config::{config::ConsensusObserverConfig, network_id::PeerNetworkId}; +use aptos_logger::{error, info, warn}; +use futures::StreamExt; + +/// A simple struct that holds a message to be sent to the consensus observer +pub struct ConsensusObserverNetworkMessage { + peer_network_id: PeerNetworkId, + message: ConsensusObserverDirectSend, +} + +impl ConsensusObserverNetworkMessage { + pub fn new(peer_network_id: PeerNetworkId, message: ConsensusObserverDirectSend) -> Self { + 
Self { + peer_network_id, + message, + } + } + + /// Consumes and unpacks the message into its parts + pub fn into_parts(self) -> (PeerNetworkId, ConsensusObserverDirectSend) { + (self.peer_network_id, self.message) + } +} + +/// A simple struct that holds a message to be sent to the consensus publisher +pub struct ConsensusPublisherNetworkMessage { + peer_network_id: PeerNetworkId, + message: ConsensusObserverRequest, + response_sender: ResponseSender, +} + +impl ConsensusPublisherNetworkMessage { + pub fn new( + peer_network_id: PeerNetworkId, + message: ConsensusObserverRequest, + response_sender: ResponseSender, + ) -> Self { + Self { + peer_network_id, + message, + response_sender, + } + } + + /// Consumes and unpacks the message into its parts + pub fn into_parts(self) -> (PeerNetworkId, ConsensusObserverRequest, ResponseSender) { + (self.peer_network_id, self.message, self.response_sender) + } +} + +/// The network message handler that forwards messages to the consensus +/// observer and publisher, depending on the destination. 
+pub struct ConsensusObserverNetworkHandler { + // The consensus observer config + consensus_observer_config: ConsensusObserverConfig, + + // The stream of network events + network_service_events: ConsensusObserverNetworkEvents, + + // The sender for consensus observer messages + observer_message_sender: Sender<(), ConsensusObserverNetworkMessage>, + + // The sender for consensus publisher messages + publisher_message_sender: Sender<(), ConsensusPublisherNetworkMessage>, +} + +impl ConsensusObserverNetworkHandler { + pub fn new( + consensus_observer_config: ConsensusObserverConfig, + network_service_events: ConsensusObserverNetworkEvents, + ) -> ( + Self, + Receiver<(), ConsensusObserverNetworkMessage>, + Receiver<(), ConsensusPublisherNetworkMessage>, + ) { + // Create a channel for sending consensus observer messages + let (observer_message_sender, observer_message_receiver) = aptos_channel::new( + QueueStyle::FIFO, + consensus_observer_config.max_network_channel_size as usize, + None, + ); + + // Create a channel for sending consensus publisher messages + let (publisher_message_sender, publisher_message_receiver) = aptos_channel::new( + QueueStyle::FIFO, + consensus_observer_config.max_network_channel_size as usize, + None, + ); + + // Create the network handler + let network_handler = Self { + consensus_observer_config, + network_service_events, + observer_message_sender, + publisher_message_sender, + }; + + ( + network_handler, + observer_message_receiver, + publisher_message_receiver, + ) + } + + /// Starts the network handler that forwards messages to the observer and publisher + pub async fn start(mut self) { + info!(LogSchema::new(LogEntry::ConsensusObserver) + .message("Starting the consensus observer network handler!")); + + // Start the network message handler loop + loop { + tokio::select! 
{ + Some(network_message) = self.network_service_events.next() => { + // Unpack the network message + let NetworkMessage { + peer_network_id, + protocol_id: _, + consensus_observer_message, + response_sender, + } = network_message; + + // Process the consensus observer message + match consensus_observer_message { + ConsensusObserverMessage::DirectSend(message) => { + self.handle_observer_message(peer_network_id, message); + }, + ConsensusObserverMessage::Request(request) => { + self.handle_publisher_message(peer_network_id, request, response_sender); + }, + ConsensusObserverMessage::Response(_) => { + warn!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Received unexpected response from peer: {}", + peer_network_id + )) + ); + }, + } + } + else => { + break; // Exit the network handler loop + } + } + } + + // Log an error that the network handler has stopped + error!(LogSchema::new(LogEntry::ConsensusObserver) + .message("Consensus observer network handler has stopped!")); + } + + /// Handles an observer message by forwarding it to the consensus observer + fn handle_observer_message( + &mut self, + peer_network_id: PeerNetworkId, + message: ConsensusObserverDirectSend, + ) { + // Drop the message if the observer is not enabled + if !self.consensus_observer_config.observer_enabled { + return; + } + + // Create the consensus observer message + let network_message = ConsensusObserverNetworkMessage::new(peer_network_id, message); + + // Send the message to the consensus observer + if let Err(error) = self.observer_message_sender.push((), network_message) { + error!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Failed to forward the observer message to the consensus observer! 
Error: {:?}", + error + )) + ); + } + } + + /// Handles a publisher message by forwarding it to the consensus publisher + fn handle_publisher_message( + &mut self, + peer_network_id: PeerNetworkId, + request: ConsensusObserverRequest, + response_sender: Option, + ) { + // Drop the message if the publisher is not enabled + if !self.consensus_observer_config.publisher_enabled { + return; + } + + // Ensure that the response sender is present + let response_sender = match response_sender { + Some(response_sender) => response_sender, + None => { + error!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Missing response sender for the RPC request: {:?}", + request + )) + ); + return; // Something has gone wrong! + }, + }; + + // Create the consensus publisher message + let network_message = + ConsensusPublisherNetworkMessage::new(peer_network_id, request, response_sender); + + // Send the message to the consensus publisher + if let Err(error) = self.publisher_message_sender.push((), network_message) { + error!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Failed to forward the publisher request to the consensus publisher! 
Error: {:?}", + error + )) + ); + } + } +} + +#[cfg(test)] +mod test { + use crate::consensus_observer::network::{ + network_events::ConsensusObserverNetworkEvents, + network_handler::{ + ConsensusObserverNetworkHandler, ConsensusObserverNetworkMessage, + ConsensusPublisherNetworkMessage, + }, + observer_client::ConsensusObserverClient, + observer_message::{ + ConsensusObserverDirectSend, ConsensusObserverMessage, ConsensusObserverRequest, + }, + }; + use aptos_channels::{aptos_channel, aptos_channel::Receiver, message_queues::QueueStyle}; + use aptos_config::{ + config::ConsensusObserverConfig, + network_id::{NetworkId, PeerNetworkId}, + }; + use aptos_crypto::HashValue; + use aptos_network::{ + application::{ + interface::{NetworkClient, NetworkServiceEvents}, + storage::PeersAndMetadata, + }, + peer_manager::{ConnectionRequestSender, PeerManagerRequest, PeerManagerRequestSender}, + protocols::{ + network::{ + NetworkEvents, NetworkSender, NewNetworkEvents, NewNetworkSender, ReceivedMessage, + }, + wire::{ + handshake::v1::{ProtocolId, ProtocolIdSet}, + messaging::v1::{DirectSendMsg, NetworkMessage, RpcRequest}, + }, + }, + transport::ConnectionMetadata, + }; + use aptos_types::{ + aggregate_signature::AggregateSignature, + block_info::BlockInfo, + ledger_info::{LedgerInfo, LedgerInfoWithSignatures}, + PeerId, + }; + use futures::{FutureExt, StreamExt}; + use std::{collections::HashMap, hash::Hash, sync::Arc, time::Duration}; + use tokio::time::timeout; + + // Useful test constants for timeouts + const MAX_CHANNEL_TIMEOUT_SECS: u64 = 5; + const MAX_MESSAGE_WAIT_TIME_SECS: u64 = 5; + const RPC_REQUEST_TIMEOUT_MS: u64 = 10_000; + + #[tokio::test(flavor = "multi_thread")] + async fn test_handle_observer_message() { + // Create a consensus observer config with the observer enabled + let consensus_observer_config = ConsensusObserverConfig { + observer_enabled: true, + ..Default::default() + }; + + // Create the peers and metadata container + let network_ids = 
vec![NetworkId::Vfn, NetworkId::Public]; + let peers_and_metadata = PeersAndMetadata::new(&network_ids); + + // Create a single peer and initialize the connection metadata + let peer_network_id = + create_peer_and_connection(NetworkId::Public, peers_and_metadata.clone()); + + // Create the consensus observer client + let ( + network_senders, + network_events, + mut outbound_request_receivers, + mut inbound_request_senders, + ) = create_network_sender_and_events(&network_ids); + let consensus_observer_client = + create_observer_network_client(peers_and_metadata, network_senders); + + // Create the consensus observer network events + let observer_network_events = ConsensusObserverNetworkEvents::new(network_events); + + // Create the consensus observer network handler + let (network_handler, mut observer_message_receiver, mut publisher_message_receiver) = + ConsensusObserverNetworkHandler::new( + consensus_observer_config, + observer_network_events, + ); + + // Start the consensus observer network handler + tokio::spawn(network_handler.start()); + + // Create a consensus observer message + let consensus_observer_message = ConsensusObserverMessage::new_ordered_block_message( + vec![], + LedgerInfoWithSignatures::new( + LedgerInfo::new(BlockInfo::empty(), HashValue::zero()), + AggregateSignature::empty(), + ), + ); + + // Send the message to the network handler + send_observer_message( + &peer_network_id, + consensus_observer_client, + &consensus_observer_message, + ); + + // Wait for the message to be processed by the outbound handler + wait_for_outbound_processing( + peer_network_id, + &mut outbound_request_receivers, + &mut inbound_request_senders, + Some(ProtocolId::ConsensusObserver), + None, + false, + ) + .await; + + // Wait for the handler to process and forward the observer message + wait_for_handler_processing( + peer_network_id, + &mut observer_message_receiver, + &mut publisher_message_receiver, + Some(consensus_observer_message), + None, + ) + .await; + + 
// Verify no further message is received + wait_and_verify_no_message(&mut observer_message_receiver).await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_handle_observer_message_disabled() { + // Create a consensus observer config with the observer disabled + let consensus_observer_config = ConsensusObserverConfig { + observer_enabled: false, + ..Default::default() + }; + + // Create the peers and metadata container + let network_ids = vec![NetworkId::Vfn, NetworkId::Public]; + let peers_and_metadata = PeersAndMetadata::new(&network_ids); + + // Create a single peer and initialize the connection metadata + let peer_network_id = + create_peer_and_connection(NetworkId::Public, peers_and_metadata.clone()); + + // Create the consensus observer client + let ( + network_senders, + network_events, + mut outbound_request_receivers, + mut inbound_request_senders, + ) = create_network_sender_and_events(&network_ids); + let consensus_observer_client = + create_observer_network_client(peers_and_metadata, network_senders); + + // Create the consensus observer network events + let observer_network_events = ConsensusObserverNetworkEvents::new(network_events); + + // Create the consensus observer network handler + let (network_handler, mut observer_message_receiver, _) = + ConsensusObserverNetworkHandler::new( + consensus_observer_config, + observer_network_events, + ); + + // Start the consensus observer network handler + tokio::spawn(network_handler.start()); + + // Create a consensus observer message + let consensus_observer_message = ConsensusObserverMessage::new_ordered_block_message( + vec![], + LedgerInfoWithSignatures::new( + LedgerInfo::new(BlockInfo::empty(), HashValue::zero()), + AggregateSignature::empty(), + ), + ); + + // Send the message to the network handler + send_observer_message( + &peer_network_id, + consensus_observer_client, + &consensus_observer_message, + ); + + // Wait for the message to be processed by the outbound handler + 
wait_for_outbound_processing( + peer_network_id, + &mut outbound_request_receivers, + &mut inbound_request_senders, + Some(ProtocolId::ConsensusObserver), + None, + false, + ) + .await; + + // Verify no message is received + wait_and_verify_no_message(&mut observer_message_receiver).await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_handle_publisher_message() { + // Create a consensus observer config with the publisher enabled + let consensus_observer_config = ConsensusObserverConfig { + publisher_enabled: true, + ..Default::default() + }; + + // Create the peers and metadata container + let network_ids = vec![NetworkId::Vfn, NetworkId::Public]; + let peers_and_metadata = PeersAndMetadata::new(&network_ids); + + // Create a single peer and initialize the connection metadata + let peer_network_id = + create_peer_and_connection(NetworkId::Vfn, peers_and_metadata.clone()); + + // Create the consensus observer client + let ( + network_senders, + network_events, + mut outbound_request_receivers, + mut inbound_request_senders, + ) = create_network_sender_and_events(&network_ids); + let consensus_observer_client = + create_observer_network_client(peers_and_metadata, network_senders); + + // Create the consensus observer network events + let observer_network_events = ConsensusObserverNetworkEvents::new(network_events); + + // Create the consensus observer network handler + let (network_handler, mut observer_message_receiver, mut publisher_message_receiver) = + ConsensusObserverNetworkHandler::new( + consensus_observer_config, + observer_network_events, + ); + + // Start the consensus observer network handler + tokio::spawn(network_handler.start()); + + // Create a consensus publisher message + let consensus_publisher_message = ConsensusObserverRequest::Subscribe; + + // Send the message to the network handler + send_publisher_message( + peer_network_id, + consensus_observer_client, + consensus_publisher_message.clone(), + ); + + // Wait for the message 
to be processed by the outbound handler + wait_for_outbound_processing( + peer_network_id, + &mut outbound_request_receivers, + &mut inbound_request_senders, + None, + Some(ProtocolId::ConsensusObserverRpc), + true, + ) + .await; + + // Wait for the handler to process and forward the publisher message + wait_for_handler_processing( + peer_network_id, + &mut observer_message_receiver, + &mut publisher_message_receiver, + None, + Some(consensus_publisher_message), + ) + .await; + + // Verify no further message is received + wait_and_verify_no_message(&mut publisher_message_receiver).await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_handle_publisher_message_disabled() { + // Create a consensus observer config with the publisher disabled + let consensus_observer_config = ConsensusObserverConfig { + publisher_enabled: false, + ..Default::default() + }; + + // Create the peers and metadata container + let network_ids = vec![NetworkId::Vfn, NetworkId::Public]; + let peers_and_metadata = PeersAndMetadata::new(&network_ids); + + // Create a single peer and initialize the connection metadata + let peer_network_id = + create_peer_and_connection(NetworkId::Public, peers_and_metadata.clone()); + + // Create the consensus observer client + let ( + network_senders, + network_events, + mut outbound_request_receivers, + mut inbound_request_senders, + ) = create_network_sender_and_events(&network_ids); + let consensus_observer_client = + create_observer_network_client(peers_and_metadata, network_senders); + + // Create the consensus observer network events + let observer_network_events = ConsensusObserverNetworkEvents::new(network_events); + + // Create the consensus observer network handler + let (network_handler, _, mut publisher_message_receiver) = + ConsensusObserverNetworkHandler::new( + consensus_observer_config, + observer_network_events, + ); + + // Start the consensus observer network handler + tokio::spawn(network_handler.start()); + + // Create a 
consensus publisher message + let consensus_publisher_message = ConsensusObserverRequest::Subscribe; + + // Send the message to the network handler + send_publisher_message( + peer_network_id, + consensus_observer_client, + consensus_publisher_message.clone(), + ); + + // Wait for the message to be processed by the outbound handler + wait_for_outbound_processing( + peer_network_id, + &mut outbound_request_receivers, + &mut inbound_request_senders, + None, + Some(ProtocolId::ConsensusObserverRpc), + true, + ) + .await; + + // Verify no message is received + wait_and_verify_no_message(&mut publisher_message_receiver).await; + } + + /// Creates and returns a single Aptos channel + fn create_aptos_channel( + ) -> (aptos_channel::Sender, aptos_channel::Receiver) { + aptos_channel::new(QueueStyle::FIFO, 10, None) + } + + /// Creates a network sender and events for testing (using the specified network IDs) + fn create_network_sender_and_events( + network_ids: &[NetworkId], + ) -> ( + HashMap>, + NetworkServiceEvents, + HashMap>, + HashMap>, + ) { + let mut network_senders = HashMap::new(); + let mut network_and_events = HashMap::new(); + let mut outbound_request_receivers = HashMap::new(); + let mut inbound_request_senders = HashMap::new(); + + for network_id in network_ids { + // Create the peer manager and connection channels + let (inbound_request_sender, inbound_request_receiver) = create_aptos_channel(); + let (outbound_request_sender, outbound_request_receiver) = create_aptos_channel(); + let (connection_outbound_sender, _connection_outbound_receiver) = + create_aptos_channel(); + + // Create the network sender and events + let network_sender = NetworkSender::new( + PeerManagerRequestSender::new(outbound_request_sender), + ConnectionRequestSender::new(connection_outbound_sender), + ); + let network_events = NetworkEvents::new(inbound_request_receiver, None, true); + + // Save the sender, events and receivers + network_senders.insert(*network_id, network_sender); + 
network_and_events.insert(*network_id, network_events); + outbound_request_receivers.insert(*network_id, outbound_request_receiver); + inbound_request_senders.insert(*network_id, inbound_request_sender); + } + + // Create the network service events + let network_service_events = NetworkServiceEvents::new(network_and_events); + + ( + network_senders, + network_service_events, + outbound_request_receivers, + inbound_request_senders, + ) + } + + /// Creates and returns a consensus observer network client + fn create_observer_network_client( + peers_and_metadata: Arc, + network_senders: HashMap>, + ) -> ConsensusObserverClient> { + let network_client: NetworkClient = NetworkClient::new( + vec![ProtocolId::ConsensusObserver], + vec![ProtocolId::ConsensusObserverRpc], + network_senders, + peers_and_metadata.clone(), + ); + ConsensusObserverClient::new(network_client) + } + + /// Creates a new peer with the specified connection metadata + fn create_peer_and_connection( + network_id: NetworkId, + peers_and_metadata: Arc, + ) -> PeerNetworkId { + // Create the peer and connection metadata + let peer_network_id = PeerNetworkId::new(network_id, PeerId::random()); + let mut connection_metadata = ConnectionMetadata::mock(peer_network_id.peer_id()); + + // Update the application protocols + let protocol_ids = vec![ + ProtocolId::ConsensusObserver, + ProtocolId::ConsensusObserverRpc, + ]; + connection_metadata.application_protocols = ProtocolIdSet::from_iter(protocol_ids); + + // Insert the connection into peers and metadata + peers_and_metadata + .insert_connection_metadata(peer_network_id, connection_metadata.clone()) + .unwrap(); + + peer_network_id + } + + /// Sends a consensus observer message to the network handler + fn send_observer_message( + peer_network_id: &PeerNetworkId, + consensus_observer_client: ConsensusObserverClient>, + consensus_observer_message: &ConsensusObserverDirectSend, + ) { + // Serialize the message + let serialized_message = consensus_observer_client 
+ .serialize_message_for_peer(peer_network_id, consensus_observer_message.clone()) + .unwrap(); + + // Send the message via the observer client + consensus_observer_client + .send_serialized_message_to_peer(peer_network_id, serialized_message, "") + .unwrap(); + } + + /// Sends a consensus publisher message to the network handler + fn send_publisher_message( + peer_network_id: PeerNetworkId, + consensus_observer_client: ConsensusObserverClient>, + consensus_publisher_message: ConsensusObserverRequest, + ) { + tokio::spawn(async move { + consensus_observer_client + .send_rpc_request_to_peer( + &peer_network_id, + consensus_publisher_message, + RPC_REQUEST_TIMEOUT_MS, + ) + .await + .unwrap() + }); + } + + /// Waits for a while to ensure that the handler has processed any + /// messages it receives and verifies that no message is received. + async fn wait_and_verify_no_message(message_receiver: &mut Receiver<(), Message>) { + // Wait for a while to ensure the handler has processed any message + tokio::time::sleep(Duration::from_secs(MAX_MESSAGE_WAIT_TIME_SECS)).await; + + // Verify that no message is received + assert!(message_receiver.select_next_some().now_or_never().is_none()); + } + + /// Waits for the network handler to process a message and forward + /// it to the appropriate receiver (observer or publisher). 
+ async fn wait_for_handler_processing( + expected_peer_network_id: PeerNetworkId, + observer_message_receiver: &mut aptos_channel::Receiver< + (), + ConsensusObserverNetworkMessage, + >, + publisher_message_receiver: &mut aptos_channel::Receiver< + (), + ConsensusPublisherNetworkMessage, + >, + expected_observer_message: Option, + expected_publisher_message: Option, + ) { + // If we expect an observer message, wait for it and verify the contents + if let Some(expected_observer_message) = expected_observer_message { + match timeout(Duration::from_secs(MAX_CHANNEL_TIMEOUT_SECS), observer_message_receiver.select_next_some()).await { + Ok(observer_network_message) => { + let (peer_network_id, observer_message) = observer_network_message.into_parts(); + assert_eq!(peer_network_id, expected_peer_network_id); + assert_eq!(observer_message, expected_observer_message); + }, + Err(elapsed) => panic!( + "Timed out while waiting to receive a consensus observer message. Elapsed: {:?}", + elapsed + ), + } + } + + // If we expect a publisher message, wait for it and verify the contents + if let Some(expected_publisher_message) = expected_publisher_message { + match timeout(Duration::from_secs(MAX_CHANNEL_TIMEOUT_SECS), publisher_message_receiver.select_next_some()).await { + Ok(publisher_network_message) => { + let (peer_network_id, publisher_message, _) = publisher_network_message.into_parts(); + assert_eq!(peer_network_id, expected_peer_network_id); + assert_eq!(publisher_message, expected_publisher_message); + }, + Err(elapsed) => panic!( + "Timed out while waiting to receive a consensus publisher message. Elapsed: {:?}", + elapsed + ), + } + } + } + + /// Waits for an outbound message and passes it to the inbound + /// request senders (to emulate network wire transfer). 
+ async fn wait_for_outbound_processing( + expected_peer_network_id: PeerNetworkId, + outbound_request_receivers: &mut HashMap< + NetworkId, + aptos_channel::Receiver<(PeerId, ProtocolId), PeerManagerRequest>, + >, + inbound_request_senders: &mut HashMap< + NetworkId, + aptos_channel::Sender<(PeerId, ProtocolId), ReceivedMessage>, + >, + expected_direct_send_protocol: Option, + expected_rpc_protocol: Option, + is_rpc_request: bool, + ) { + // Extract the peer and network ID + let expected_peer_id = expected_peer_network_id.peer_id(); + let expected_network_id = expected_peer_network_id.network_id(); + + // Verify the message is received on the outbound request + // receivers and forward it to the inbound request senders. + let outbound_request_receiver = outbound_request_receivers + .get_mut(&expected_network_id) + .unwrap(); + match timeout(Duration::from_secs(MAX_CHANNEL_TIMEOUT_SECS), outbound_request_receiver.select_next_some()).await { + Ok(peer_manager_request) => { + let (protocol_id, peer_manager_notification) = match peer_manager_request { + PeerManagerRequest::SendRpc(peer_id, outbound_rpc_request) => { + // Verify the message is correct + assert!(is_rpc_request); + assert_eq!(peer_id, expected_peer_id); + assert_eq!(Some(outbound_rpc_request.protocol_id), expected_rpc_protocol); + assert_eq!(outbound_rpc_request.timeout, Duration::from_millis(RPC_REQUEST_TIMEOUT_MS)); + + // Create and return the received message + let received_message = ReceivedMessage { + message: NetworkMessage::RpcRequest(RpcRequest{ + protocol_id: outbound_rpc_request.protocol_id, + request_id: 0, + priority: 0, + raw_request: outbound_rpc_request.data.into(), + }), + sender: PeerNetworkId::new(expected_network_id, peer_id), + receive_timestamp_micros: 0, + rpc_replier: Some(Arc::new(outbound_rpc_request.res_tx)), + }; + (outbound_rpc_request.protocol_id, received_message) + } + PeerManagerRequest::SendDirectSend(peer_id, message) => { + // Verify the message is correct + 
assert!(!is_rpc_request); + assert_eq!(peer_id, expected_peer_id); + assert_eq!(Some(message.protocol_id), expected_direct_send_protocol); + + // Create and return the received message + let received_message = ReceivedMessage { + message: NetworkMessage::DirectSendMsg(DirectSendMsg{ + protocol_id: message.protocol_id, + priority: 0, + raw_msg: message.mdata.into(), + }), + sender: PeerNetworkId::new(expected_network_id, peer_id), + receive_timestamp_micros: 0, + rpc_replier: None, + }; + (message.protocol_id, received_message) + } + }; + + // Pass the message from the outbound request receivers to the + // inbound request senders. This emulates network wire transfer. + let inbound_request_sender = inbound_request_senders.get_mut(&expected_network_id).unwrap(); + inbound_request_sender.push((expected_peer_id, protocol_id), peer_manager_notification).unwrap(); + } + Err(elapsed) => panic!( + "Timed out while waiting to receive a message on the outbound receivers channel. Elapsed: {:?}", + elapsed + ), + } + } +} diff --git a/consensus/src/consensus_observer/network_client.rs b/consensus/src/consensus_observer/network/observer_client.rs similarity index 95% rename from consensus/src/consensus_observer/network_client.rs rename to consensus/src/consensus_observer/network/observer_client.rs index ea6806e6afe8f..33c4ce902af33 100644 --- a/consensus/src/consensus_observer/network_client.rs +++ b/consensus/src/consensus_observer/network/observer_client.rs @@ -2,10 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 use crate::consensus_observer::{ - error::Error, - logging::{LogEntry, LogEvent, LogSchema}, - metrics, - network_message::{ + common::{ + error::Error, + logging::{LogEntry, LogEvent, LogSchema}, + metrics, + }, + network::observer_message::{ ConsensusObserverDirectSend, ConsensusObserverMessage, ConsensusObserverRequest, ConsensusObserverResponse, }, @@ -44,7 +46,7 @@ impl> message_label: &str, ) -> Result<(), Error> { // Increment the message counter - 
metrics::increment_request_counter( + metrics::increment_counter( &metrics::PUBLISHER_SENT_MESSAGES, message_label, peer_network_id, @@ -72,7 +74,7 @@ impl> .message(&format!("Failed to send message: {:?}", error))); // Update the direct send error metrics - metrics::increment_request_counter( + metrics::increment_counter( &metrics::PUBLISHER_SENT_MESSAGE_ERRORS, error.get_label(), peer_network_id, @@ -123,7 +125,7 @@ impl> .message(&format!("Failed to serialize message: {:?}", error))); // Update the direct send error metrics - metrics::increment_request_counter( + metrics::increment_counter( &metrics::PUBLISHER_SENT_MESSAGE_ERRORS, error.get_label(), peer_network_id, @@ -145,7 +147,7 @@ impl> let request_id = rand::thread_rng().gen(); // Increment the request counter - metrics::increment_request_counter( + metrics::increment_counter( &metrics::OBSERVER_SENT_REQUESTS, request.get_label(), peer_network_id, @@ -172,7 +174,7 @@ impl> match result { Ok(consensus_observer_response) => { // Update the RPC success metrics - metrics::increment_request_counter( + metrics::increment_counter( &metrics::OBSERVER_RECEIVED_MESSAGE_RESPONSES, request_label, peer_network_id, @@ -190,7 +192,7 @@ impl> .error(&error)); // Update the RPC error metrics - metrics::increment_request_counter( + metrics::increment_counter( &metrics::OBSERVER_SENT_MESSAGE_ERRORS, error.get_label(), peer_network_id, diff --git a/consensus/src/consensus_observer/network/observer_message.rs b/consensus/src/consensus_observer/network/observer_message.rs new file mode 100644 index 0000000000000..8b673f6335f56 --- /dev/null +++ b/consensus/src/consensus_observer/network/observer_message.rs @@ -0,0 +1,1386 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::consensus_observer::common::error::Error; +use aptos_consensus_types::{ + common::{BatchPayload, Payload}, + pipelined_block::PipelinedBlock, + proof_of_store::{BatchInfo, ProofCache, ProofOfStore}, +}; +use 
aptos_crypto::hash::CryptoHash; +use aptos_types::{ + block_info::{BlockInfo, Round}, + epoch_change::Verifier, + epoch_state::EpochState, + ledger_info::LedgerInfoWithSignatures, + transaction::SignedTransaction, +}; +use serde::{Deserialize, Serialize}; +use std::{ + fmt::{Display, Formatter}, + slice::Iter, + sync::Arc, +}; + +/// Types of messages that can be sent between the consensus publisher and observer +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub enum ConsensusObserverMessage { + Request(ConsensusObserverRequest), + Response(ConsensusObserverResponse), + DirectSend(ConsensusObserverDirectSend), +} + +impl ConsensusObserverMessage { + /// Creates and returns a new ordered block message using the given blocks and ordered proof + pub fn new_ordered_block_message( + blocks: Vec>, + ordered_proof: LedgerInfoWithSignatures, + ) -> ConsensusObserverDirectSend { + ConsensusObserverDirectSend::OrderedBlock(OrderedBlock { + blocks, + ordered_proof, + }) + } + + /// Creates and returns a new commit decision message using the given commit decision + pub fn new_commit_decision_message( + commit_proof: LedgerInfoWithSignatures, + ) -> ConsensusObserverDirectSend { + ConsensusObserverDirectSend::CommitDecision(CommitDecision { commit_proof }) + } + + /// Creates and returns a new block payload message using the given block, transactions and limit + pub fn new_block_payload_message( + block: BlockInfo, + transaction_payload: BlockTransactionPayload, + ) -> ConsensusObserverDirectSend { + ConsensusObserverDirectSend::BlockPayload(BlockPayload { + block, + transaction_payload, + }) + } +} + +impl Display for ConsensusObserverMessage { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + ConsensusObserverMessage::Request(request) => { + write!(f, "ConsensusObserverRequest: {}", request) + }, + ConsensusObserverMessage::Response(response) => { + write!(f, "ConsensusObserverResponse: {}", response) + }, + 
ConsensusObserverMessage::DirectSend(direct_send) => { + write!(f, "ConsensusObserverDirectSend: {}", direct_send) + }, + } + } +} + +/// Types of requests that can be sent between the consensus publisher and observer +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub enum ConsensusObserverRequest { + Subscribe, + Unsubscribe, +} + +impl ConsensusObserverRequest { + /// Returns a summary label for the request + pub fn get_label(&self) -> &'static str { + match self { + ConsensusObserverRequest::Subscribe => "subscribe", + ConsensusObserverRequest::Unsubscribe => "unsubscribe", + } + } +} + +impl Display for ConsensusObserverRequest { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.get_label()) + } +} + +/// Types of responses that can be sent between the consensus publisher and observer +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub enum ConsensusObserverResponse { + SubscribeAck, + UnsubscribeAck, +} + +impl ConsensusObserverResponse { + /// Returns a summary label for the response + pub fn get_label(&self) -> &'static str { + match self { + ConsensusObserverResponse::SubscribeAck => "subscribe_ack", + ConsensusObserverResponse::UnsubscribeAck => "unsubscribe_ack", + } + } +} + +impl Display for ConsensusObserverResponse { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.get_label()) + } +} + +/// Types of direct sends that can be sent between the consensus publisher and observer +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub enum ConsensusObserverDirectSend { + OrderedBlock(OrderedBlock), + CommitDecision(CommitDecision), + BlockPayload(BlockPayload), +} + +impl ConsensusObserverDirectSend { + /// Returns a summary label for the direct send + pub fn get_label(&self) -> &'static str { + match self { + ConsensusObserverDirectSend::OrderedBlock(_) => "ordered_block", + ConsensusObserverDirectSend::CommitDecision(_) => 
"commit_decision", + ConsensusObserverDirectSend::BlockPayload(_) => "block_payload", + } + } +} + +impl Display for ConsensusObserverDirectSend { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + ConsensusObserverDirectSend::OrderedBlock(ordered_block) => { + write!(f, "OrderedBlock: {}", ordered_block.proof_block_info()) + }, + ConsensusObserverDirectSend::CommitDecision(commit_decision) => { + write!(f, "CommitDecision: {}", commit_decision.proof_block_info()) + }, + ConsensusObserverDirectSend::BlockPayload(block_payload) => { + write!( + f, + "BlockPayload: {}. Number of transactions: {}, limit: {:?}, proofs: {:?}", + block_payload.block, + block_payload.transaction_payload.transactions().len(), + block_payload.transaction_payload.limit(), + block_payload.transaction_payload.payload_proofs(), + ) + }, + } + } +} + +/// OrderedBlock message contains the ordered blocks and the proof of the ordering +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub struct OrderedBlock { + blocks: Vec>, + ordered_proof: LedgerInfoWithSignatures, +} + +impl OrderedBlock { + pub fn new(blocks: Vec>, ordered_proof: LedgerInfoWithSignatures) -> Self { + Self { + blocks, + ordered_proof, + } + } + + /// Returns a reference to the ordered blocks + pub fn blocks(&self) -> &Vec> { + &self.blocks + } + + /// Returns a copy of the first ordered block + pub fn first_block(&self) -> Arc { + self.blocks + .first() + .cloned() + .expect("At least one block is expected!") + } + + /// Returns a copy of the last ordered block + pub fn last_block(&self) -> Arc { + self.blocks + .last() + .cloned() + .expect("At least one block is expected!") + } + + /// Returns a reference to the ordered proof + pub fn ordered_proof(&self) -> &LedgerInfoWithSignatures { + &self.ordered_proof + } + + /// Returns a reference to the ordered proof block info + pub fn proof_block_info(&self) -> &BlockInfo { + self.ordered_proof.commit_info() + } + + /// Verifies the ordered 
blocks and returns an error if the data is invalid. + /// Note: this does not check the ordered proof. + pub fn verify_ordered_blocks(&self) -> Result<(), Error> { + // Verify that we have at least one ordered block + if self.blocks.is_empty() { + return Err(Error::InvalidMessageError( + "Received empty ordered block!".to_string(), + )); + } + + // Verify the last block ID matches the ordered proof block ID + if self.last_block().id() != self.proof_block_info().id() { + return Err(Error::InvalidMessageError( + format!( + "Last ordered block ID does not match the ordered proof ID! Number of blocks: {:?}, Last ordered block ID: {:?}, Ordered proof ID: {:?}", + self.blocks.len(), + self.last_block().id(), + self.proof_block_info().id() + ) + )); + } + + // Verify the blocks are correctly chained together (from the last block to the first) + let mut expected_parent_id = None; + for block in self.blocks.iter().rev() { + if let Some(expected_parent_id) = expected_parent_id { + if block.id() != expected_parent_id { + return Err(Error::InvalidMessageError( + format!( + "Block parent ID does not match the expected parent ID! 
Block ID: {:?}, Expected parent ID: {:?}", + block.id(), + expected_parent_id + ) + )); + } + } + + expected_parent_id = Some(block.parent_id()); + } + + Ok(()) + } + + /// Verifies the ordered proof and returns an error if the proof is invalid + pub fn verify_ordered_proof(&self, epoch_state: &EpochState) -> Result<(), Error> { + epoch_state.verify(&self.ordered_proof).map_err(|error| { + Error::InvalidMessageError(format!( + "Failed to verify ordered proof ledger info: {:?}, Error: {:?}", + self.proof_block_info(), + error + )) + }) + } +} + +/// CommitDecision message contains the commit decision proof +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub struct CommitDecision { + commit_proof: LedgerInfoWithSignatures, +} + +impl CommitDecision { + pub fn new(commit_proof: LedgerInfoWithSignatures) -> Self { + Self { commit_proof } + } + + /// Returns a reference to the commit proof + pub fn commit_proof(&self) -> &LedgerInfoWithSignatures { + &self.commit_proof + } + + /// Returns the epoch of the commit proof + pub fn epoch(&self) -> u64 { + self.commit_proof.ledger_info().epoch() + } + + /// Returns a reference to the commit proof block info + pub fn proof_block_info(&self) -> &BlockInfo { + self.commit_proof.commit_info() + } + + /// Returns the round of the commit proof + pub fn round(&self) -> Round { + self.commit_proof.ledger_info().round() + } + + /// Verifies the commit proof and returns an error if the proof is invalid + pub fn verify_commit_proof(&self, epoch_state: &EpochState) -> Result<(), Error> { + epoch_state.verify(&self.commit_proof).map_err(|error| { + Error::InvalidMessageError(format!( + "Failed to verify commit proof ledger info: {:?}, Error: {:?}", + self.proof_block_info(), + error + )) + }) + } +} + +/// The transaction payload and proof of each block +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub struct PayloadWithProof { + transactions: Vec, + proofs: Vec, +} + +impl PayloadWithProof { + pub fn 
new(transactions: Vec, proofs: Vec) -> Self { + Self { + transactions, + proofs, + } + } + + #[cfg(test)] + /// Returns an empty payload with proof (for testing) + pub fn empty() -> Self { + Self { + transactions: vec![], + proofs: vec![], + } + } +} + +/// The transaction payload and proof of each block with a transaction limit +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub struct PayloadWithProofAndLimit { + payload_with_proof: PayloadWithProof, + transaction_limit: Option, +} + +impl PayloadWithProofAndLimit { + pub fn new(payload_with_proof: PayloadWithProof, limit: Option) -> Self { + Self { + payload_with_proof, + transaction_limit: limit, + } + } + + #[cfg(test)] + /// Returns an empty payload with proof and limit (for testing) + pub fn empty() -> Self { + Self { + payload_with_proof: PayloadWithProof::empty(), + transaction_limit: None, + } + } +} + +/// The transaction payload of each block +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub enum BlockTransactionPayload { + InQuorumStore(PayloadWithProof), + InQuorumStoreWithLimit(PayloadWithProofAndLimit), + QuorumStoreInlineHybrid(PayloadWithProofAndLimit, Vec), + OptQuorumStore(PayloadWithProofAndLimit, Vec), +} + +impl BlockTransactionPayload { + /// Creates a returns a new InQuorumStore transaction payload + pub fn new_in_quorum_store( + transactions: Vec, + proofs: Vec, + ) -> Self { + let payload_with_proof = PayloadWithProof::new(transactions, proofs); + Self::InQuorumStore(payload_with_proof) + } + + /// Creates a returns a new InQuorumStoreWithLimit transaction payload + pub fn new_in_quorum_store_with_limit( + transactions: Vec, + proofs: Vec, + limit: Option, + ) -> Self { + let payload_with_proof = PayloadWithProof::new(transactions, proofs); + let proof_with_limit = PayloadWithProofAndLimit::new(payload_with_proof, limit); + Self::InQuorumStoreWithLimit(proof_with_limit) + } + + /// Creates a returns a new QuorumStoreInlineHybrid transaction payload + 
pub fn new_quorum_store_inline_hybrid( + transactions: Vec, + proofs: Vec, + limit: Option, + inline_batches: Vec, + ) -> Self { + let payload_with_proof = PayloadWithProof::new(transactions, proofs); + let proof_with_limit = PayloadWithProofAndLimit::new(payload_with_proof, limit); + Self::QuorumStoreInlineHybrid(proof_with_limit, inline_batches) + } + + pub fn new_opt_quorum_store( + transactions: Vec, + proofs: Vec, + limit: Option, + batch_infos: Vec, + ) -> Self { + let payload_with_proof = PayloadWithProof::new(transactions, proofs); + let proof_with_limit = PayloadWithProofAndLimit::new(payload_with_proof, limit); + Self::OptQuorumStore(proof_with_limit, batch_infos) + } + + #[cfg(test)] + /// Returns an empty transaction payload (for testing) + pub fn empty() -> Self { + Self::QuorumStoreInlineHybrid(PayloadWithProofAndLimit::empty(), vec![]) + } + + /// Returns the list of inline batches in the transaction payload + pub fn inline_batches(&self) -> Vec<&BatchInfo> { + match self { + BlockTransactionPayload::QuorumStoreInlineHybrid(_, inline_batches) => { + inline_batches.iter().collect() + }, + _ => vec![], + } + } + + /// Returns the limit of the transaction payload + pub fn limit(&self) -> Option { + match self { + BlockTransactionPayload::InQuorumStore(_) => None, + BlockTransactionPayload::InQuorumStoreWithLimit(payload) => payload.transaction_limit, + BlockTransactionPayload::QuorumStoreInlineHybrid(payload, _) => { + payload.transaction_limit + }, + BlockTransactionPayload::OptQuorumStore(payload, _) => payload.transaction_limit, + } + } + + /// Returns the proofs of the transaction payload + pub fn payload_proofs(&self) -> Vec { + match self { + BlockTransactionPayload::InQuorumStore(payload) => payload.proofs.clone(), + BlockTransactionPayload::InQuorumStoreWithLimit(payload) => { + payload.payload_with_proof.proofs.clone() + }, + BlockTransactionPayload::QuorumStoreInlineHybrid(payload, _) => { + payload.payload_with_proof.proofs.clone() + }, + 
BlockTransactionPayload::OptQuorumStore(payload, _) => { + payload.payload_with_proof.proofs.clone() + }, + } + } + + /// Returns the transactions in the payload + pub fn transactions(&self) -> Vec { + match self { + BlockTransactionPayload::InQuorumStore(payload) => payload.transactions.clone(), + BlockTransactionPayload::InQuorumStoreWithLimit(payload) => { + payload.payload_with_proof.transactions.clone() + }, + BlockTransactionPayload::QuorumStoreInlineHybrid(payload, _) => { + payload.payload_with_proof.transactions.clone() + }, + BlockTransactionPayload::OptQuorumStore(payload, _) => { + payload.payload_with_proof.transactions.clone() + }, + } + } + + /// Verifies the transaction payload against the given ordered block payload + pub fn verify_against_ordered_payload( + &self, + ordered_block_payload: &Payload, + ) -> Result<(), Error> { + match ordered_block_payload { + Payload::DirectMempool(_) => { + return Err(Error::InvalidMessageError( + "Direct mempool payloads are not supported for consensus observer!".into(), + )); + }, + Payload::InQuorumStore(proof_with_data) => { + // Verify the batches in the requested block + self.verify_batches(&proof_with_data.proofs)?; + }, + Payload::InQuorumStoreWithLimit(proof_with_data) => { + // Verify the batches in the requested block + self.verify_batches(&proof_with_data.proof_with_data.proofs)?; + + // Verify the transaction limit + self.verify_transaction_limit(proof_with_data.max_txns_to_execute)?; + }, + Payload::QuorumStoreInlineHybrid( + inline_batches, + proof_with_data, + max_txns_to_execute, + ) => { + // Verify the batches in the requested block + self.verify_batches(&proof_with_data.proofs)?; + + // Verify the inline batches + self.verify_inline_batches(inline_batches)?; + + // Verify the transaction limit + self.verify_transaction_limit(*max_txns_to_execute)?; + }, + Payload::OptQuorumStore(opt_qs_payload) => { + // Verify the batches in the requested block + 
self.verify_batches(opt_qs_payload.proof_with_data())?; + + // Verify the inline batches + self.verify_opt_batches(opt_qs_payload.opt_batches())?; + + // Verify the transaction limit + self.verify_transaction_limit(opt_qs_payload.max_txns_to_execute())?; + }, + } + + Ok(()) + } + + /// Verifies the payload batches against the expected batches + fn verify_batches(&self, expected_proofs: &[ProofOfStore]) -> Result<(), Error> { + // Get the batches in the block transaction payload + let payload_proofs = self.payload_proofs(); + let payload_batches: Vec<&BatchInfo> = + payload_proofs.iter().map(|proof| proof.info()).collect(); + + // Compare the expected batches against the payload batches + let expected_batches: Vec<&BatchInfo> = + expected_proofs.iter().map(|proof| proof.info()).collect(); + if expected_batches != payload_batches { + return Err(Error::InvalidMessageError(format!( + "Transaction payload failed batch verification! Expected batches {:?}, but found {:?}!", + expected_batches, payload_batches + ))); + } + + Ok(()) + } + + /// Verifies the inline batches against the expected inline batches + fn verify_inline_batches( + &self, + expected_inline_batches: &[(BatchInfo, Vec)], + ) -> Result<(), Error> { + // Get the expected inline batches + let expected_inline_batches: Vec<&BatchInfo> = expected_inline_batches + .iter() + .map(|(batch_info, _)| batch_info) + .collect(); + + // Get the inline batches in the payload + let inline_batches: Vec<&BatchInfo> = match self { + BlockTransactionPayload::QuorumStoreInlineHybrid(_, inline_batches) => { + inline_batches.iter().map(|batch_info| batch_info).collect() + }, + _ => { + return Err(Error::InvalidMessageError( + "Transaction payload does not contain inline batches!".to_string(), + )) + }, + }; + + // Compare the expected inline batches against the payload inline batches + if expected_inline_batches != inline_batches { + return Err(Error::InvalidMessageError(format!( + "Transaction payload failed inline batch 
verification! Expected inline batches {:?} but found {:?}", + expected_inline_batches, inline_batches + ))); + } + + Ok(()) + } + + fn verify_opt_batches(&self, expected_opt_batches: &Vec) -> Result<(), Error> { + let opt_batches: &Vec = match self { + BlockTransactionPayload::OptQuorumStore(_, opt_batches) => opt_batches, + _ => { + return Err(Error::InvalidMessageError( + "Transaction payload is not an OptQS Payload".to_string(), + )) + }, + }; + + if expected_opt_batches != opt_batches { + return Err(Error::InvalidMessageError(format!( + "Transaction payload failed optimistic batch verification! Expected optimistic batches {:?} but found {:?}", + expected_opt_batches, opt_batches + ))); + } + Ok(()) + } + + /// Verifies the payload limit against the expected limit + fn verify_transaction_limit( + &self, + expected_transaction_limit: Option, + ) -> Result<(), Error> { + // Get the payload limit + let limit = match self { + BlockTransactionPayload::InQuorumStoreWithLimit(payload) => payload.transaction_limit, + BlockTransactionPayload::QuorumStoreInlineHybrid(payload, _) => { + payload.transaction_limit + }, + _ => { + return Err(Error::InvalidMessageError( + "Transaction payload does not contain a limit!".to_string(), + )) + }, + }; + + // Compare the expected limit against the payload limit + if expected_transaction_limit != limit { + return Err(Error::InvalidMessageError(format!( + "Transaction payload failed limit verification! 
Expected limit: {:?}, Found limit: {:?}", + expected_transaction_limit, limit + ))); + } + + Ok(()) + } +} + +/// Payload message contains the block and transaction payload +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub struct BlockPayload { + block: BlockInfo, + transaction_payload: BlockTransactionPayload, +} + +impl BlockPayload { + pub fn new(block: BlockInfo, transaction_payload: BlockTransactionPayload) -> Self { + Self { + block, + transaction_payload, + } + } + + /// Returns a reference to the block info + pub fn block(&self) -> &BlockInfo { + &self.block + } + + /// Returns the epoch of the block info + pub fn epoch(&self) -> u64 { + self.block.epoch() + } + + /// Returns the round of the block info + pub fn round(&self) -> Round { + self.block.round() + } + + /// Returns a reference to the block transaction payload + pub fn transaction_payload(&self) -> &BlockTransactionPayload { + &self.transaction_payload + } + + /// Verifies the block payload digests and returns an error if the data is invalid + pub fn verify_payload_digests(&self) -> Result<(), Error> { + // Get the transactions, payload proofs and inline batches + let transactions = self.transaction_payload.transactions(); + let payload_proofs = self.transaction_payload.payload_proofs(); + let inline_batches = self.transaction_payload.inline_batches(); + + // Get the number of transactions, payload proofs and inline batches + let num_transactions = transactions.len(); + let num_payload_proofs = payload_proofs.len(); + let num_inline_batches = inline_batches.len(); + + // Verify the payload proof digests using the transactions + let mut transactions_iter = transactions.iter(); + for proof_of_store in &payload_proofs { + reconstruct_and_verify_batch(&mut transactions_iter, proof_of_store.info()).map_err( + |error| { + Error::InvalidMessageError(format!( + "Failed to verify payload proof digests! 
Num transactions: {:?}, \ + num batches: {:?}, num inline batches: {:?}, failed batch: {:?}, Error: {:?}", + num_transactions, num_payload_proofs, num_inline_batches, proof_of_store.info(), error + )) + }, + )?; + } + + // Verify the inline batch digests using the transactions + for batch_info in inline_batches.into_iter() { + reconstruct_and_verify_batch(&mut transactions_iter, batch_info).map_err( + |error| { + Error::InvalidMessageError(format!( + "Failed to verify inline batch digests! Num transactions: {:?}, \ + num batches: {:?}, num inline batches: {:?}, failed batch: {:?}, Error: {:?}", + num_transactions, num_payload_proofs, num_inline_batches, batch_info, error + )) + }, + )?; + } + + // Verify that there are no transactions remaining (all transactions should be consumed) + let remaining_transactions = transactions_iter.as_slice(); + if !remaining_transactions.is_empty() { + return Err(Error::InvalidMessageError(format!( + "Failed to verify payload transactions! Num transactions: {:?}, \ + transactions remaining: {:?}. Expected: 0", + num_transactions, + remaining_transactions.len() + ))); + } + + Ok(()) // All digests match + } + + /// Verifies that the block payload proofs are correctly signed according + /// to the current epoch state. Returns an error if the data is invalid. + pub fn verify_payload_signatures(&self, epoch_state: &EpochState) -> Result<(), Error> { + // Create a dummy proof cache to verify the proofs + let proof_cache = ProofCache::new(1); + + // TODO: parallelize the verification of the proof signatures! 
+ + // Verify each of the proof signatures + let validator_verifier = &epoch_state.verifier; + for proof_of_store in &self.transaction_payload.payload_proofs() { + if let Err(error) = proof_of_store.verify(validator_verifier, &proof_cache) { + return Err(Error::InvalidMessageError(format!( + "Failed to verify the proof of store for batch: {:?}, Error: {:?}", + proof_of_store.info(), + error + ))); + } + } + + Ok(()) // All proofs are correctly signed + } +} + +/// Reconstructs and verifies the batch using the +/// given transactions and the expected batch info. +fn reconstruct_and_verify_batch( + transactions_iter: &mut Iter, + expected_batch_info: &BatchInfo, +) -> Result<(), Error> { + // Gather the transactions for the batch + let mut batch_transactions = vec![]; + for i in 0..expected_batch_info.num_txns() { + let batch_transaction = match transactions_iter.next() { + Some(transaction) => transaction, + None => { + return Err(Error::InvalidMessageError(format!( + "Failed to extract transaction during batch reconstruction! Batch: {:?}, transaction index: {:?}", + expected_batch_info, i + ))); + }, + }; + batch_transactions.push(batch_transaction.clone()); + } + + // Calculate the batch digest + let batch_payload = BatchPayload::new(expected_batch_info.author(), batch_transactions); + let batch_digest = batch_payload.hash(); + + // Verify the reconstructed digest against the expected digest + let expected_digest = expected_batch_info.digest(); + if batch_digest != *expected_digest { + return Err(Error::InvalidMessageError(format!( + "The reconstructed batch digest does not match the expected digest! 
\ + Batch: {:?}, Expected digest: {:?}, Reconstructed digest: {:?}", + expected_batch_info, expected_digest, batch_digest + ))); + } + + Ok(()) +} + +#[cfg(test)] +mod test { + use super::*; + use aptos_bitvec::BitVec; + use aptos_consensus_types::{ + block::Block, + block_data::{BlockData, BlockType}, + common::{Author, ProofWithData, ProofWithDataWithTxnLimit}, + proof_of_store::BatchId, + quorum_cert::QuorumCert, + }; + use aptos_crypto::{ed25519::Ed25519PrivateKey, HashValue, PrivateKey, SigningKey, Uniform}; + use aptos_types::{ + aggregate_signature::AggregateSignature, + chain_id::ChainId, + ledger_info::LedgerInfo, + transaction::{RawTransaction, Script, TransactionPayload}, + validator_signer::ValidatorSigner, + validator_verifier::{ValidatorConsensusInfo, ValidatorVerifier}, + PeerId, + }; + use claims::assert_matches; + use move_core_types::account_address::AccountAddress; + + #[test] + fn test_verify_against_ordered_payload_mempool() { + // Create an empty transaction payload + let transaction_payload = BlockTransactionPayload::new_in_quorum_store(vec![], vec![]); + + // Create a direct mempool payload + let ordered_payload = Payload::DirectMempool(vec![]); + + // Verify the transaction payload and ensure it fails (mempool payloads are not supported) + let error = transaction_payload + .verify_against_ordered_payload(&ordered_payload) + .unwrap_err(); + assert_matches!(error, Error::InvalidMessageError(_)); + } + + #[test] + fn test_verify_against_ordered_payload_in_qs() { + // Create an empty transaction payload with no proofs + let proofs = vec![]; + let transaction_payload = + BlockTransactionPayload::new_in_quorum_store(vec![], proofs.clone()); + + // Create a quorum store payload with a single proof + let batch_info = create_batch_info(); + let proof_with_data = ProofWithData::new(vec![ProofOfStore::new( + batch_info, + AggregateSignature::empty(), + )]); + let ordered_payload = Payload::InQuorumStore(proof_with_data); + + // Verify the transaction 
payload and ensure it fails (the batch infos don't match) + let error = transaction_payload + .verify_against_ordered_payload(&ordered_payload) + .unwrap_err(); + assert_matches!(error, Error::InvalidMessageError(_)); + + // Create a quorum store payload with no proofs + let proof_with_data = ProofWithData::new(proofs); + let ordered_payload = Payload::InQuorumStore(proof_with_data); + + // Verify the transaction payload and ensure it passes + transaction_payload + .verify_against_ordered_payload(&ordered_payload) + .unwrap(); + } + + #[test] + fn test_verify_against_ordered_payload_in_qs_limit() { + // Create an empty transaction payload with no proofs + let proofs = vec![]; + let transaction_limit = Some(10); + let transaction_payload = BlockTransactionPayload::new_in_quorum_store_with_limit( + vec![], + proofs.clone(), + transaction_limit, + ); + + // Create a quorum store payload with a single proof + let batch_info = create_batch_info(); + let proof_with_data = ProofWithDataWithTxnLimit::new( + ProofWithData::new(vec![ProofOfStore::new( + batch_info, + AggregateSignature::empty(), + )]), + transaction_limit, + ); + let ordered_payload = Payload::InQuorumStoreWithLimit(proof_with_data); + + // Verify the transaction payload and ensure it fails (the batch infos don't match) + let error = transaction_payload + .verify_against_ordered_payload(&ordered_payload) + .unwrap_err(); + assert_matches!(error, Error::InvalidMessageError(_)); + + // Create a quorum store payload with no proofs and no transaction limit + let proof_with_data = + ProofWithDataWithTxnLimit::new(ProofWithData::new(proofs.clone()), None); + let ordered_payload = Payload::InQuorumStoreWithLimit(proof_with_data); + + // Verify the transaction payload and ensure it fails (the transaction limit doesn't match) + let error = transaction_payload + .verify_against_ordered_payload(&ordered_payload) + .unwrap_err(); + assert_matches!(error, Error::InvalidMessageError(_)); + + // Create a quorum store 
payload with no proofs and the correct limit + let proof_with_data = + ProofWithDataWithTxnLimit::new(ProofWithData::new(proofs), transaction_limit); + let ordered_payload = Payload::InQuorumStoreWithLimit(proof_with_data); + + // Verify the transaction payload and ensure it passes + transaction_payload + .verify_against_ordered_payload(&ordered_payload) + .unwrap(); + } + + #[test] + fn test_verify_against_ordered_payload_in_qs_hybrid() { + // Create an empty transaction payload with no proofs and no inline batches + let proofs = vec![]; + let transaction_limit = Some(100); + let inline_batches = vec![]; + let transaction_payload = BlockTransactionPayload::new_quorum_store_inline_hybrid( + vec![], + proofs.clone(), + transaction_limit, + inline_batches.clone(), + ); + + // Create a quorum store payload with a single proof + let inline_batches = vec![]; + let batch_info = create_batch_info(); + let proof_with_data = ProofWithData::new(vec![ProofOfStore::new( + batch_info, + AggregateSignature::empty(), + )]); + let ordered_payload = Payload::QuorumStoreInlineHybrid( + inline_batches.clone(), + proof_with_data, + transaction_limit, + ); + + // Verify the transaction payload and ensure it fails (the batch infos don't match) + let error = transaction_payload + .verify_against_ordered_payload(&ordered_payload) + .unwrap_err(); + assert_matches!(error, Error::InvalidMessageError(_)); + + // Create a quorum store payload with no transaction limit + let proof_with_data = ProofWithData::new(vec![]); + let ordered_payload = + Payload::QuorumStoreInlineHybrid(inline_batches.clone(), proof_with_data, None); + + // Verify the transaction payload and ensure it fails (the transaction limit doesn't match) + let error = transaction_payload + .verify_against_ordered_payload(&ordered_payload) + .unwrap_err(); + assert_matches!(error, Error::InvalidMessageError(_)); + + // Create a quorum store payload with a single inline batch + let proof_with_data = ProofWithData::new(vec![]); + 
let ordered_payload = Payload::QuorumStoreInlineHybrid( + vec![(create_batch_info(), vec![])], + proof_with_data, + transaction_limit, + ); + + // Verify the transaction payload and ensure it fails (the inline batches don't match) + let error = transaction_payload + .verify_against_ordered_payload(&ordered_payload) + .unwrap_err(); + assert_matches!(error, Error::InvalidMessageError(_)); + + // Create an empty quorum store payload + let proof_with_data = ProofWithData::new(vec![]); + let ordered_payload = + Payload::QuorumStoreInlineHybrid(vec![], proof_with_data, transaction_limit); + + // Verify the transaction payload and ensure it passes + transaction_payload + .verify_against_ordered_payload(&ordered_payload) + .unwrap(); + } + + #[test] + fn test_verify_commit_proof() { + // Create a ledger info with an empty signature set + let current_epoch = 0; + let ledger_info = create_empty_ledger_info(current_epoch); + + // Create an epoch state for the current epoch (with an empty verifier) + let epoch_state = EpochState::new(current_epoch, ValidatorVerifier::new(vec![])); + + // Create a commit decision message with the ledger info + let commit_decision = CommitDecision::new(ledger_info); + + // Verify the commit proof and ensure it passes + commit_decision.verify_commit_proof(&epoch_state).unwrap(); + + // Create an epoch state for the current epoch (with a non-empty verifier) + let validator_signer = ValidatorSigner::random(None); + let validator_consensus_info = ValidatorConsensusInfo::new( + validator_signer.author(), + validator_signer.public_key(), + 100, + ); + let validator_verifier = ValidatorVerifier::new(vec![validator_consensus_info]); + let epoch_state = EpochState::new(current_epoch, validator_verifier.clone()); + + // Verify the commit proof and ensure it fails (the signature set is insufficient) + let error = commit_decision + .verify_commit_proof(&epoch_state) + .unwrap_err(); + assert_matches!(error, Error::InvalidMessageError(_)); + } + + #[test] + 
fn test_verify_ordered_blocks() { + // Create an ordered block with no internal blocks + let current_epoch = 0; + let ordered_block = OrderedBlock::new(vec![], create_empty_ledger_info(current_epoch)); + + // Verify the ordered blocks and ensure it fails (there are no internal blocks) + let error = ordered_block.verify_ordered_blocks().unwrap_err(); + assert_matches!(error, Error::InvalidMessageError(_)); + + // Create a pipelined block with a random block ID + let block_id = HashValue::random(); + let block_info = create_block_info(current_epoch, block_id); + let pipelined_block = create_pipelined_block(block_info.clone()); + + // Create an ordered block with the pipelined block and random proof + let ordered_block = OrderedBlock::new( + vec![pipelined_block.clone()], + create_empty_ledger_info(current_epoch), + ); + + // Verify the ordered blocks and ensure it fails (the block IDs don't match) + let error = ordered_block.verify_ordered_blocks().unwrap_err(); + assert_matches!(error, Error::InvalidMessageError(_)); + + // Create an ordered block proof with the same block ID + let ordered_proof = LedgerInfoWithSignatures::new( + LedgerInfo::new(block_info, HashValue::random()), + AggregateSignature::empty(), + ); + + // Create an ordered block with the correct proof + let ordered_block = OrderedBlock::new(vec![pipelined_block], ordered_proof); + + // Verify the ordered block and ensure it passes + ordered_block.verify_ordered_blocks().unwrap(); + } + + #[test] + fn test_verify_ordered_blocks_chained() { + // Create multiple pipelined blocks not chained together + let current_epoch = 0; + let mut pipelined_blocks = vec![]; + for _ in 0..3 { + // Create the pipelined block + let block_id = HashValue::random(); + let block_info = create_block_info(current_epoch, block_id); + let pipelined_block = create_pipelined_block(block_info); + + // Add the pipelined block to the list + pipelined_blocks.push(pipelined_block); + } + + // Create an ordered block proof with the 
same block ID as the last pipelined block + let last_block_info = pipelined_blocks.last().unwrap().block_info().clone(); + let ordered_proof = LedgerInfoWithSignatures::new( + LedgerInfo::new(last_block_info, HashValue::random()), + AggregateSignature::empty(), + ); + + // Create an ordered block with the pipelined blocks and proof + let ordered_block = OrderedBlock::new(pipelined_blocks, ordered_proof); + + // Verify the ordered block and ensure it fails (the blocks are not chained) + let error = ordered_block.verify_ordered_blocks().unwrap_err(); + assert_matches!(error, Error::InvalidMessageError(_)); + + // Create multiple pipelined blocks that are chained together + let mut pipelined_blocks = vec![]; + let mut expected_parent_id = None; + for _ in 0..5 { + // Create the pipelined block + let block_id = HashValue::random(); + let block_info = create_block_info(current_epoch, block_id); + let pipelined_block = create_pipelined_block_with_parent( + block_info, + expected_parent_id.unwrap_or_default(), + ); + + // Add the pipelined block to the list + pipelined_blocks.push(pipelined_block); + + // Update the expected parent ID + expected_parent_id = Some(block_id); + } + + // Create an ordered block proof with the same block ID as the last pipelined block + let last_block_info = pipelined_blocks.last().unwrap().block_info().clone(); + let ordered_proof = LedgerInfoWithSignatures::new( + LedgerInfo::new(last_block_info, HashValue::random()), + AggregateSignature::empty(), + ); + + // Create an ordered block with the pipelined blocks and proof + let ordered_block = OrderedBlock::new(pipelined_blocks, ordered_proof); + + // Verify the ordered block and ensure it passes + ordered_block.verify_ordered_blocks().unwrap(); + } + + #[test] + fn test_verify_ordered_proof() { + // Create a ledger info with an empty signature set + let current_epoch = 100; + let ledger_info = create_empty_ledger_info(current_epoch); + + // Create an epoch state for the current epoch (with an 
empty verifier) + let epoch_state = EpochState::new(current_epoch, ValidatorVerifier::new(vec![])); + + // Create an ordered block message with an empty block and ordered proof + let ordered_block = OrderedBlock::new(vec![], ledger_info); + + // Verify the ordered proof and ensure it passes + ordered_block.verify_ordered_proof(&epoch_state).unwrap(); + + // Create an epoch state for the current epoch (with a non-empty verifier) + let validator_signer = ValidatorSigner::random(None); + let validator_consensus_info = ValidatorConsensusInfo::new( + validator_signer.author(), + validator_signer.public_key(), + 100, + ); + let validator_verifier = ValidatorVerifier::new(vec![validator_consensus_info]); + let epoch_state = EpochState::new(current_epoch, validator_verifier.clone()); + + // Verify the ordered proof and ensure it fails (the signature set is insufficient) + let error = ordered_block + .verify_ordered_proof(&epoch_state) + .unwrap_err(); + assert_matches!(error, Error::InvalidMessageError(_)); + } + + #[test] + fn test_verify_payload_digests() { + // Create multiple signed transactions + let num_signed_transactions = 10; + let mut signed_transactions = create_signed_transactions(num_signed_transactions); + + // Create multiple batch proofs with random digests + let num_batches = num_signed_transactions - 1; + let mut proofs = vec![]; + for _ in 0..num_batches { + let batch_info = create_batch_info_with_digest(HashValue::random(), 1); + let proof = ProofOfStore::new(batch_info, AggregateSignature::empty()); + proofs.push(proof); + } + + // Create a single inline batch with a random digest + let inline_batch = create_batch_info_with_digest(HashValue::random(), 1); + let inline_batches = vec![inline_batch]; + + // Create a block payload (with the transactions, proofs and inline batches) + let block_payload = create_block_payload(&signed_transactions, &proofs, &inline_batches); + + // Verify the block payload digests and ensure it fails (the batch digests don't 
match) + let error = block_payload.verify_payload_digests().unwrap_err(); + assert_matches!(error, Error::InvalidMessageError(_)); + + // Create multiple batch proofs with the correct digests + let mut proofs = vec![]; + for transaction in &signed_transactions[0..num_batches] { + let batch_payload = BatchPayload::new(PeerId::ZERO, vec![transaction.clone()]); + let batch_info = create_batch_info_with_digest(batch_payload.hash(), 1); + let proof = ProofOfStore::new(batch_info, AggregateSignature::empty()); + proofs.push(proof); + } + + // Create a block payload (with the transactions, correct proofs and inline batches) + let block_payload = create_block_payload(&signed_transactions, &proofs, &inline_batches); + + // Verify the block payload digests and ensure it fails (the inline batch digests don't match) + let error = block_payload.verify_payload_digests().unwrap_err(); + assert_matches!(error, Error::InvalidMessageError(_)); + + // Create a single inline batch with the correct digest + let inline_batch_payload = BatchPayload::new(PeerId::ZERO, vec![signed_transactions + .last() + .unwrap() + .clone()]); + let inline_batch_info = create_batch_info_with_digest(inline_batch_payload.hash(), 1); + let inline_batches = vec![inline_batch_info]; + + // Create a block payload (with the transactions, correct proofs and correct inline batches) + let block_payload = create_block_payload(&signed_transactions, &proofs, &inline_batches); + + // Verify the block payload digests and ensure it passes + block_payload.verify_payload_digests().unwrap(); + + // Create a block payload (with too many transactions) + signed_transactions.append(&mut create_signed_transactions(1)); + let block_payload = create_block_payload(&signed_transactions, &proofs, &inline_batches); + + // Verify the block payload digests and ensure it fails (there are too many transactions) + let error = block_payload.verify_payload_digests().unwrap_err(); + assert_matches!(error, Error::InvalidMessageError(_)); + + 
// Create a block payload (with too few transactions) + for _ in 0..3 { + signed_transactions.pop(); + } + let block_payload = create_block_payload(&signed_transactions, &proofs, &inline_batches); + + // Verify the block payload digests and ensure it fails (there are too few transactions) + let error = block_payload.verify_payload_digests().unwrap_err(); + assert_matches!(error, Error::InvalidMessageError(_)); + } + + #[test] + fn test_verify_payload_signatures() { + // Create multiple batch info proofs (with empty signatures) + let mut proofs = vec![]; + for _ in 0..3 { + let batch_info = create_batch_info(); + let proof = ProofOfStore::new(batch_info, AggregateSignature::empty()); + proofs.push(proof); + } + + // Create a transaction payload (with the proofs) + let transaction_payload = BlockTransactionPayload::new_quorum_store_inline_hybrid( + vec![], + proofs.clone(), + None, + vec![], + ); + + // Create a block payload + let current_epoch = 50; + let block_info = create_block_info(current_epoch, HashValue::random()); + let block_payload = BlockPayload::new(block_info, transaction_payload); + + // Create an epoch state for the current epoch (with an empty verifier) + let epoch_state = EpochState::new(current_epoch, ValidatorVerifier::new(vec![])); + + // Verify the block payload signatures and ensure it passes + block_payload + .verify_payload_signatures(&epoch_state) + .unwrap(); + + // Create an epoch state for the current epoch (with a non-empty verifier) + let validator_signer = ValidatorSigner::random(None); + let validator_consensus_info = ValidatorConsensusInfo::new( + validator_signer.author(), + validator_signer.public_key(), + 100, + ); + let validator_verifier = ValidatorVerifier::new(vec![validator_consensus_info]); + let epoch_state = EpochState::new(current_epoch, validator_verifier.clone()); + + // Verify the block payload signatures and ensure it fails (the signature set is insufficient) + let error = block_payload + 
.verify_payload_signatures(&epoch_state) + .unwrap_err(); + assert_matches!(error, Error::InvalidMessageError(_)); + } + + /// Creates and returns a new batch info with random data + fn create_batch_info() -> BatchInfo { + create_batch_info_with_digest(HashValue::random(), 0) + } + + /// Creates and returns a new batch info with the specified digest + fn create_batch_info_with_digest(digest: HashValue, num_transactions: u64) -> BatchInfo { + BatchInfo::new( + PeerId::ZERO, + BatchId::new(0), + 10, + 1, + digest, + num_transactions, + 1, + 0, + ) + } + + /// Creates and returns a new ordered block with the given block ID + fn create_block_info(epoch: u64, block_id: HashValue) -> BlockInfo { + BlockInfo::new(epoch, 0, block_id, HashValue::random(), 0, 0, None) + } + + /// Creates and returns a hybrid quorum store payload using the given data + fn create_block_payload( + signed_transactions: &[SignedTransaction], + proofs: &[ProofOfStore], + inline_batches: &[BatchInfo], + ) -> BlockPayload { + // Create the transaction payload + let transaction_payload = BlockTransactionPayload::new_quorum_store_inline_hybrid( + signed_transactions.to_vec(), + proofs.to_vec(), + None, + inline_batches.to_vec(), + ); + + // Create the block payload + BlockPayload::new( + create_block_info(0, HashValue::random()), + transaction_payload, + ) + } + + /// Creates and returns a new ledger info with an empty signature set + fn create_empty_ledger_info(epoch: u64) -> LedgerInfoWithSignatures { + LedgerInfoWithSignatures::new( + LedgerInfo::new(BlockInfo::random_with_epoch(epoch, 0), HashValue::random()), + AggregateSignature::empty(), + ) + } + + /// Creates and returns a new pipelined block with the given block info + fn create_pipelined_block(block_info: BlockInfo) -> Arc { + let block_data = BlockData::new_for_testing( + block_info.epoch(), + block_info.round(), + block_info.timestamp_usecs(), + QuorumCert::dummy(), + BlockType::Genesis, + ); + let block = 
Block::new_for_testing(block_info.id(), block_data, None); + Arc::new(PipelinedBlock::new_ordered(block)) + } + + /// Creates and returns a new pipelined block with the given block info and parent ID + fn create_pipelined_block_with_parent( + block_info: BlockInfo, + parent_block_id: HashValue, + ) -> Arc { + // Create the block type + let block_type = BlockType::DAGBlock { + author: Author::random(), + failed_authors: vec![], + validator_txns: vec![], + payload: Payload::DirectMempool(vec![]), + node_digests: vec![], + parent_block_id, + parents_bitvec: BitVec::with_num_bits(0), + }; + + // Create the block data + let block_data = BlockData::new_for_testing( + block_info.epoch(), + block_info.round(), + block_info.timestamp_usecs(), + QuorumCert::dummy(), + block_type, + ); + + // Create the pipelined block + let block = Block::new_for_testing(block_info.id(), block_data, None); + Arc::new(PipelinedBlock::new_ordered(block)) + } + + /// Creates a returns multiple signed transactions + fn create_signed_transactions(num_transactions: usize) -> Vec { + // Create a random sender and keypair + let private_key = Ed25519PrivateKey::generate_for_testing(); + let public_key = private_key.public_key(); + let sender = AccountAddress::random(); + + // Create multiple signed transactions + let mut transactions = vec![]; + for i in 0..num_transactions { + // Create the raw transaction + let transaction_payload = + TransactionPayload::Script(Script::new(vec![], vec![], vec![])); + let raw_transaction = RawTransaction::new( + sender, + i as u64, + transaction_payload, + 0, + 0, + 0, + ChainId::new(10), + ); + + // Create the signed transaction + let signed_transaction = SignedTransaction::new( + raw_transaction.clone(), + public_key.clone(), + private_key.sign(&raw_transaction).unwrap(), + ); + + // Save the signed transaction + transactions.push(signed_transaction) + } + + transactions + } +} diff --git a/consensus/src/consensus_observer/network_message.rs 
b/consensus/src/consensus_observer/network_message.rs deleted file mode 100644 index d8c95be6db813..0000000000000 --- a/consensus/src/consensus_observer/network_message.rs +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use aptos_consensus_types::{ - pipeline::commit_decision::CommitDecision, pipelined_block::PipelinedBlock, -}; -use aptos_types::{ - block_info::BlockInfo, ledger_info::LedgerInfoWithSignatures, transaction::SignedTransaction, -}; -use serde::{Deserialize, Serialize}; -use std::{ - fmt::{Display, Formatter}, - sync::Arc, -}; - -/// Types of messages that can be sent between the consensus publisher and observer -#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] -pub enum ConsensusObserverMessage { - Request(ConsensusObserverRequest), - Response(ConsensusObserverResponse), - DirectSend(ConsensusObserverDirectSend), -} - -impl ConsensusObserverMessage { - /// Creates and returns a new ordered block message using the given blocks and ordered proof - pub fn new_ordered_block_message( - blocks: Vec>, - ordered_proof: LedgerInfoWithSignatures, - ) -> ConsensusObserverDirectSend { - ConsensusObserverDirectSend::OrderedBlock(OrderedBlock { - blocks, - ordered_proof, - }) - } - - /// Creates and returns a new commit decision message using the given commit decision - pub fn new_commit_decision_message( - commit_decision: CommitDecision, - ) -> ConsensusObserverDirectSend { - ConsensusObserverDirectSend::CommitDecision(commit_decision) - } - - /// Creates and returns a new block payload message using the given block, transactions and limit - pub fn new_block_payload_message( - block: BlockInfo, - transactions: Vec, - limit: Option, - ) -> ConsensusObserverDirectSend { - ConsensusObserverDirectSend::BlockPayload(BlockPayload { - block, - transactions, - limit, - }) - } -} - -impl Display for ConsensusObserverMessage { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - match self 
{ - ConsensusObserverMessage::Request(request) => { - write!(f, "ConsensusObserverRequest: {}", request.get_content()) - }, - ConsensusObserverMessage::Response(response) => { - write!(f, "ConsensusObserverResponse: {}", response.get_content()) - }, - ConsensusObserverMessage::DirectSend(direct_send) => { - write!( - f, - "ConsensusObserverDirectSend: {}", - direct_send.get_content() - ) - }, - } - } -} - -/// Types of requests that can be sent between the consensus publisher and observer -#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] -pub enum ConsensusObserverRequest { - Subscribe, - Unsubscribe, -} - -impl ConsensusObserverRequest { - /// Returns a summary label for the request - pub fn get_label(&self) -> &'static str { - match self { - ConsensusObserverRequest::Subscribe => "subscribe", - ConsensusObserverRequest::Unsubscribe => "unsubscribe", - } - } - - /// Returns the message content for the request. This is useful for debugging. - pub fn get_content(&self) -> String { - self.get_label().into() - } -} - -/// Types of responses that can be sent between the consensus publisher and observer -#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] -pub enum ConsensusObserverResponse { - SubscribeAck, - UnsubscribeAck, -} - -impl ConsensusObserverResponse { - /// Returns a summary label for the response - pub fn get_label(&self) -> &'static str { - match self { - ConsensusObserverResponse::SubscribeAck => "subscribe_ack", - ConsensusObserverResponse::UnsubscribeAck => "unsubscribe_ack", - } - } - - /// Returns the message content for the response. This is useful for debugging. 
- pub fn get_content(&self) -> String { - self.get_label().into() - } -} - -/// Types of direct sends that can be sent between the consensus publisher and observer -#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] -pub enum ConsensusObserverDirectSend { - OrderedBlock(OrderedBlock), - CommitDecision(CommitDecision), - BlockPayload(BlockPayload), -} - -impl ConsensusObserverDirectSend { - /// Returns a summary label for the direct send - pub fn get_label(&self) -> &'static str { - match self { - ConsensusObserverDirectSend::OrderedBlock(_) => "ordered_block", - ConsensusObserverDirectSend::CommitDecision(_) => "commit_decision", - ConsensusObserverDirectSend::BlockPayload(_) => "block_payload", - } - } - - /// Returns the message content for the direct send. This is useful for debugging. - pub fn get_content(&self) -> String { - match self { - ConsensusObserverDirectSend::OrderedBlock(ordered_block) => { - format!( - "OrderedBlock: {}", - ordered_block.ordered_proof.commit_info() - ) - }, - ConsensusObserverDirectSend::CommitDecision(commit_decision) => { - format!( - "CommitDecision: {}", - commit_decision.ledger_info().commit_info() - ) - }, - ConsensusObserverDirectSend::BlockPayload(block_payload) => { - format!( - "BlockPayload: {} {} {:?}", - block_payload.block.id(), - block_payload.transactions.len(), - block_payload.limit - ) - }, - } - } -} - -/// OrderedBlock message contains the ordered blocks and the proof of the ordering -#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] -pub struct OrderedBlock { - pub blocks: Vec>, - pub ordered_proof: LedgerInfoWithSignatures, -} - -/// Payload message contains the block, transactions and the limit of the block -#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] -pub struct BlockPayload { - pub block: BlockInfo, - pub transactions: Vec, - pub limit: Option, -} diff --git a/consensus/src/consensus_observer/observer.rs b/consensus/src/consensus_observer/observer.rs deleted file 
mode 100644 index 6ee64284a6204..0000000000000 --- a/consensus/src/consensus_observer/observer.rs +++ /dev/null @@ -1,1218 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use crate::{ - consensus_observer::{ - error::Error, - logging::{LogEntry, LogSchema}, - metrics, - network_client::ConsensusObserverClient, - network_events::{ConsensusObserverNetworkEvents, NetworkMessage, ResponseSender}, - network_message::{ - BlockPayload, ConsensusObserverDirectSend, ConsensusObserverMessage, - ConsensusObserverRequest, ConsensusObserverResponse, OrderedBlock, - }, - publisher::ConsensusPublisher, - subscription, - subscription::ConsensusObserverSubscription, - }, - dag::DagCommitSigner, - network::{IncomingCommitRequest, IncomingRandGenRequest}, - network_interface::CommitMessage, - payload_manager::PayloadManager, - pipeline::execution_client::TExecutionClient, - state_replication::StateComputerCommitCallBackType, -}; -use aptos_channels::{aptos_channel, message_queues::QueueStyle}; -use aptos_config::{config::ConsensusObserverConfig, network_id::PeerNetworkId}; -use aptos_consensus_types::{ - pipeline::commit_decision::CommitDecision, pipelined_block::PipelinedBlock, -}; -use aptos_crypto::{ed25519, Genesis, HashValue}; -use aptos_event_notifications::{DbBackedOnChainConfig, ReconfigNotificationListener}; -use aptos_infallible::Mutex; -use aptos_logger::{debug, error, info, warn}; -use aptos_network::{ - application::{interface::NetworkClient, metadata::PeerMetadata}, - protocols::wire::handshake::v1::ProtocolId, -}; -use aptos_reliable_broadcast::DropGuard; -use aptos_storage_interface::DbReader; -use aptos_time_service::TimeService; -use aptos_types::{ - block_info::{BlockInfo, Round}, - epoch_state::EpochState, - ledger_info::LedgerInfoWithSignatures, - on_chain_config::{ - OnChainConsensusConfig, OnChainExecutionConfig, OnChainRandomnessConfig, - RandomnessConfigMoveStruct, ValidatorSet, - }, - transaction::SignedTransaction, - 
validator_signer::ValidatorSigner, -}; -use futures::{ - future::{AbortHandle, Abortable}, - StreamExt, -}; -use futures_channel::oneshot; -use move_core_types::account_address::AccountAddress; -use std::{ - collections::{hash_map::Entry, BTreeMap, HashMap}, - mem, - sync::Arc, - time::Duration, -}; -use tokio::{ - sync::{mpsc::UnboundedSender, oneshot as tokio_oneshot}, - time::interval, -}; -use tokio_stream::wrappers::IntervalStream; - -/// The transaction payload of each block -#[derive(Debug, Clone)] -pub struct BlockTransactionPayload { - pub transactions: Vec, - pub limit: Option, -} - -impl BlockTransactionPayload { - pub fn new(transactions: Vec, limit: Option) -> Self { - Self { - transactions, - limit, - } - } -} - -/// The status of consensus observer data -pub enum ObserverDataStatus { - Requested(tokio_oneshot::Sender), - Available(BlockTransactionPayload), -} - -/// The consensus observer receives consensus updates and propagates them to the execution pipeline -pub struct ConsensusObserver { - // The configuration of the consensus observer - consensus_observer_config: ConsensusObserverConfig, - // The consensus observer client to send network messages - consensus_observer_client: - Arc>>, - - // The current epoch - epoch: u64, - // The latest ledger info (updated via a callback) - root: Arc>, - - // The pending execute/commit blocks (also buffers when in sync mode) - pending_blocks: Arc)>>>, - // The execution client to the buffer manager - execution_client: Arc, - // The payload store maps block id's to transaction payloads (the same as payload manager returns) - payload_store: Arc>>, - - // If the sync handle is set it indicates that we're in state sync mode - sync_handle: Option, - // The sender to notify the consensus observer that state sync to the (epoch, round) is done - sync_notification_sender: UnboundedSender<(u64, Round)>, - // The reconfiguration event listener to refresh on-chain configs - reconfig_events: Option>, - - // The consensus 
publisher to forward payload messages - consensus_publisher: Option>, - // The currently active consensus observer subscription - active_observer_subscription: Option, - // A handle to storage (used to read the latest state and check progress) - db_reader: Arc, - // The time service (used to check progress) - time_service: TimeService, -} - -impl ConsensusObserver { - pub fn new( - consensus_observer_config: ConsensusObserverConfig, - consensus_observer_client: Arc< - ConsensusObserverClient>, - >, - db_reader: Arc, - execution_client: Arc, - sync_notification_sender: UnboundedSender<(u64, Round)>, - reconfig_events: Option>, - consensus_publisher: Option>, - time_service: TimeService, - ) -> Self { - // Read the latest ledger info from storage - let root = db_reader - .get_latest_ledger_info() - .expect("Failed to read latest ledger info!"); - - Self { - consensus_observer_config, - consensus_observer_client, - epoch: root.commit_info().epoch(), - root: Arc::new(Mutex::new(root)), - pending_blocks: Arc::new(Mutex::new(BTreeMap::new())), - execution_client, - payload_store: Arc::new(Mutex::new(HashMap::new())), - sync_handle: None, - sync_notification_sender, - reconfig_events, - consensus_publisher, - active_observer_subscription: None, - db_reader, - time_service, - } - } - - /// Checks the progress of the consensus observer - async fn check_progress(&mut self) { - debug!(LogSchema::new(LogEntry::ConsensusObserver) - .message("Checking consensus observer progress!")); - - // Get the peer ID of the currently active subscription (if any) - let active_subscription_peer = self - .active_observer_subscription - .as_ref() - .map(|subscription| subscription.get_peer_network_id()); - - // If we have an active subscription, verify that the subscription - // is still healthy. If not, the subscription should be terminated. 
- if let Some(active_subscription_peer) = active_subscription_peer { - if let Err(error) = self.check_active_subscription() { - // Log the subscription termination - warn!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Terminating subscription to peer: {:?}! Error: {:?}", - active_subscription_peer, error - )) - ); - - // Unsubscribe from the peer - self.unsubscribe_from_peer(active_subscription_peer); - - // Update the subscription termination metrics - self.update_subscription_termination_metrics(active_subscription_peer, error); - } - } - - // If we don't have a subscription, we should select a new peer to - // subscribe to. If we had a previous subscription, it should be - // excluded from the selection process. - if self.active_observer_subscription.is_none() { - // Create a new observer subscription - self.create_new_observer_subscription(active_subscription_peer) - .await; - - // If we successfully created a new subscription, update the subscription creation metrics - if let Some(active_subscription) = &self.active_observer_subscription { - self.update_subscription_creation_metrics( - active_subscription.get_peer_network_id(), - ); - } - } - } - - /// Checks if the active subscription is still healthy. If not, an error is returned. 
- fn check_active_subscription(&mut self) -> Result<(), Error> { - let active_observer_subscription = self.active_observer_subscription.take(); - if let Some(mut active_subscription) = active_observer_subscription { - // Check if the peer for the subscription is still connected - let peer_network_id = active_subscription.get_peer_network_id(); - let peer_still_connected = self - .get_connected_peers_and_metadata() - .map_or(false, |peers_and_metadata| { - peers_and_metadata.contains_key(&peer_network_id) - }); - - // Verify the peer is still connected - if !peer_still_connected { - return Err(Error::SubscriptionDisconnected( - "The peer is no longer connected!".to_string(), - )); - } - - // Verify the subscription has not timed out - active_subscription.check_subscription_timeout()?; - - // Verify that the DB is continuing to sync and commit new data. - // Note: we should only do this if we're not waiting for state sync. - active_subscription.check_syncing_progress()?; - - // Verify that the subscription peer is optimal - if let Some(peers_and_metadata) = self.get_connected_peers_and_metadata() { - active_subscription.check_subscription_peer_optimality(peers_and_metadata)?; - } - - // The subscription seems healthy, we can keep it - self.active_observer_subscription = Some(active_subscription); - } - - Ok(()) - } - - /// Creates and returns a commit callback (to be called after the execution pipeline) - fn create_commit_callback(&self) -> StateComputerCommitCallBackType { - // Clone the root, pending blocks and payload store - let root = self.root.clone(); - let pending_blocks = self.pending_blocks.clone(); - let payload_store = self.payload_store.clone(); - - // Create the commit callback - Box::new(move |blocks, ledger_info: LedgerInfoWithSignatures| { - // Remove the committed blocks from the payload store - remove_payload_blocks(payload_store, blocks); - - // Remove the committed blocks from the pending blocks - remove_pending_blocks(pending_blocks, 
&ledger_info); - - // Verify the ledger info is for the same epoch - let mut root = root.lock(); - if ledger_info.commit_info().epoch() != root.commit_info().epoch() { - warn!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Received commit callback for a different epoch! Ledger info: {:?}, Root: {:?}", - ledger_info.commit_info(), - root.commit_info() - )) - ); - return; - } - - // Update the root ledger info. Note: we only want to do this if - // the new ledger info round is greater than the current root - // round. Otherwise, this can race with the state sync process. - if ledger_info.commit_info().round() > root.commit_info().round() { - *root = ledger_info; - } - }) - } - - /// Creates a new observer subscription by sending subscription requests to - /// appropriate peers and waiting for a successful response. If `previous_subscription_peer` - /// is provided, it will be excluded from the selection process. - async fn create_new_observer_subscription( - &mut self, - previous_subscription_peer: Option, - ) { - // Get a set of sorted peers to service our subscription request - let sorted_peers = match self.sort_peers_for_subscription(previous_subscription_peer) { - Some(sorted_peers) => sorted_peers, - None => { - error!(LogSchema::new(LogEntry::ConsensusObserver) - .message("Failed to sort peers for subscription requests!")); - return; - }, - }; - - // Verify that we have potential peers - if sorted_peers.is_empty() { - warn!(LogSchema::new(LogEntry::ConsensusObserver) - .message("There are no peers to subscribe to!")); - return; - } - - // Go through the sorted peers and attempt to subscribe to a single peer. - // The first peer that responds successfully will be the selected peer. - for selected_peer in &sorted_peers { - info!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Attempting to subscribe to peer: {}!", - selected_peer - )) - ); - - // Send a subscription request to the peer and wait for the response. 
- // Note: it is fine to block here because we assume only a single active subscription. - let subscription_request = ConsensusObserverRequest::Subscribe; - let response = self - .consensus_observer_client - .send_rpc_request_to_peer( - selected_peer, - subscription_request, - self.consensus_observer_config.network_request_timeout_ms, - ) - .await; - - // Process the response and update the active subscription - match response { - Ok(ConsensusObserverResponse::SubscribeAck) => { - info!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Successfully subscribed to peer: {}!", - selected_peer - )) - ); - - // Update the active subscription - let subscription = ConsensusObserverSubscription::new( - self.consensus_observer_config, - self.db_reader.clone(), - *selected_peer, - self.time_service.clone(), - ); - self.active_observer_subscription = Some(subscription); - - return; // Return after successfully subscribing - }, - Ok(response) => { - // We received an invalid response - warn!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Got unexpected response type: {:?}", - response.get_label() - )) - ); - }, - Err(error) => { - // We encountered an error while sending the request - error!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Failed to send subscription request to peer: {}! Error: {:?}", - selected_peer, error - )) - ); - }, - } - } - - // We failed to connect to any peers - warn!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Failed to subscribe to any peers! 
Num peers attempted: {:?}", - sorted_peers.len() - )) - ); - } - - /// Finalizes the ordered block by sending it to the execution pipeline - async fn finalize_ordered_block( - &mut self, - blocks: &[Arc], - ordered_proof: LedgerInfoWithSignatures, - ) { - if let Err(error) = self - .execution_client - .finalize_order(blocks, ordered_proof, self.create_commit_callback()) - .await - { - error!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Failed to finalize ordered block! Error: {:?}", - error - )) - ); - } - } - - /// Forwards the commit decision to the execution pipeline - fn forward_commit_decision(&self, decision: CommitDecision) { - // Create a dummy RPC message - let (response_sender, _response_receiver) = oneshot::channel(); - let commit_request = IncomingCommitRequest { - req: CommitMessage::Decision(decision), - protocol: ProtocolId::ConsensusDirectSendCompressed, - response_sender, - }; - - // Send the message to the execution client - if let Err(error) = self - .execution_client - .send_commit_msg(AccountAddress::ONE, commit_request) - { - error!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Failed to send commit decision to the execution pipeline! Error: {:?}", - error - )) - ) - }; - } - - /// Returns the last known block - fn get_last_block(&self) -> BlockInfo { - if let Some((_, (last_blocks, _))) = self.pending_blocks.lock().last_key_value() { - // Return the last block in the pending blocks - last_blocks.blocks.last().unwrap().block_info() - } else { - // Return the root ledger info - self.root.lock().commit_info().clone() - } - } - - /// Gets the connected peers and metadata. If an error occurred, - /// it is logged and None is returned. 
- fn get_connected_peers_and_metadata(&self) -> Option> { - match self - .consensus_observer_client - .get_peers_and_metadata() - .get_connected_peers_and_metadata() - { - Ok(connected_peers_and_metadata) => Some(connected_peers_and_metadata), - Err(error) => { - error!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Failed to get connected peers and metadata! Error: {:?}", - error - )) - ); - None - }, - } - } - - /// Processes the block payload - fn process_block_payload(&mut self, block_payload: BlockPayload) { - // Unpack the block payload - let block = block_payload.block; - let transactions = block_payload.transactions; - let limit = block_payload.limit; - - // Update the payload store with the transaction payload - let transaction_payload = BlockTransactionPayload::new(transactions, limit); - match self.payload_store.lock().entry(block.id()) { - Entry::Occupied(mut entry) => { - // Replace the status with the new block payload - let mut status = ObserverDataStatus::Available(transaction_payload.clone()); - mem::swap(entry.get_mut(), &mut status); - - // If the status was originally requested, send the payload to the listener - if let ObserverDataStatus::Requested(payload_sender) = status { - if let Err(error) = payload_sender.send(transaction_payload) { - error!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Failed to send block payload to listener! 
Error: {:?}", - error - )) - ); - } - } - }, - Entry::Vacant(entry) => { - // Insert the block payload into the payload store - entry.insert(ObserverDataStatus::Available(transaction_payload)); - }, - } - } - - /// Processes the commit decision - fn process_commit_decision(&mut self, commit_decision: CommitDecision) { - // Update the pending blocks with the commit decision - if self.process_commit_decision_for_pending_block(&commit_decision) { - return; // The commit decision was successfully processed - } - - // Otherwise, check if we need to state sync (i.e., the - // commit decision is for a future epoch or round). - let decision_epoch = commit_decision.ledger_info().commit_info().epoch(); - let decision_round = commit_decision.round(); - let last_block = self.get_last_block(); - if decision_epoch > last_block.epoch() || decision_round > last_block.round() { - info!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Started syncing to {}!", - commit_decision.ledger_info().commit_info() - )) - ); - - // Update the root and clear the pending blocks - *self.root.lock() = commit_decision.ledger_info().clone(); - self.pending_blocks.lock().clear(); - - // Start the state sync process - let abort_handle = sync_to_commit_decision( - commit_decision, - decision_epoch, - decision_round, - self.execution_client.clone(), - self.sync_notification_sender.clone(), - ); - self.sync_handle = Some(DropGuard::new(abort_handle)); - } - } - - /// Processes the commit decision for the pending block and returns iff - /// the commit decision was successfully processed. 
- fn process_commit_decision_for_pending_block(&self, commit_decision: &CommitDecision) -> bool { - let mut pending_blocks = self.pending_blocks.lock(); - if let Some((ordered_blocks, pending_commit_decision)) = - pending_blocks.get_mut(&commit_decision.round()) - { - // Check if the payload already exists - let payload_exists = { - let payload_store = self.payload_store.lock(); - ordered_blocks.blocks.iter().all(|block| { - matches!( - payload_store.get(&block.id()), - Some(ObserverDataStatus::Available(_)) - ) - }) - }; - - // If the payload exists, add the commit decision to the pending blocks - if payload_exists { - debug!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Adding decision to pending block: {}", - commit_decision.ledger_info().commit_info() - )) - ); - *pending_commit_decision = Some(commit_decision.clone()); - - // If we are not in sync mode, forward the commit decision to the execution pipeline - if self.sync_handle.is_none() { - debug!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Forwarding commit decision to the execution pipeline: {}", - commit_decision.ledger_info().commit_info() - )) - ); - self.forward_commit_decision(commit_decision.clone()); - } - - return true; // The commit decision was successfully processed - } - } - - false // The commit decision was not processed - } - - /// Processes a direct send message - async fn process_direct_send_message( - &mut self, - peer_network_id: PeerNetworkId, - message: ConsensusObserverDirectSend, - ) { - // Verify the message is from the peer we've subscribed to - if let Some(active_subscription) = &mut self.active_observer_subscription { - if let Err(error) = active_subscription.verify_message_sender(&peer_network_id) { - warn!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Message failed subscription sender verification! 
Error: {:?}", - error, - )) - ); - - // Send another unsubscription request to the peer - self.unsubscribe_from_peer(peer_network_id); - return; - } - } else { - warn!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Received message from unexpected peer: {}! No active subscription found!", - peer_network_id - )) - ); - - // Send an unsubscription request to the peer - self.unsubscribe_from_peer(peer_network_id); - return; - }; - - // Increment the received message counter - metrics::increment_request_counter( - &metrics::OBSERVER_RECEIVED_MESSAGES, - message.get_label(), - &peer_network_id, - ); - - // Process the message based on the type - match message { - ConsensusObserverDirectSend::OrderedBlock(ordered_block) => { - debug!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Received ordered block: {}, from peer: {}!", - ordered_block.ordered_proof.commit_info(), - peer_network_id - )) - ); - self.process_ordered_block(ordered_block).await; - }, - ConsensusObserverDirectSend::CommitDecision(commit_decision) => { - debug!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Received commit decision: {}, from peer: {}!", - commit_decision.ledger_info().commit_info(), - peer_network_id - )) - ); - self.process_commit_decision(commit_decision); - }, - ConsensusObserverDirectSend::BlockPayload(block_payload) => { - debug!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Received block payload: {}, from peer: {}!", - block_payload.block, peer_network_id - )) - ); - self.process_block_payload(block_payload); - }, - } - } - - /// Processes the ordered block - async fn process_ordered_block(&mut self, ordered_block: OrderedBlock) { - // Unpack the ordered block - let OrderedBlock { - blocks, - ordered_proof, - } = ordered_block.clone(); - - // Verify that we have at least one ordered block - if blocks.is_empty() { - warn!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Received 
empty ordered block! Ignoring: {:?}", - ordered_proof.commit_info() - )) - ); - return; - } - - // If the block is a child of our last block, we can insert it - if self.get_last_block().id() == blocks.first().unwrap().parent_id() { - debug!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Adding ordered block to the pending blocks: {}", - ordered_proof.commit_info() - )) - ); - - // Insert the ordered block into the pending blocks - self.pending_blocks - .lock() - .insert(blocks.last().unwrap().round(), (ordered_block, None)); - - // If we are not in sync mode, forward the blocks to the execution pipeline - if self.sync_handle.is_none() { - debug!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Forwarding blocks to the execution pipeline: {}", - ordered_proof.commit_info() - )) - ); - - // Finalize the ordered block - self.finalize_ordered_block(&blocks, ordered_proof).await; - } - } else { - warn!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Parent block is missing! Ignoring: {:?}", - ordered_proof.commit_info() - )) - ); - } - } - - /// Processes a request message - fn process_request_message( - &mut self, - peer_network_id: PeerNetworkId, - request: ConsensusObserverRequest, - response_sender: Option, - ) { - // Ensure that the response sender is present - let response_sender = match response_sender { - Some(response_sender) => response_sender, - None => { - error!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Missing response sender for RCP request: {:?}", - request - )) - ); - return; // Something has gone wrong! 
- }, - }; - - // Forward the request to the consensus publisher - if let Some(consensus_publisher) = &self.consensus_publisher { - consensus_publisher.handle_subscription_request( - &peer_network_id, - request, - response_sender, - ); - } - } - - /// Processes the sync complete notification for the given epoch and round - async fn process_sync_notification(&mut self, epoch: u64, round: Round) { - // Log the sync notification - info!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Received sync complete notification for epoch {}, round: {}", - epoch, round - )) - ); - - // Verify that the sync notification is for the current epoch and round - if !check_root_epoch_and_round(self.root.clone(), epoch, round) { - info!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Received invalid sync notification for epoch: {}, round: {}! Current root: {:?}", - epoch, round, self.root - )) - ); - return; - } - - // If the epoch has changed, end the current epoch and start the new one - if epoch > self.epoch { - self.execution_client.end_epoch().await; - self.wait_for_epoch_start().await; - } - - // Reset and drop the sync handle - self.sync_handle = None; - - // Process the pending blocks - let pending_blocks = self.pending_blocks.lock().clone(); - for (_, (ordered_block, commit_decision)) in pending_blocks.into_iter() { - // Unpack the ordered block - let OrderedBlock { - blocks, - ordered_proof, - } = ordered_block; - - // Finalize the ordered block - self.finalize_ordered_block(&blocks, ordered_proof).await; - - // If a commit decision is available, forward it to the execution pipeline - if let Some(commit_decision) = commit_decision { - self.forward_commit_decision(commit_decision.clone()); - } - } - } - - /// Produces a list of sorted peers to service our subscription request. Peers - /// are prioritized by validator distance and latency. 
- /// Note: if `previous_subscription_peer` is provided, it will be excluded - /// from the selection process. Likewise, all peers currently subscribed to us - /// will be excluded from the selection process. - fn sort_peers_for_subscription( - &mut self, - previous_subscription_peer: Option, - ) -> Option> { - if let Some(mut peers_and_metadata) = self.get_connected_peers_and_metadata() { - // Remove the previous subscription peer (if provided) - if let Some(previous_subscription_peer) = previous_subscription_peer { - let _ = peers_and_metadata.remove(&previous_subscription_peer); - } - - // Remove any peers that are currently subscribed to us - if let Some(consensus_publisher) = &self.consensus_publisher { - for peer_network_id in consensus_publisher.get_active_subscribers() { - let _ = peers_and_metadata.remove(&peer_network_id); - } - } - - // Sort the peers by validator distance and latency - let sorted_peers = subscription::sort_peers_by_distance_and_latency(peers_and_metadata); - - // Return the sorted peers - Some(sorted_peers) - } else { - None // No connected peers were found - } - } - - /// Unsubscribes from the given peer by sending an unsubscribe request - fn unsubscribe_from_peer(&self, peer_network_id: PeerNetworkId) { - // Send an unsubscribe request to the peer and process the response. - // Note: we execute this asynchronously, as we don't need to wait for the response. 
- let consensus_observer_client = self.consensus_observer_client.clone(); - let consensus_observer_config = self.consensus_observer_config; - tokio::spawn(async move { - // Send the unsubscribe request to the peer - let unsubscribe_request = ConsensusObserverRequest::Unsubscribe; - let response = consensus_observer_client - .send_rpc_request_to_peer( - &peer_network_id, - unsubscribe_request, - consensus_observer_config.network_request_timeout_ms, - ) - .await; - - // Process the response - match response { - Ok(ConsensusObserverResponse::UnsubscribeAck) => { - info!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Successfully unsubscribed from peer: {}!", - peer_network_id - )) - ); - }, - Ok(response) => { - // We received an invalid response - warn!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Got unexpected response type: {:?}", - response.get_label() - )) - ); - }, - Err(error) => { - // We encountered an error while sending the request - error!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Failed to send unsubscribe request to peer: {}! 
Error: {:?}", - peer_network_id, error - )) - ); - }, - } - }); - } - - /// Updates the subscription creation metrics for the given peer - fn update_subscription_creation_metrics(&self, peer_network_id: PeerNetworkId) { - // Set the number of active subscriptions - metrics::set_gauge( - &metrics::OBSERVER_NUM_ACTIVE_SUBSCRIPTIONS, - &peer_network_id.network_id(), - 1, - ); - - // Update the number of created subscriptions - metrics::increment_request_counter( - &metrics::OBSERVER_CREATED_SUBSCRIPTIONS, - metrics::CREATED_SUBSCRIPTION_LABEL, - &peer_network_id, - ); - } - - /// Updates the subscription termination metrics for the given peer - fn update_subscription_termination_metrics( - &self, - peer_network_id: PeerNetworkId, - error: Error, - ) { - // Reset the number of active subscriptions - metrics::set_gauge( - &metrics::OBSERVER_NUM_ACTIVE_SUBSCRIPTIONS, - &peer_network_id.network_id(), - 0, - ); - - // Update the number of terminated subscriptions - metrics::increment_request_counter( - &metrics::OBSERVER_TERMINATED_SUBSCRIPTIONS, - error.get_label(), - &peer_network_id, - ); - } - - /// Waits for a new epoch to start - async fn wait_for_epoch_start(&mut self) { - // Extract the epoch state and on-chain configs - let (epoch_state, consensus_config, execution_config, randomness_config) = if let Some( - reconfig_events, - ) = - &mut self.reconfig_events - { - extract_on_chain_configs(reconfig_events).await - } else { - panic!("Reconfig events are required to wait for a new epoch to start! 
Something has gone wrong!") - }; - - // Update the local epoch - self.epoch = epoch_state.epoch; - info!(LogSchema::new(LogEntry::ConsensusObserver) - .message(&format!("New epoch started: {}", self.epoch))); - - // Create the payload manager - let payload_manager = if consensus_config.quorum_store_enabled() { - PayloadManager::ConsensusObserver( - self.payload_store.clone(), - self.consensus_publisher.clone(), - ) - } else { - PayloadManager::DirectMempool - }; - - // Start the new epoch - let signer = Arc::new(ValidatorSigner::new( - AccountAddress::ZERO, - ed25519::PrivateKey::genesis(), - )); - let dummy_signer = Arc::new(DagCommitSigner::new(signer.clone())); - let (_, rand_msg_rx) = - aptos_channel::new::(QueueStyle::FIFO, 1, None); - self.execution_client - .start_epoch( - epoch_state.clone(), - dummy_signer, - Arc::new(payload_manager), - &consensus_config, - &execution_config, - &randomness_config, - None, - None, - rand_msg_rx, - 0, - ) - .await; - } - - /// Starts the consensus observer loop that processes incoming - /// network messages and ensures the observer is making progress. - pub async fn start( - mut self, - mut network_service_events: ConsensusObserverNetworkEvents, - mut sync_notification_listener: tokio::sync::mpsc::UnboundedReceiver<(u64, Round)>, - ) { - // If the consensus publisher is enabled but the observer is disabled, - // we should only forward incoming requests to the consensus publisher. 
- if self.consensus_observer_config.publisher_enabled - && !self.consensus_observer_config.observer_enabled - { - self.start_publisher_forwarding(&mut network_service_events) - .await; - return; // We should never return from this function - } - - // Create a progress check ticker - let mut progress_check_interval = IntervalStream::new(interval(Duration::from_millis( - self.consensus_observer_config.progress_check_interval_ms, - ))) - .fuse(); - - // Wait for the epoch to start - self.wait_for_epoch_start().await; - - // Start the consensus observer loop - info!(LogSchema::new(LogEntry::ConsensusObserver) - .message("Starting the consensus observer loop!")); - loop { - tokio::select! { - Some(network_message) = network_service_events.next() => { - // Unpack the network message - let NetworkMessage { - peer_network_id, - protocol_id: _, - consensus_observer_message, - response_sender, - } = network_message; - - // Process the consensus observer message - match consensus_observer_message { - ConsensusObserverMessage::DirectSend(message) => { - self.process_direct_send_message(peer_network_id, message).await; - }, - ConsensusObserverMessage::Request(request) => { - self.process_request_message(peer_network_id, request, response_sender); - }, - _ => { - error!(LogSchema::new(LogEntry::ConsensusObserver) - .message(&format!("Received unexpected message from peer: {}", peer_network_id))); - }, - } - } - Some((epoch, round)) = sync_notification_listener.recv() => { - self.process_sync_notification(epoch, round).await; - }, - _ = progress_check_interval.select_next_some() => { - self.check_progress().await; - } - else => break, - } - } - - // Log the exit of the consensus observer loop - error!(LogSchema::new(LogEntry::ConsensusObserver) - .message("The consensus observer loop exited unexpectedly!")); - } - - /// Starts the publisher forwarding loop that forwards incoming - /// requests to the consensus publisher. 
The rest of the consensus - /// observer functionality is disabled. - async fn start_publisher_forwarding( - &mut self, - network_service_events: &mut ConsensusObserverNetworkEvents, - ) { - // TODO: identify if there's a cleaner way to handle this! - - // Start the consensus publisher forwarding loop - info!(LogSchema::new(LogEntry::ConsensusObserver) - .message("Starting the consensus publisher forwarding loop!")); - loop { - tokio::select! { - Some(network_message) = network_service_events.next() => { - // Unpack the network message - let NetworkMessage { - peer_network_id, - protocol_id: _, - consensus_observer_message, - response_sender, - } = network_message; - - // Process the consensus observer message - match consensus_observer_message { - ConsensusObserverMessage::Request(request) => { - self.process_request_message(peer_network_id, request, response_sender); - }, - _ => { - error!(LogSchema::new(LogEntry::ConsensusObserver) - .message(&format!("Received unexpected message from peer: {}", peer_network_id))); - }, - } - } - } - } - } -} - -/// Checks that the epoch and round match the current root -fn check_root_epoch_and_round( - root: Arc>, - epoch: u64, - round: Round, -) -> bool { - // Get the expected epoch and round - let root = root.lock(); - let expected_epoch = root.commit_info().epoch(); - let expected_round = root.commit_info().round(); - - // Check if the expected epoch and round match - expected_epoch == epoch && expected_round == round -} - -/// A simple helper function that extracts the on-chain configs from the reconfig events -async fn extract_on_chain_configs( - reconfig_events: &mut ReconfigNotificationListener, -) -> ( - Arc, - OnChainConsensusConfig, - OnChainExecutionConfig, - OnChainRandomnessConfig, -) { - // Fetch the next reconfiguration notification - let reconfig_notification = reconfig_events - .next() - .await - .expect("Failed to get reconfig notification!"); - - // Extract the epoch state from the reconfiguration 
notification - let on_chain_configs = reconfig_notification.on_chain_configs; - let validator_set: ValidatorSet = on_chain_configs - .get() - .expect("Failed to get the validator set from the on-chain configs!"); - let epoch_state = Arc::new(EpochState { - epoch: on_chain_configs.epoch(), - verifier: (&validator_set).into(), - }); - - // Extract the consensus config (or use the default if it's missing) - let onchain_consensus_config: anyhow::Result = on_chain_configs.get(); - if let Err(error) = &onchain_consensus_config { - error!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Failed to read on-chain consensus config! Error: {:?}", - error - )) - ); - } - let consensus_config = onchain_consensus_config.unwrap_or_default(); - - // Extract the execution config (or use the default if it's missing) - let onchain_execution_config: anyhow::Result = on_chain_configs.get(); - if let Err(error) = &onchain_execution_config { - error!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Failed to read on-chain execution config! Error: {:?}", - error - )) - ); - } - let execution_config = - onchain_execution_config.unwrap_or_else(|_| OnChainExecutionConfig::default_if_missing()); - - // Extract the randomness config (or use the default if it's missing) - let onchain_randomness_config: anyhow::Result = - on_chain_configs.get(); - if let Err(error) = &onchain_randomness_config { - error!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Failed to read on-chain randomness config! 
Error: {:?}", - error - )) - ); - } - let onchain_randomness_config = onchain_randomness_config - .and_then(OnChainRandomnessConfig::try_from) - .unwrap_or_else(|_| OnChainRandomnessConfig::default_if_missing()); - - // Return the extracted epoch state and on-chain configs - ( - epoch_state, - consensus_config, - execution_config, - onchain_randomness_config, - ) -} - -/// Removes the given payload blocks from the payload store -fn remove_payload_blocks( - payload_store: Arc>>, - blocks: &[Arc], -) { - let mut payload_store = payload_store.lock(); - for block in blocks.iter() { - payload_store.remove(&block.id()); - } -} - -/// Removes the pending blocks before the given ledger info -fn remove_pending_blocks( - pending_blocks: Arc)>>>, - ledger_info: &LedgerInfoWithSignatures, -) { - // Determine the round to split off - let split_off_round = ledger_info.commit_info().round() + 1; - - // Remove the pending blocks before the split off round - let mut pending_blocks = pending_blocks.lock(); - *pending_blocks = pending_blocks.split_off(&split_off_round); -} - -/// Spawns a task to sync to the given commit decision and notifies -/// the consensus observer. Also, returns an abort handle to cancel the task. -fn sync_to_commit_decision( - commit_decision: CommitDecision, - decision_epoch: u64, - decision_round: Round, - execution_client: Arc, - sync_notification_sender: UnboundedSender<(u64, Round)>, -) -> AbortHandle { - let (abort_handle, abort_registration) = AbortHandle::new_pair(); - tokio::spawn(Abortable::new( - async move { - // Sync to the commit decision - if let Err(error) = execution_client - .clone() - .sync_to(commit_decision.ledger_info().clone()) - .await - { - warn!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Failed to sync to commit decision: {:?}! 
Error: {:?}", - commit_decision, error - )) - ); - } - - // Notify the consensus observer that the sync is complete - if let Err(error) = sync_notification_sender.send((decision_epoch, decision_round)) { - error!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Failed to send sync notification for decision epoch: {:?}, round: {:?}! Error: {:?}", - decision_epoch, decision_round, error - )) - ); - } - }, - abort_registration, - )); - abort_handle -} diff --git a/consensus/src/consensus_observer/observer/active_state.rs b/consensus/src/consensus_observer/observer/active_state.rs new file mode 100644 index 0000000000000..f162fab553e15 --- /dev/null +++ b/consensus/src/consensus_observer/observer/active_state.rs @@ -0,0 +1,613 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + consensus_observer::{ + common::logging::{LogEntry, LogSchema}, + observer::{ + ordered_blocks::OrderedBlockStore, + payload_store::{BlockPayloadStatus, BlockPayloadStore}, + }, + publisher::consensus_publisher::ConsensusPublisher, + }, + payload_manager::{ + ConsensusObserverPayloadManager, DirectMempoolPayloadManager, TPayloadManager, + }, + state_replication::StateComputerCommitCallBackType, +}; +use aptos_config::config::NodeConfig; +use aptos_consensus_types::pipelined_block::PipelinedBlock; +use aptos_event_notifications::{DbBackedOnChainConfig, ReconfigNotificationListener}; +use aptos_infallible::Mutex; +use aptos_logger::{error, info, warn}; +use aptos_storage_interface::DbReader; +use aptos_types::{ + block_info::Round, + epoch_state::EpochState, + ledger_info::LedgerInfoWithSignatures, + on_chain_config::{ + OnChainConsensusConfig, OnChainExecutionConfig, OnChainRandomnessConfig, + RandomnessConfigMoveStruct, RandomnessConfigSeqNum, ValidatorSet, + }, +}; +use futures::StreamExt; +use std::{collections::BTreeMap, sync::Arc}; + +pub struct ActiveObserverState { + // The configuration of the node + node_config: NodeConfig, + + 
// The consensus publisher + consensus_publisher: Option>, + + // The current epoch state + epoch_state: Option>, + + // Whether quorum store is enabled for the current epoch + quorum_store_enabled: bool, + + // The reconfiguration event listener to refresh on-chain configs + reconfig_events: ReconfigNotificationListener, + + // The latest ledger info + root: Arc>, +} + +impl ActiveObserverState { + pub fn new( + node_config: NodeConfig, + db_reader: Arc, + reconfig_events: ReconfigNotificationListener, + consensus_publisher: Option>, + ) -> Self { + // Get the latest ledger info from storage + let root = db_reader + .get_latest_ledger_info() + .expect("Failed to read latest ledger info from storage!"); + + // Create the active observer state + ActiveObserverState::new_with_root(node_config, reconfig_events, consensus_publisher, root) + } + + /// Creates and returns a new active observer state with the given root ledger info + fn new_with_root( + node_config: NodeConfig, + reconfig_events: ReconfigNotificationListener, + consensus_publisher: Option>, + root: LedgerInfoWithSignatures, + ) -> Self { + Self { + node_config, + consensus_publisher, + epoch_state: None, + quorum_store_enabled: false, + reconfig_events, + root: Arc::new(Mutex::new(root)), + } + } + + /// Returns true iff the root epoch and round match the given values + pub fn check_root_epoch_and_round(&self, epoch: u64, round: Round) -> bool { + // Get the expected epoch and round + let root = self.root(); + let expected_epoch = root.commit_info().epoch(); + let expected_round = root.commit_info().round(); + + // Check if the expected epoch and round match the given values + expected_epoch == epoch && expected_round == round + } + + /// Creates and returns a commit callback. This will update the + /// root ledger info and remove the blocks from the given stores. 
+ pub fn create_commit_callback( + &self, + pending_ordered_blocks: Arc>, + block_payload_store: Arc>, + ) -> StateComputerCommitCallBackType { + // Clone the root pointer + let root = self.root.clone(); + + // Create the commit callback + Box::new(move |blocks, ledger_info: LedgerInfoWithSignatures| { + handle_committed_blocks( + pending_ordered_blocks, + block_payload_store, + root, + blocks, + ledger_info, + ); + }) + } + + /// Returns the current epoch state + pub fn epoch_state(&self) -> Arc { + self.epoch_state + .clone() + .expect("The epoch state is not set! This should never happen!") + } + + /// Returns true iff the quorum store is enabled for the current epoch + pub fn is_quorum_store_enabled(&self) -> bool { + self.quorum_store_enabled + } + + /// Returns a clone of the current root ledger info + pub fn root(&self) -> LedgerInfoWithSignatures { + self.root.lock().clone() + } + + /// Updates the root ledger info + pub fn update_root(&self, new_root: LedgerInfoWithSignatures) { + *self.root.lock() = new_root; + } + + /// Waits for a new epoch to start (signaled by the reconfig events) and + /// returns the new payload manager and on-chain configs (for the epoch). + pub async fn wait_for_epoch_start( + &mut self, + block_payloads: Arc< + Mutex>, + >, + ) -> ( + Arc, + OnChainConsensusConfig, + OnChainExecutionConfig, + OnChainRandomnessConfig, + ) { + // Extract the epoch state and on-chain configs + let (epoch_state, consensus_config, execution_config, randomness_config) = + extract_on_chain_configs(&self.node_config, &mut self.reconfig_events).await; + + // Update the local epoch state and quorum store config + self.epoch_state = Some(epoch_state.clone()); + self.quorum_store_enabled = consensus_config.quorum_store_enabled(); + info!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "New epoch started: {:?}. Updated the epoch state! 
Quorum store enabled: {:?}", + epoch_state.epoch, self.quorum_store_enabled, + )) + ); + + // Create the payload manager + let payload_manager: Arc = if self.quorum_store_enabled { + Arc::new(ConsensusObserverPayloadManager::new( + block_payloads, + self.consensus_publisher.clone(), + )) + } else { + Arc::new(DirectMempoolPayloadManager {}) + }; + + // Return the payload manager and on-chain configs + ( + payload_manager, + consensus_config, + execution_config, + randomness_config, + ) + } +} + +/// A simple helper function that extracts the on-chain configs from the reconfig events +async fn extract_on_chain_configs( + node_config: &NodeConfig, + reconfig_events: &mut ReconfigNotificationListener, +) -> ( + Arc, + OnChainConsensusConfig, + OnChainExecutionConfig, + OnChainRandomnessConfig, +) { + // Fetch the next reconfiguration notification + let reconfig_notification = reconfig_events + .next() + .await + .expect("Failed to get reconfig notification!"); + + // Extract the epoch state from the reconfiguration notification + let on_chain_configs = reconfig_notification.on_chain_configs; + let validator_set: ValidatorSet = on_chain_configs + .get() + .expect("Failed to get the validator set from the on-chain configs!"); + let epoch_state = Arc::new(EpochState { + epoch: on_chain_configs.epoch(), + verifier: (&validator_set).into(), + }); + + // Extract the consensus config (or use the default if it's missing) + let onchain_consensus_config: anyhow::Result = on_chain_configs.get(); + if let Err(error) = &onchain_consensus_config { + error!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Failed to read on-chain consensus config! 
Error: {:?}", + error + )) + ); + } + let consensus_config = onchain_consensus_config.unwrap_or_default(); + + // Extract the execution config (or use the default if it's missing) + let onchain_execution_config: anyhow::Result = on_chain_configs.get(); + if let Err(error) = &onchain_execution_config { + error!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Failed to read on-chain execution config! Error: {:?}", + error + )) + ); + } + let execution_config = + onchain_execution_config.unwrap_or_else(|_| OnChainExecutionConfig::default_if_missing()); + + // Extract the randomness config sequence number (or use the default if it's missing) + let onchain_randomness_config_seq_num: anyhow::Result = + on_chain_configs.get(); + if let Err(error) = &onchain_randomness_config_seq_num { + warn!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Failed to read on-chain randomness config seq num! Error: {:?}", + error + )) + ); + } + let onchain_randomness_config_seq_num = onchain_randomness_config_seq_num + .unwrap_or_else(|_| RandomnessConfigSeqNum::default_if_missing()); + + // Extract the randomness config + let onchain_randomness_config: anyhow::Result = + on_chain_configs.get(); + if let Err(error) = &onchain_randomness_config { + error!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Failed to read on-chain randomness config! Error: {:?}", + error + )) + ); + } + let onchain_randomness_config = OnChainRandomnessConfig::from_configs( + node_config.randomness_override_seq_num, + onchain_randomness_config_seq_num.seq_num, + onchain_randomness_config.ok(), + ); + + // Return the extracted epoch state and on-chain configs + ( + epoch_state, + consensus_config, + execution_config, + onchain_randomness_config, + ) +} + +/// A simple helper function that handles the committed blocks +/// (as part of the commit callback). 
+fn handle_committed_blocks( + pending_ordered_blocks: Arc>, + block_payload_store: Arc>, + root: Arc>, + blocks: &[Arc], + ledger_info: LedgerInfoWithSignatures, +) { + // Remove the committed blocks from the payload and pending stores + block_payload_store.lock().remove_committed_blocks(blocks); + pending_ordered_blocks + .lock() + .remove_blocks_for_commit(&ledger_info); + + // Verify the ledger info is for the same epoch + let mut root = root.lock(); + if ledger_info.commit_info().epoch() != root.commit_info().epoch() { + warn!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Received commit callback for a different epoch! Ledger info: {:?}, Root: {:?}", + ledger_info.commit_info(), + root.commit_info() + )) + ); + return; + } + + // Update the root ledger info. Note: we only want to do this if + // the new ledger info round is greater than the current root + // round. Otherwise, this can race with the state sync process. + if ledger_info.commit_info().round() > root.commit_info().round() { + info!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Updating the root ledger info! Old root: (epoch: {:?}, round: {:?}). 
New root: (epoch: {:?}, round: {:?})", + root.commit_info().epoch(), + root.commit_info().round(), + ledger_info.commit_info().epoch(), + ledger_info.commit_info().round(), + )) + ); + *root = ledger_info; + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::consensus_observer::network::observer_message::{ + BlockPayload, BlockTransactionPayload, OrderedBlock, + }; + use aptos_channels::{aptos_channel, message_queues::QueueStyle}; + use aptos_consensus_types::{ + block::Block, + block_data::{BlockData, BlockType}, + quorum_cert::QuorumCert, + }; + use aptos_crypto::HashValue; + use aptos_event_notifications::ReconfigNotification; + use aptos_types::{ + aggregate_signature::AggregateSignature, block_info::BlockInfo, ledger_info::LedgerInfo, + transaction::Version, + }; + + #[test] + fn test_check_root_epoch_and_round() { + // Create a root ledger info + let epoch = 10; + let round = 5; + let root = create_ledger_info(epoch, round); + + // Create the active observer state + let (_, reconfig_events) = create_reconfig_notifier_and_listener(); + let observer_state = + ActiveObserverState::new_with_root(NodeConfig::default(), reconfig_events, None, root); + + // Check the root epoch and round + assert!(observer_state.check_root_epoch_and_round(epoch, round)); + assert!(!observer_state.check_root_epoch_and_round(epoch, round + 1)); + assert!(!observer_state.check_root_epoch_and_round(epoch + 1, round)); + + // Update the root ledger info + let new_epoch = epoch + 10; + let new_round = round + 100; + let new_root = create_ledger_info(new_epoch, new_round); + observer_state.update_root(new_root.clone()); + + // Check the updated root epoch and round + assert!(!observer_state.check_root_epoch_and_round(epoch, round)); + assert!(observer_state.check_root_epoch_and_round(new_epoch, new_round)); + } + + #[test] + fn test_get_and_update_root() { + // Create a root ledger info + let epoch = 100; + let round = 50; + let root = create_ledger_info(epoch, round); + + // 
Create the active observer state + let (_, reconfig_events) = create_reconfig_notifier_and_listener(); + let observer_state = ActiveObserverState::new_with_root( + NodeConfig::default(), + reconfig_events, + None, + root.clone(), + ); + + // Check the root ledger info + assert_eq!(observer_state.root(), root); + + // Update the root ledger info + let new_root = create_ledger_info(epoch, round + 1000); + observer_state.update_root(new_root.clone()); + + // Check the updated root ledger info + assert_eq!(observer_state.root(), new_root); + } + + #[test] + fn test_handle_committed_blocks() { + // Create a node config + let node_config = NodeConfig::default(); + + // Create the root ledger info + let epoch = 1000; + let round = 100; + let root = Arc::new(Mutex::new(create_ledger_info(epoch, round))); + + // Create the ordered block store and block payload store + let ordered_block_store = Arc::new(Mutex::new(OrderedBlockStore::new( + node_config.consensus_observer, + ))); + let block_payload_store = Arc::new(Mutex::new(BlockPayloadStore::new( + node_config.consensus_observer, + ))); + + // Handle the committed blocks at the wrong epoch and verify the root is not updated + handle_committed_blocks( + ordered_block_store.clone(), + block_payload_store.clone(), + root.clone(), + &[], + create_ledger_info(epoch + 1, round + 1), + ); + assert_eq!(root.lock().commit_info().epoch(), epoch); + + // Handle the committed blocks at the wrong round and verify the root is not updated + handle_committed_blocks( + ordered_block_store.clone(), + block_payload_store.clone(), + root.clone(), + &[], + create_ledger_info(epoch, round - 1), + ); + assert_eq!(root.lock().commit_info().round(), round); + + // Add pending ordered blocks + let num_ordered_blocks = 10; + let ordered_blocks = create_and_add_ordered_blocks( + ordered_block_store.clone(), + num_ordered_blocks, + epoch, + round, + ); + + // Add block payloads for the ordered blocks + for ordered_block in &ordered_blocks { + 
create_and_add_payloads_for_ordered_block(block_payload_store.clone(), ordered_block); + } + + // Create the commit ledger info (for the second to last block) + let commit_round = round + (num_ordered_blocks as Round) - 2; + let committed_ledger_info = create_ledger_info(epoch, commit_round); + + // Create the committed blocks and ledger info + let mut committed_blocks = vec![]; + for ordered_block in ordered_blocks.iter().take(num_ordered_blocks - 1) { + let pipelined_block = create_pipelined_block(ordered_block.blocks()[0].block_info()); + committed_blocks.push(pipelined_block); + } + + // Handle the committed blocks + handle_committed_blocks( + ordered_block_store.clone(), + block_payload_store.clone(), + root.clone(), + &committed_blocks, + committed_ledger_info.clone(), + ); + + // Verify the committed blocks are removed from the stores + assert_eq!(ordered_block_store.lock().get_all_ordered_blocks().len(), 1); + assert_eq!( + block_payload_store.lock().get_block_payloads().lock().len(), + 1 + ); + + // Verify the root is updated + assert_eq!(root.lock().clone(), committed_ledger_info); + } + + #[test] + fn test_simple_epoch_state() { + // Create a root ledger info + let epoch = 10; + let round = 5; + let root = create_ledger_info(epoch, round); + + // Create the active observer state + let (_, reconfig_events) = create_reconfig_notifier_and_listener(); + let mut observer_state = + ActiveObserverState::new_with_root(NodeConfig::default(), reconfig_events, None, root); + + // Verify that quorum store is not enabled + assert!(!observer_state.is_quorum_store_enabled()); + + // Manually update the epoch state and quorum store flag + let epoch_state = Arc::new(EpochState::empty()); + observer_state.epoch_state = Some(epoch_state.clone()); + observer_state.quorum_store_enabled = true; + + // Verify the epoch state and quorum store flag are updated + assert_eq!(observer_state.epoch_state(), epoch_state); + assert!(observer_state.is_quorum_store_enabled()); + } + + 
/// Creates and adds the specified number of ordered blocks to the ordered blocks + fn create_and_add_ordered_blocks( + ordered_block_store: Arc>, + num_ordered_blocks: usize, + epoch: u64, + starting_round: Round, + ) -> Vec { + let mut ordered_blocks = vec![]; + for i in 0..num_ordered_blocks { + // Create a new block info + let round = starting_round + (i as Round); + let block_info = BlockInfo::new( + epoch, + round, + HashValue::random(), + HashValue::random(), + i as Version, + i as u64, + None, + ); + + // Create a pipelined block + let block_data = BlockData::new_for_testing( + block_info.epoch(), + block_info.round(), + block_info.timestamp_usecs(), + QuorumCert::dummy(), + BlockType::Genesis, + ); + let block = Block::new_for_testing(block_info.id(), block_data, None); + let pipelined_block = Arc::new(PipelinedBlock::new_ordered(block)); + + // Create an ordered block + let blocks = vec![pipelined_block]; + let ordered_proof = + create_ledger_info(epoch, i as aptos_consensus_types::common::Round); + let ordered_block = OrderedBlock::new(blocks, ordered_proof); + + // Insert the block into the ordered block store + ordered_block_store + .lock() + .insert_ordered_block(ordered_block.clone()); + + // Add the block to the ordered blocks + ordered_blocks.push(ordered_block); + } + + ordered_blocks + } + + /// Creates and adds payloads for the ordered block + fn create_and_add_payloads_for_ordered_block( + block_payload_store: Arc>, + ordered_block: &OrderedBlock, + ) { + for block in ordered_block.blocks() { + let block_payload = + BlockPayload::new(block.block_info(), BlockTransactionPayload::empty()); + block_payload_store + .lock() + .insert_block_payload(block_payload, true); + } + } + + /// Creates and returns a new ledger info with the specified epoch and round + fn create_ledger_info( + epoch: u64, + round: aptos_consensus_types::common::Round, + ) -> LedgerInfoWithSignatures { + LedgerInfoWithSignatures::new( + LedgerInfo::new( + 
BlockInfo::random_with_epoch(epoch, round), + HashValue::random(), + ), + AggregateSignature::empty(), + ) + } + + /// Creates and returns a new pipelined block with the given block info + fn create_pipelined_block(block_info: BlockInfo) -> Arc { + let block_data = BlockData::new_for_testing( + block_info.epoch(), + block_info.round(), + block_info.timestamp_usecs(), + QuorumCert::dummy(), + BlockType::Genesis, + ); + let block = Block::new_for_testing(block_info.id(), block_data, None); + Arc::new(PipelinedBlock::new_ordered(block)) + } + + /// Creates and returns a reconfig notifier and listener + fn create_reconfig_notifier_and_listener() -> ( + aptos_channel::Sender<(), ReconfigNotification>, + ReconfigNotificationListener, + ) { + let (notification_sender, notification_receiver) = + aptos_channel::new(QueueStyle::LIFO, 1, None); + let reconfig_notification_listener = ReconfigNotificationListener { + notification_receiver, + }; + + (notification_sender, reconfig_notification_listener) + } +} diff --git a/consensus/src/consensus_observer/observer/consensus_observer.rs b/consensus/src/consensus_observer/observer/consensus_observer.rs new file mode 100644 index 0000000000000..5a47cb1912e0b --- /dev/null +++ b/consensus/src/consensus_observer/observer/consensus_observer.rs @@ -0,0 +1,1040 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + consensus_observer::{ + common::{ + logging::{LogEntry, LogSchema}, + metrics, + }, + network::{ + network_handler::ConsensusObserverNetworkMessage, + observer_client::ConsensusObserverClient, + observer_message::{ + BlockPayload, CommitDecision, ConsensusObserverDirectSend, + ConsensusObserverMessage, OrderedBlock, + }, + }, + observer::{ + active_state::ActiveObserverState, ordered_blocks::OrderedBlockStore, + payload_store::BlockPayloadStore, pending_blocks::PendingBlockStore, + subscription_manager::SubscriptionManager, + }, + publisher::consensus_publisher::ConsensusPublisher, + }, + 
dag::DagCommitSigner, + network::{IncomingCommitRequest, IncomingRandGenRequest}, + network_interface::CommitMessage, + pipeline::execution_client::TExecutionClient, +}; +use aptos_channels::{aptos_channel, aptos_channel::Receiver, message_queues::QueueStyle}; +use aptos_config::{ + config::{ConsensusObserverConfig, NodeConfig}, + network_id::PeerNetworkId, +}; +use aptos_consensus_types::{pipeline, pipelined_block::PipelinedBlock}; +use aptos_crypto::{bls12381, ed25519, Genesis}; +use aptos_event_notifications::{DbBackedOnChainConfig, ReconfigNotificationListener}; +use aptos_infallible::Mutex; +use aptos_logger::{debug, error, info, warn}; +use aptos_network::{ + application::interface::NetworkClient, protocols::wire::handshake::v1::ProtocolId, +}; +use aptos_reliable_broadcast::DropGuard; +use aptos_storage_interface::DbReader; +use aptos_time_service::TimeService; +use aptos_types::{ + block_info::{BlockInfo, Round}, + epoch_state::EpochState, + validator_signer::ValidatorSigner, +}; +use futures::{ + future::{AbortHandle, Abortable}, + StreamExt, +}; +use futures_channel::oneshot; +use move_core_types::account_address::AccountAddress; +use std::{sync::Arc, time::Duration}; +use tokio::{sync::mpsc::UnboundedSender, time::interval}; +use tokio_stream::wrappers::IntervalStream; + +// Whether to log messages at the info level (useful for debugging) +const LOG_MESSAGES_AT_INFO_LEVEL: bool = true; + +/// The consensus observer receives consensus updates and propagates them to the execution pipeline +pub struct ConsensusObserver { + // The currently active observer state (e.g., epoch and root) + active_observer_state: ActiveObserverState, + + // The block payload store (containing the block transaction payloads) + block_payload_store: Arc>, + + // The ordered block store (containing ordered blocks that are ready for execution) + ordered_block_store: Arc>, + + // The pending block store (containing pending blocks that are without payloads) + pending_block_store: Arc>, 
+ + // The execution client to the buffer manager + execution_client: Arc, + + // The sender to notify the observer that state syncing to the (epoch, round) has completed + sync_notification_sender: UnboundedSender<(u64, Round)>, + + // If the sync handle is set it indicates that we're in state sync mode. + // The flag indicates if we're waiting to transition to a new epoch. + sync_handle: Option<(DropGuard, bool)>, + + // The consensus observer subscription manager + subscription_manager: SubscriptionManager, +} + +impl ConsensusObserver { + pub fn new( + node_config: NodeConfig, + consensus_observer_client: Arc< + ConsensusObserverClient>, + >, + db_reader: Arc, + execution_client: Arc, + sync_notification_sender: UnboundedSender<(u64, Round)>, + reconfig_events: Option>, + consensus_publisher: Option>, + time_service: TimeService, + ) -> Self { + // Get the consensus observer config + let consensus_observer_config = node_config.consensus_observer; + + // Create the subscription manager + let subscription_manager = SubscriptionManager::new( + consensus_observer_client, + consensus_observer_config, + consensus_publisher.clone(), + db_reader.clone(), + time_service.clone(), + ); + + // Create the active observer state + let reconfig_events = + reconfig_events.expect("Reconfig events should exist for the consensus observer!"); + let active_observer_state = + ActiveObserverState::new(node_config, db_reader, reconfig_events, consensus_publisher); + + // Create the block and payload stores + let ordered_block_store = OrderedBlockStore::new(consensus_observer_config); + let block_payload_store = BlockPayloadStore::new(consensus_observer_config); + let pending_block_store = PendingBlockStore::new(consensus_observer_config); + + // Create the consensus observer + Self { + active_observer_state, + ordered_block_store: Arc::new(Mutex::new(ordered_block_store)), + block_payload_store: Arc::new(Mutex::new(block_payload_store)), + pending_block_store: 
Arc::new(Mutex::new(pending_block_store)), + execution_client, + sync_notification_sender, + sync_handle: None, + subscription_manager, + } + } + + /// Returns true iff all payloads exist for the given blocks + fn all_payloads_exist(&self, blocks: &[Arc]) -> bool { + // If quorum store is disabled, all payloads exist (they're already in the blocks) + if !self.active_observer_state.is_quorum_store_enabled() { + return true; + } + + // Otherwise, check if all the payloads exist in the payload store + self.block_payload_store.lock().all_payloads_exist(blocks) + } + + /// Checks the progress of the consensus observer + async fn check_progress(&mut self) { + debug!(LogSchema::new(LogEntry::ConsensusObserver) + .message("Checking consensus observer progress!")); + + // If we're in state sync mode, we should wait for state sync to complete + if self.in_state_sync_mode() { + info!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Waiting for state sync to reach target: {:?}!", + self.active_observer_state.root().commit_info() + )) + ); + return; + } + + // Otherwise, check the health of the active subscriptions + if let Err(error) = self + .subscription_manager + .check_and_manage_subscriptions() + .await + { + // Log the failure and clear the pending block state + warn!(LogSchema::new(LogEntry::ConsensusObserver) + .message(&format!("Subscription checks failed! Error: {:?}", error))); + self.clear_pending_block_state().await; + } + } + + /// Clears the pending block state (this is useful for changing + /// subscriptions, where we want to wipe all state and restart). 
+ async fn clear_pending_block_state(&self) { + // Clear the payload store + self.block_payload_store.lock().clear_all_payloads(); + + // Clear the pending blocks + self.pending_block_store.lock().clear_missing_blocks(); + + // Clear the ordered blocks + self.ordered_block_store.lock().clear_all_ordered_blocks(); + + // Reset the execution pipeline for the root + let root = self.active_observer_state.root(); + if let Err(error) = self.execution_client.reset(&root).await { + error!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Failed to reset the execution pipeline for the root! Error: {:?}", + error + )) + ); + } + + // Increment the cleared block state counter + metrics::increment_counter_without_labels(&metrics::OBSERVER_CLEARED_BLOCK_STATE); + } + + /// Finalizes the ordered block by sending it to the execution pipeline + async fn finalize_ordered_block(&mut self, ordered_block: OrderedBlock) { + info!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Forwarding ordered blocks to the execution pipeline: {}", + ordered_block.proof_block_info() + )) + ); + + // Create the commit callback (to be called after the execution pipeline) + let commit_callback = self.active_observer_state.create_commit_callback( + self.ordered_block_store.clone(), + self.block_payload_store.clone(), + ); + + // Send the ordered block to the execution pipeline + if let Err(error) = self + .execution_client + .finalize_order( + ordered_block.blocks(), + ordered_block.ordered_proof().clone(), + commit_callback, + ) + .await + { + error!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Failed to finalize ordered block! 
Error: {:?}", + error + )) + ); + } + } + + /// Forwards the commit decision to the execution pipeline + fn forward_commit_decision(&self, commit_decision: CommitDecision) { + // Create a dummy RPC message + let (response_sender, _response_receiver) = oneshot::channel(); + let commit_request = IncomingCommitRequest { + req: CommitMessage::Decision(pipeline::commit_decision::CommitDecision::new( + commit_decision.commit_proof().clone(), + )), + protocol: ProtocolId::ConsensusDirectSendCompressed, + response_sender, + }; + + // Send the message to the execution client + if let Err(error) = self + .execution_client + .send_commit_msg(AccountAddress::ONE, commit_request) + { + error!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Failed to send commit decision to the execution pipeline! Error: {:?}", + error + )) + ) + }; + } + + /// Returns the current epoch state, and panics if it is not set + fn get_epoch_state(&self) -> Arc { + self.active_observer_state.epoch_state() + } + + /// Returns the highest committed block epoch and round + fn get_highest_committed_epoch_round(&self) -> (u64, Round) { + if let Some(epoch_round) = self + .ordered_block_store + .lock() + .get_highest_committed_epoch_round() + { + epoch_round + } else { + // Return the root epoch and round + let root_block_info = self.active_observer_state.root().commit_info().clone(); + (root_block_info.epoch(), root_block_info.round()) + } + } + + /// Returns the last ordered block + fn get_last_ordered_block(&self) -> BlockInfo { + if let Some(last_ordered_block) = self.ordered_block_store.lock().get_last_ordered_block() { + last_ordered_block + } else { + // Return the root ledger info + self.active_observer_state.root().commit_info().clone() + } + } + + /// Returns true iff we are waiting for state sync to complete an epoch change + fn in_state_sync_epoch_change(&self) -> bool { + matches!(self.sync_handle, Some((_, true))) + } + + /// Returns true iff we are waiting for state sync 
to complete + fn in_state_sync_mode(&self) -> bool { + self.sync_handle.is_some() + } + + /// Orders any ready pending blocks for the given epoch and round + async fn order_ready_pending_block(&mut self, block_epoch: u64, block_round: Round) { + // Get any ready ordered block + let ready_ordered_block = self.pending_block_store.lock().remove_ready_block( + block_epoch, + block_round, + self.block_payload_store.clone(), + ); + + // Process the ready ordered block (if it exists) + if let Some(ready_ordered_block) = ready_ordered_block { + self.process_ordered_block(ready_ordered_block).await; + } + } + + /// Processes the block payload message + async fn process_block_payload_message( + &mut self, + peer_network_id: PeerNetworkId, + block_payload: BlockPayload, + ) { + // Get the epoch and round for the block + let block_epoch = block_payload.epoch(); + let block_round = block_payload.round(); + + // Determine if the payload is behind the last ordered block, or if it already exists + let last_ordered_block = self.get_last_ordered_block(); + let payload_out_of_date = + (block_epoch, block_round) <= (last_ordered_block.epoch(), last_ordered_block.round()); + let payload_exists = self + .block_payload_store + .lock() + .existing_payload_entry(&block_payload); + + // If the payload is out of date or already exists, ignore it + if payload_out_of_date || payload_exists { + // Update the metrics for the dropped block payload + update_metrics_for_dropped_block_payload_message(peer_network_id, &block_payload); + return; + } + + // Update the metrics for the received block payload + update_metrics_for_block_payload_message(peer_network_id, &block_payload); + + // Verify the block payload digests + if let Err(error) = block_payload.verify_payload_digests() { + error!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Failed to verify block payload digests! Ignoring block: {:?}. 
Error: {:?}", + block_payload.block(), + error + )) + ); + return; + } + + // If the payload is for the current epoch, verify the proof signatures + let epoch_state = self.get_epoch_state(); + let verified_payload = if block_epoch == epoch_state.epoch { + // Verify the block proof signatures + if let Err(error) = block_payload.verify_payload_signatures(&epoch_state) { + error!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Failed to verify block payload signatures! Ignoring block: {:?}. Error: {:?}", + block_payload.block(), error + )) + ); + return; + } + + true // We have successfully verified the signatures + } else { + false // We can't verify the signatures yet + }; + + // Update the payload store with the payload + self.block_payload_store + .lock() + .insert_block_payload(block_payload, verified_payload); + + // Check if there are blocks that were missing payloads but are + // now ready because of the new payload. Note: this should only + // be done if the payload has been verified correctly. 
+ if verified_payload { + self.order_ready_pending_block(block_epoch, block_round) + .await; + } + } + + /// Processes the commit decision message + fn process_commit_decision_message( + &mut self, + peer_network_id: PeerNetworkId, + commit_decision: CommitDecision, + ) { + // Get the commit decision epoch and round + let commit_epoch = commit_decision.epoch(); + let commit_round = commit_decision.round(); + + // If the commit message is behind our highest committed block, ignore it + if (commit_epoch, commit_round) <= self.get_highest_committed_epoch_round() { + // Update the metrics for the dropped commit decision + update_metrics_for_dropped_commit_decision_message(peer_network_id, &commit_decision); + return; + } + + // Update the metrics for the received commit decision + update_metrics_for_commit_decision_message(peer_network_id, &commit_decision); + + // If the commit decision is for the current epoch, verify and process it + let epoch_state = self.get_epoch_state(); + if commit_epoch == epoch_state.epoch { + // Verify the commit decision + if let Err(error) = commit_decision.verify_commit_proof(&epoch_state) { + error!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Failed to verify commit decision! Ignoring: {:?}, Error: {:?}", + commit_decision.proof_block_info(), + error + )) + ); + return; + } + + // Update the pending blocks with the commit decision + if self.process_commit_decision_for_pending_block(&commit_decision) { + return; // The commit decision was successfully processed + } + } + + // TODO: identify the best way to handle an invalid commit decision + // for a future epoch. In such cases, we currently rely on state sync. + + // Otherwise, we failed to process the commit decision. If the commit + // is for a future epoch or round, we need to state sync. 
+ let last_block = self.get_last_ordered_block(); + let epoch_changed = commit_epoch > last_block.epoch(); + if epoch_changed || commit_round > last_block.round() { + // If we're waiting for state sync to transition into a new epoch, + // we should just wait and not issue a new state sync request. + if self.in_state_sync_epoch_change() { + info!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Already waiting for state sync to reach new epoch: {:?}. Dropping commit decision: {:?}!", + self.active_observer_state.root().commit_info(), + commit_decision.proof_block_info() + )) + ); + return; + } + + // Otherwise, we should start the state sync process + info!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Started syncing to {}!", + commit_decision.proof_block_info() + )) + ); + + // Update the root and clear the pending blocks (up to the commit) + self.active_observer_state + .update_root(commit_decision.commit_proof().clone()); + self.block_payload_store + .lock() + .remove_blocks_for_epoch_round(commit_epoch, commit_round); + self.ordered_block_store + .lock() + .remove_blocks_for_commit(commit_decision.commit_proof()); + + // Start the state sync process + let abort_handle = sync_to_commit_decision( + commit_decision, + commit_epoch, + commit_round, + self.execution_client.clone(), + self.sync_notification_sender.clone(), + ); + self.sync_handle = Some((DropGuard::new(abort_handle), epoch_changed)); + } + } + + /// Processes the commit decision for the pending block and returns true iff + /// the commit decision was successfully processed. Note: this function + /// assumes the commit decision has already been verified. 
+ fn process_commit_decision_for_pending_block(&self, commit_decision: &CommitDecision) -> bool { + // Get the pending block for the commit decision + let pending_block = self + .ordered_block_store + .lock() + .get_ordered_block(commit_decision.epoch(), commit_decision.round()); + + // Process the pending block + if let Some(pending_block) = pending_block { + // If all payloads exist, add the commit decision to the pending blocks + if self.all_payloads_exist(pending_block.blocks()) { + debug!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Adding decision to pending block: {}", + commit_decision.proof_block_info() + )) + ); + self.ordered_block_store + .lock() + .update_commit_decision(commit_decision); + + // If we are not in sync mode, forward the commit decision to the execution pipeline + if !self.in_state_sync_mode() { + info!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Forwarding commit decision to the execution pipeline: {}", + commit_decision.proof_block_info() + )) + ); + self.forward_commit_decision(commit_decision.clone()); + } + + return true; // The commit decision was successfully processed + } + } + + false // The commit decision was not processed + } + + /// Processes a network message received by the consensus observer + async fn process_network_message(&mut self, network_message: ConsensusObserverNetworkMessage) { + // Unpack the network message + let (peer_network_id, message) = network_message.into_parts(); + + // Verify the message is from the peers we've subscribed to + if let Err(error) = self + .subscription_manager + .verify_message_for_subscription(peer_network_id) + { + // Increment the rejected message counter + metrics::increment_counter( + &metrics::OBSERVER_REJECTED_MESSAGES, + message.get_label(), + &peer_network_id, + ); + + // Log the error and return + warn!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Received message that was not from an active subscription! 
Error: {:?}", + error, + )) + ); + return; + } + + // Increment the received message counter + metrics::increment_counter( + &metrics::OBSERVER_RECEIVED_MESSAGES, + message.get_label(), + &peer_network_id, + ); + + // Process the message based on the type + match message { + ConsensusObserverDirectSend::OrderedBlock(ordered_block) => { + self.process_ordered_block_message(peer_network_id, ordered_block) + .await; + }, + ConsensusObserverDirectSend::CommitDecision(commit_decision) => { + self.process_commit_decision_message(peer_network_id, commit_decision); + }, + ConsensusObserverDirectSend::BlockPayload(block_payload) => { + self.process_block_payload_message(peer_network_id, block_payload) + .await; + }, + } + + // Update the metrics for the processed blocks + self.update_processed_blocks_metrics(); + } + + /// Processes the ordered block + async fn process_ordered_block_message( + &mut self, + peer_network_id: PeerNetworkId, + ordered_block: OrderedBlock, + ) { + // Verify the ordered blocks before processing + if let Err(error) = ordered_block.verify_ordered_blocks() { + error!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Failed to verify ordered blocks! 
Ignoring: {:?}, Error: {:?}", + ordered_block.proof_block_info(), + error + )) + ); + return; + }; + + // Get the epoch and round of the first block + let first_block = ordered_block.first_block(); + let first_block_epoch_round = (first_block.epoch(), first_block.round()); + + // Determine if the block is behind the last ordered block, or if it is already pending + let last_ordered_block = self.get_last_ordered_block(); + let block_out_of_date = + first_block_epoch_round <= (last_ordered_block.epoch(), last_ordered_block.round()); + let block_pending = self + .pending_block_store + .lock() + .existing_pending_block(&ordered_block); + + // If the block is out of date or already pending, ignore it + if block_out_of_date || block_pending { + // Update the metrics for the dropped ordered block + update_metrics_for_dropped_ordered_block_message(peer_network_id, &ordered_block); + return; + } + + // Update the metrics for the received ordered block + update_metrics_for_ordered_block_message(peer_network_id, &ordered_block); + + // If all payloads exist, process the block. Otherwise, store it + // in the pending block store and wait for the payloads to arrive. + if self.all_payloads_exist(ordered_block.blocks()) { + self.process_ordered_block(ordered_block).await; + } else { + self.pending_block_store + .lock() + .insert_pending_block(ordered_block); + } + } + + /// Processes the ordered block. This assumes the ordered block + /// has been sanity checked and that all payloads exist. + async fn process_ordered_block(&mut self, ordered_block: OrderedBlock) { + // Verify the ordered block proof + let epoch_state = self.get_epoch_state(); + if ordered_block.proof_block_info().epoch() == epoch_state.epoch { + if let Err(error) = ordered_block.verify_ordered_proof(&epoch_state) { + warn!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Failed to verify ordered proof! 
Ignoring: {:?}, Error: {:?}", + ordered_block.proof_block_info(), + error + )) + ); + return; + } + } else { + // Drop the block and log an error (the block should always be for the current epoch) + error!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Received ordered block for a different epoch! Ignoring: {:?}", + ordered_block.proof_block_info() + )) + ); + return; + }; + + // Verify the block payloads against the ordered block + if let Err(error) = self + .block_payload_store + .lock() + .verify_payloads_against_ordered_block(&ordered_block) + { + error!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Failed to verify block payloads against ordered block! Ignoring: {:?}, Error: {:?}", + ordered_block.proof_block_info(), + error + )) + ); + return; + } + + // The block was verified correctly. If the block is a child of our + // last block, we can insert it into the ordered block store. + if self.get_last_ordered_block().id() == ordered_block.first_block().parent_id() { + // Insert the ordered block into the pending blocks + self.ordered_block_store + .lock() + .insert_ordered_block(ordered_block.clone()); + + // If we're not in sync mode, finalize the ordered blocks + if !self.in_state_sync_mode() { + self.finalize_ordered_block(ordered_block).await; + } + } else { + warn!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Parent block for ordered block is missing! 
Ignoring: {:?}", + ordered_block.proof_block_info() + )) + ); + } + } + + /// Processes the sync complete notification for the given epoch and round + async fn process_sync_notification(&mut self, epoch: u64, round: Round) { + // Log the sync notification + info!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Received sync complete notification for epoch {}, round: {}", + epoch, round + )) + ); + + // Verify that the sync notification is for the current epoch and round + if !self + .active_observer_state + .check_root_epoch_and_round(epoch, round) + { + info!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Received invalid sync notification for epoch: {}, round: {}! Current root: {:?}", + epoch, round, self.active_observer_state.root() + )) + ); + return; + } + + // If the epoch has changed, end the current epoch and start the new one + let current_epoch_state = self.get_epoch_state(); + if epoch > current_epoch_state.epoch { + // Wait for the next epoch to start + self.execution_client.end_epoch().await; + self.wait_for_epoch_start().await; + + // Verify the block payloads for the new epoch + let new_epoch_state = self.get_epoch_state(); + let verified_payload_rounds = self + .block_payload_store + .lock() + .verify_payload_signatures(&new_epoch_state); + + // Order all the pending blocks that are now ready (these were buffered during state sync) + for payload_round in verified_payload_rounds { + self.order_ready_pending_block(new_epoch_state.epoch, payload_round) + .await; + } + }; + + // Reset and drop the sync handle + self.sync_handle = None; + + // Process all the newly ordered blocks + let all_ordered_blocks = self.ordered_block_store.lock().get_all_ordered_blocks(); + for (_, (ordered_block, commit_decision)) in all_ordered_blocks { + // Finalize the ordered block + self.finalize_ordered_block(ordered_block).await; + + // If a commit decision is available, forward it to the execution pipeline + if let 
Some(commit_decision) = commit_decision { + self.forward_commit_decision(commit_decision.clone()); + } + } + } + + /// Updates the metrics for the processed blocks + fn update_processed_blocks_metrics(&self) { + // Update the payload store metrics + self.block_payload_store + .lock() + .update_payload_store_metrics(); + + // Update the pending block metrics + self.pending_block_store + .lock() + .update_pending_blocks_metrics(); + + // Update the pending block metrics + self.ordered_block_store + .lock() + .update_ordered_blocks_metrics(); + } + + /// Waits for a new epoch to start + async fn wait_for_epoch_start(&mut self) { + // Wait for the active state epoch to update + let block_payloads = self.block_payload_store.lock().get_block_payloads(); + let (payload_manager, consensus_config, execution_config, randomness_config) = self + .active_observer_state + .wait_for_epoch_start(block_payloads) + .await; + + // Fetch the new epoch state + let epoch_state = self.get_epoch_state(); + + // Start the new epoch + let sk = Arc::new(ed25519::PrivateKey::genesis()); + let signer = Arc::new(ValidatorSigner::new(AccountAddress::ZERO, sk.clone())); + let dummy_signer = Arc::new(DagCommitSigner::new(signer.clone())); + let (_, rand_msg_rx) = + aptos_channel::new::(QueueStyle::FIFO, 1, None); + self.execution_client + .start_epoch( + Some(sk), + epoch_state.clone(), + dummy_signer, + payload_manager, + &consensus_config, + &execution_config, + &randomness_config, + None, + None, + rand_msg_rx, + 0, + ) + .await; + } + + /// Starts the consensus observer loop that processes incoming + /// messages and ensures the observer is making progress. 
+ pub async fn start( + mut self, + consensus_observer_config: ConsensusObserverConfig, + mut consensus_observer_message_receiver: Receiver<(), ConsensusObserverNetworkMessage>, + mut sync_notification_listener: tokio::sync::mpsc::UnboundedReceiver<(u64, Round)>, + ) { + // Create a progress check ticker + let mut progress_check_interval = IntervalStream::new(interval(Duration::from_millis( + consensus_observer_config.progress_check_interval_ms, + ))) + .fuse(); + + // Wait for the epoch to start + self.wait_for_epoch_start().await; + + // Start the consensus observer loop + info!(LogSchema::new(LogEntry::ConsensusObserver) + .message("Starting the consensus observer loop!")); + loop { + tokio::select! { + Some(network_message) = consensus_observer_message_receiver.next() => { + self.process_network_message(network_message).await; + } + Some((epoch, round)) = sync_notification_listener.recv() => { + self.process_sync_notification(epoch, round).await; + }, + _ = progress_check_interval.select_next_some() => { + self.check_progress().await; + } + else => { + break; // Exit the consensus observer loop + } + } + } + + // Log the exit of the consensus observer loop + error!(LogSchema::new(LogEntry::ConsensusObserver) + .message("The consensus observer loop exited unexpectedly!")); + } +} + +/// Logs the received message using an appropriate log level +fn log_received_message(message: String) { + // Log the message at the appropriate level + let log_schema = LogSchema::new(LogEntry::ConsensusObserver).message(&message); + if LOG_MESSAGES_AT_INFO_LEVEL { + info!(log_schema); + } else { + debug!(log_schema); + } +} + +/// Spawns a task to sync to the given commit decision and notifies +/// the consensus observer. Also, returns an abort handle to cancel the task. 
+fn sync_to_commit_decision( + commit_decision: CommitDecision, + decision_epoch: u64, + decision_round: Round, + execution_client: Arc, + sync_notification_sender: UnboundedSender<(u64, Round)>, +) -> AbortHandle { + let (abort_handle, abort_registration) = AbortHandle::new_pair(); + tokio::spawn(Abortable::new( + async move { + // Sync to the commit decision + if let Err(error) = execution_client + .clone() + .sync_to(commit_decision.commit_proof().clone()) + .await + { + warn!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Failed to sync to commit decision: {:?}! Error: {:?}", + commit_decision, error + )) + ); + } + + // Notify the consensus observer that the sync is complete + if let Err(error) = sync_notification_sender.send((decision_epoch, decision_round)) { + error!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Failed to send sync notification for decision epoch: {:?}, round: {:?}! Error: {:?}", + decision_epoch, decision_round, error + )) + ); + } + }, + abort_registration, + )); + abort_handle +} + +/// Updates the metrics for the received block payload message +fn update_metrics_for_block_payload_message( + peer_network_id: PeerNetworkId, + block_payload: &BlockPayload, +) { + // Log the received block payload message + let log_message = format!( + "Received block payload: {}, from peer: {}!", + block_payload.block(), + peer_network_id + ); + log_received_message(log_message); + + // Update the metrics for the received block payload + metrics::set_gauge_with_label( + &metrics::OBSERVER_RECEIVED_MESSAGE_ROUNDS, + metrics::BLOCK_PAYLOAD_LABEL, + block_payload.round(), + ); +} + +/// Updates the metrics for the received commit decision message +fn update_metrics_for_commit_decision_message( + peer_network_id: PeerNetworkId, + commit_decision: &CommitDecision, +) { + // Log the received commit decision message + let log_message = format!( + "Received commit decision: {}, from peer: {}!", + 
commit_decision.proof_block_info(), + peer_network_id + ); + log_received_message(log_message); + + // Update the metrics for the received commit decision + metrics::set_gauge_with_label( + &metrics::OBSERVER_RECEIVED_MESSAGE_ROUNDS, + metrics::COMMIT_DECISION_LABEL, + commit_decision.round(), + ); +} + +/// Updates the metrics for the dropped block payload message +fn update_metrics_for_dropped_block_payload_message( + peer_network_id: PeerNetworkId, + block_payload: &BlockPayload, +) { + // Increment the dropped message counter + metrics::increment_counter( + &metrics::OBSERVER_DROPPED_MESSAGES, + metrics::BLOCK_PAYLOAD_LABEL, + &peer_network_id, + ); + + // Log the dropped block payload message + debug!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Ignoring block payload message from peer: {:?}! Block epoch and round: ({}, {})", + peer_network_id, + block_payload.epoch(), + block_payload.round() + )) + ); +} + +/// Updates the metrics for the dropped commit decision message +fn update_metrics_for_dropped_commit_decision_message( + peer_network_id: PeerNetworkId, + commit_decision: &CommitDecision, +) { + // Increment the dropped message counter + metrics::increment_counter( + &metrics::OBSERVER_DROPPED_MESSAGES, + metrics::COMMITTED_BLOCKS_LABEL, + &peer_network_id, + ); + + // Log the dropped commit decision message + debug!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Ignoring commit decision message from peer: {:?}! 
Commit epoch and round: ({}, {})", + peer_network_id, + commit_decision.epoch(), + commit_decision.round() + )) + ); +} + +/// Updates the metrics for the dropped ordered block message +fn update_metrics_for_dropped_ordered_block_message( + peer_network_id: PeerNetworkId, + ordered_block: &OrderedBlock, +) { + // Increment the dropped message counter + metrics::increment_counter( + &metrics::OBSERVER_DROPPED_MESSAGES, + metrics::ORDERED_BLOCK_LABEL, + &peer_network_id, + ); + + // Log the dropped ordered block message + debug!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Ignoring ordered block message from peer: {:?}! Block epoch and round: ({}, {})", + peer_network_id, + ordered_block.proof_block_info().epoch(), + ordered_block.proof_block_info().round() + )) + ); +} + +/// Updates the metrics for the received ordered block message +fn update_metrics_for_ordered_block_message( + peer_network_id: PeerNetworkId, + ordered_block: &OrderedBlock, +) { + // Log the received ordered block message + let log_message = format!( + "Received ordered block: {}, from peer: {}!", + ordered_block.proof_block_info(), + peer_network_id + ); + log_received_message(log_message); + + // Update the metrics for the received ordered block + metrics::set_gauge_with_label( + &metrics::OBSERVER_RECEIVED_MESSAGE_ROUNDS, + metrics::ORDERED_BLOCK_LABEL, + ordered_block.proof_block_info().round(), + ); +} diff --git a/consensus/src/consensus_observer/observer/mod.rs b/consensus/src/consensus_observer/observer/mod.rs new file mode 100644 index 0000000000000..4a4e5d42881a3 --- /dev/null +++ b/consensus/src/consensus_observer/observer/mod.rs @@ -0,0 +1,11 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +pub mod active_state; +pub mod consensus_observer; +pub mod ordered_blocks; +pub mod payload_store; +pub mod pending_blocks; +pub mod subscription; +pub mod subscription_manager; +pub mod subscription_utils; diff --git 
a/consensus/src/consensus_observer/observer/ordered_blocks.rs b/consensus/src/consensus_observer/observer/ordered_blocks.rs new file mode 100644 index 0000000000000..a2408b3a4b20d --- /dev/null +++ b/consensus/src/consensus_observer/observer/ordered_blocks.rs @@ -0,0 +1,747 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::consensus_observer::{ + common::{ + logging::{LogEntry, LogSchema}, + metrics, + }, + network::observer_message::{CommitDecision, OrderedBlock}, +}; +use aptos_config::config::ConsensusObserverConfig; +use aptos_consensus_types::common::Round; +use aptos_logger::{debug, warn}; +use aptos_types::{block_info::BlockInfo, ledger_info::LedgerInfoWithSignatures}; +use std::collections::BTreeMap; + +/// A simple struct to store ordered blocks +pub struct OrderedBlockStore { + // The configuration of the consensus observer + consensus_observer_config: ConsensusObserverConfig, + + // The highest committed block (epoch and round) + highest_committed_epoch_round: Option<(u64, Round)>, + + // Ordered blocks. The key is the epoch and round of the last block in the + // ordered block. Each entry contains the block and the commit decision (if any). 
+ ordered_blocks: BTreeMap<(u64, Round), (OrderedBlock, Option<CommitDecision>)>, +} + +impl OrderedBlockStore { + pub fn new(consensus_observer_config: ConsensusObserverConfig) -> Self { + Self { + consensus_observer_config, + highest_committed_epoch_round: None, + ordered_blocks: BTreeMap::new(), + } + } + + /// Clears all ordered blocks + pub fn clear_all_ordered_blocks(&mut self) { + self.ordered_blocks.clear(); + } + + /// Returns a copy of the ordered blocks + pub fn get_all_ordered_blocks( + &self, + ) -> BTreeMap<(u64, Round), (OrderedBlock, Option<CommitDecision>)> { + self.ordered_blocks.clone() + } + + /// Returns the highest committed epoch and round (if any) + pub fn get_highest_committed_epoch_round(&self) -> Option<(u64, Round)> { + self.highest_committed_epoch_round + } + + /// Returns the last ordered block (if any) + pub fn get_last_ordered_block(&self) -> Option<BlockInfo> { + self.ordered_blocks + .last_key_value() + .map(|(_, (ordered_block, _))| ordered_block.last_block().block_info()) + } + + /// Returns the ordered block for the given epoch and round (if any) + pub fn get_ordered_block(&self, epoch: u64, round: Round) -> Option<OrderedBlock> { + self.ordered_blocks + .get(&(epoch, round)) + .map(|(ordered_block, _)| ordered_block.clone()) + } + + /// Inserts the given ordered block into the ordered blocks. This function + /// assumes the block has already been checked to extend the current ordered + /// blocks, and that the ordered proof has been verified. + pub fn insert_ordered_block(&mut self, ordered_block: OrderedBlock) { + // Verify that the number of ordered blocks doesn't exceed the maximum + let max_num_ordered_blocks = self.consensus_observer_config.max_num_pending_blocks as usize; + if self.ordered_blocks.len() >= max_num_ordered_blocks { + warn!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Exceeded the maximum number of ordered blocks: {:?}. 
Dropping block: {:?}.", + max_num_ordered_blocks, + ordered_block.proof_block_info() + )) + ); + return; // Drop the block if we've exceeded the maximum + } + + // Otherwise, we can add the block to the ordered blocks + debug!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Adding ordered block to the ordered blocks: {:?}", + ordered_block.proof_block_info() + )) + ); + + // Get the epoch and round of the last ordered block + let last_block = ordered_block.last_block(); + let last_block_epoch = last_block.epoch(); + let last_block_round = last_block.round(); + + // Insert the ordered block + self.ordered_blocks + .insert((last_block_epoch, last_block_round), (ordered_block, None)); + } + + /// Removes the ordered blocks for the given commit ledger info. This will + /// remove all blocks up to (and including) the epoch and round of the commit. + pub fn remove_blocks_for_commit(&mut self, commit_ledger_info: &LedgerInfoWithSignatures) { + // Determine the epoch and round to split off + let split_off_epoch = commit_ledger_info.ledger_info().epoch(); + let split_off_round = commit_ledger_info.commit_info().round().saturating_add(1); + + // Remove the blocks from the ordered blocks + self.ordered_blocks = self + .ordered_blocks + .split_off(&(split_off_epoch, split_off_round)); + + // Update the highest committed epoch and round + self.update_highest_committed_epoch_round(commit_ledger_info); + } + + /// Updates the commit decision of the ordered block (if found) + pub fn update_commit_decision(&mut self, commit_decision: &CommitDecision) { + // Get the epoch and round of the commit decision + let commit_decision_epoch = commit_decision.epoch(); + let commit_decision_round = commit_decision.round(); + + // Update the commit decision for the ordered blocks + if let Some((_, existing_commit_decision)) = self + .ordered_blocks + .get_mut(&(commit_decision_epoch, commit_decision_round)) + { + *existing_commit_decision = Some(commit_decision.clone()); + } 
+ + // Update the highest committed epoch and round + self.update_highest_committed_epoch_round(commit_decision.commit_proof()); + } + + /// Updates the highest committed epoch and round based on the commit ledger info + fn update_highest_committed_epoch_round( + &mut self, + commit_ledger_info: &LedgerInfoWithSignatures, + ) { + // Get the epoch and round of the commit ledger info + let commit_epoch = commit_ledger_info.ledger_info().epoch(); + let commit_round = commit_ledger_info.commit_info().round(); + let commit_epoch_round = (commit_epoch, commit_round); + + // Update the highest committed epoch and round (if appropriate) + match self.highest_committed_epoch_round { + Some(highest_committed_epoch_round) => { + if commit_epoch_round > highest_committed_epoch_round { + self.highest_committed_epoch_round = Some(commit_epoch_round); + } + }, + None => { + self.highest_committed_epoch_round = Some(commit_epoch_round); + }, + } + } + + /// Updates the metrics for the ordered blocks + pub fn update_ordered_blocks_metrics(&self) { + // Update the number of ordered block entries + let num_entries = self.ordered_blocks.len() as u64; + metrics::set_gauge_with_label( + &metrics::OBSERVER_NUM_PROCESSED_BLOCKS, + metrics::ORDERED_BLOCK_ENTRIES_LABEL, + num_entries, + ); + + // Update the total number of ordered blocks + let num_ordered_blocks = self + .ordered_blocks + .values() + .map(|(ordered_block, _)| ordered_block.blocks().len() as u64) + .sum(); + metrics::set_gauge_with_label( + &metrics::OBSERVER_NUM_PROCESSED_BLOCKS, + metrics::ORDERED_BLOCK_LABEL, + num_ordered_blocks, + ); + + // Update the highest round for the ordered blocks + let highest_ordered_round = self + .ordered_blocks + .last_key_value() + .map(|(_, (ordered_block, _))| ordered_block.last_block().round()) + .unwrap_or(0); + metrics::set_gauge_with_label( + &metrics::OBSERVER_PROCESSED_BLOCK_ROUNDS, + metrics::ORDERED_BLOCK_LABEL, + highest_ordered_round, + ); + + // Update the highest round for the 
committed blocks + let highest_committed_round = self + .highest_committed_epoch_round + .map(|(_, round)| round) + .unwrap_or(0); + metrics::set_gauge_with_label( + &metrics::OBSERVER_PROCESSED_BLOCK_ROUNDS, + metrics::COMMITTED_BLOCKS_LABEL, + highest_committed_round, + ); + } +} + +#[cfg(test)] +mod test { + use super::*; + use aptos_consensus_types::{ + block::Block, + block_data::{BlockData, BlockType}, + pipelined_block::PipelinedBlock, + quorum_cert::QuorumCert, + }; + use aptos_crypto::HashValue; + use aptos_types::{ + aggregate_signature::AggregateSignature, ledger_info::LedgerInfo, transaction::Version, + }; + use std::sync::Arc; + + #[test] + fn test_clear_all_ordered_blocks() { + // Create a new ordered block store + let mut ordered_block_store = OrderedBlockStore::new(ConsensusObserverConfig::default()); + + // Insert several ordered blocks for the current epoch + let current_epoch = 0; + let num_ordered_blocks = 10; + create_and_add_ordered_blocks(&mut ordered_block_store, num_ordered_blocks, current_epoch); + + // Clear all ordered blocks + ordered_block_store.clear_all_ordered_blocks(); + + // Check that all the ordered blocks were removed + assert!(ordered_block_store.ordered_blocks.is_empty()); + } + + #[test] + fn test_get_highest_committed_epoch_round() { + // Create a new ordered block store + let mut ordered_block_store = OrderedBlockStore::new(ConsensusObserverConfig::default()); + + // Verify that we have no highest committed epoch and round + assert!(ordered_block_store + .get_highest_committed_epoch_round() + .is_none()); + + // Insert several ordered blocks for the current epoch + let current_epoch = 10; + let num_ordered_blocks = 50; + let ordered_blocks = create_and_add_ordered_blocks( + &mut ordered_block_store, + num_ordered_blocks, + current_epoch, + ); + + // Create a commit decision for the first ordered block + let first_ordered_block = ordered_blocks.first().unwrap(); + let first_ordered_block_info = 
first_ordered_block.last_block().block_info(); + let commit_decision = CommitDecision::new(LedgerInfoWithSignatures::new( + LedgerInfo::new(first_ordered_block_info.clone(), HashValue::random()), + AggregateSignature::empty(), + )); + + // Update the commit decision for the first ordered block + ordered_block_store.update_commit_decision(&commit_decision); + + // Verify the highest committed epoch and round is the first ordered block + verify_highest_committed_epoch_round(&ordered_block_store, &first_ordered_block_info); + + // Create a commit decision for the last ordered block + let last_ordered_block = ordered_blocks.last().unwrap(); + let last_ordered_block_info = last_ordered_block.last_block().block_info(); + let commit_decision = CommitDecision::new(LedgerInfoWithSignatures::new( + LedgerInfo::new(last_ordered_block_info.clone(), HashValue::random()), + AggregateSignature::empty(), + )); + + // Update the commit decision for the last ordered block + ordered_block_store.update_commit_decision(&commit_decision); + + // Verify the highest committed epoch and round is the last ordered block + verify_highest_committed_epoch_round(&ordered_block_store, &last_ordered_block_info); + + // Insert several ordered blocks for the next epoch + let next_epoch = current_epoch + 1; + let num_ordered_blocks = 10; + let ordered_blocks = + create_and_add_ordered_blocks(&mut ordered_block_store, num_ordered_blocks, next_epoch); + + // Verify the highest committed epoch and round is still the last ordered block + verify_highest_committed_epoch_round(&ordered_block_store, &last_ordered_block_info); + + // Create a commit decision for the first ordered block (in the next epoch) + let first_ordered_block = ordered_blocks.first().unwrap(); + let first_ordered_block_info = first_ordered_block.last_block().block_info(); + let commit_decision = CommitDecision::new(LedgerInfoWithSignatures::new( + LedgerInfo::new(first_ordered_block_info.clone(), HashValue::random()), + 
AggregateSignature::empty(), + )); + + // Update the commit decision for the first ordered block + ordered_block_store.update_commit_decision(&commit_decision); + + // Verify the highest committed epoch and round is the first ordered block (in the next epoch) + verify_highest_committed_epoch_round(&ordered_block_store, &first_ordered_block_info); + + // Create a commit decision for the last ordered block (in the next epoch) + let last_ordered_block = ordered_blocks.last().unwrap(); + let last_ordered_block_info = last_ordered_block.last_block().block_info(); + let commit_decision = CommitDecision::new(LedgerInfoWithSignatures::new( + LedgerInfo::new(last_ordered_block_info.clone(), HashValue::random()), + AggregateSignature::empty(), + )); + + // Remove the ordered blocks for the commit decision + ordered_block_store.remove_blocks_for_commit(commit_decision.commit_proof()); + + // Verify the highest committed epoch and round is the last ordered block (in the next epoch) + verify_highest_committed_epoch_round(&ordered_block_store, &last_ordered_block_info); + + // Create a commit decision for an out-of-date ordered block + let out_of_date_ordered_block = ordered_blocks.first().unwrap(); + let out_of_date_ordered_block_info = out_of_date_ordered_block.last_block().block_info(); + let commit_decision = CommitDecision::new(LedgerInfoWithSignatures::new( + LedgerInfo::new(out_of_date_ordered_block_info.clone(), HashValue::random()), + AggregateSignature::empty(), + )); + + // Update the commit decision for the out-of-date ordered block + ordered_block_store.update_commit_decision(&commit_decision); + + // Verify the highest committed epoch and round is still the last ordered block (in the next epoch) + verify_highest_committed_epoch_round(&ordered_block_store, &last_ordered_block_info); + } + + #[test] + fn test_get_last_ordered_block() { + // Create a new ordered block store + let mut ordered_block_store = OrderedBlockStore::new(ConsensusObserverConfig::default()); + + 
// Verify that we have no last ordered block + assert!(ordered_block_store.get_last_ordered_block().is_none()); + + // Insert several ordered blocks for the current epoch + let current_epoch = 0; + let num_ordered_blocks = 50; + let ordered_blocks = create_and_add_ordered_blocks( + &mut ordered_block_store, + num_ordered_blocks, + current_epoch, + ); + + // Verify the last ordered block is the block with the highest round + let last_ordered_block = ordered_blocks.last().unwrap(); + let last_ordered_block_info = last_ordered_block.last_block().block_info(); + assert_eq!( + last_ordered_block_info, + ordered_block_store.get_last_ordered_block().unwrap() + ); + + // Insert several ordered blocks for the next epoch + let next_epoch = current_epoch + 1; + let num_ordered_blocks = 50; + let ordered_blocks = + create_and_add_ordered_blocks(&mut ordered_block_store, num_ordered_blocks, next_epoch); + + // Verify the last ordered block is the block with the highest epoch and round + let last_ordered_block = ordered_blocks.last().unwrap(); + let last_ordered_block_info = last_ordered_block.last_block().block_info(); + assert_eq!( + last_ordered_block_info, + ordered_block_store.get_last_ordered_block().unwrap() + ); + } + + #[test] + fn test_get_ordered_block() { + // Create a new ordered block store + let mut ordered_block_store = OrderedBlockStore::new(ConsensusObserverConfig::default()); + + // Insert several ordered blocks for the current epoch + let current_epoch = 0; + let num_ordered_blocks = 50; + let ordered_blocks = create_and_add_ordered_blocks( + &mut ordered_block_store, + num_ordered_blocks, + current_epoch, + ); + + // Ensure the ordered blocks were all inserted + let all_ordered_blocks = ordered_block_store.get_all_ordered_blocks(); + assert_eq!(all_ordered_blocks.len(), num_ordered_blocks); + + // Verify the ordered blocks can be retrieved + for ordered_block in &ordered_blocks { + let block_info = ordered_block.last_block().block_info(); + let 
fetched_ordered_block = ordered_block_store + .get_ordered_block(block_info.epoch(), block_info.round()) + .unwrap(); + assert_eq!(ordered_block.clone(), fetched_ordered_block); + } + + // Verify that a non-existent block cannot be retrieved + let last_block = ordered_blocks.last().unwrap(); + let last_block_info = last_block.last_block().block_info(); + let ordered_block = ordered_block_store.get_ordered_block( + last_block_info.epoch(), + last_block_info.round() + 1, // Request a round that doesn't exist + ); + assert!(ordered_block.is_none()); + } + + #[test] + fn test_insert_ordered_block_limit() { + // Create a consensus observer config with a maximum of 10 pending blocks + let max_num_pending_blocks = 10; + let consensus_observer_config = ConsensusObserverConfig { + max_num_pending_blocks: max_num_pending_blocks as u64, + ..ConsensusObserverConfig::default() + }; + + // Create a new ordered block store + let mut ordered_block_store = OrderedBlockStore::new(consensus_observer_config); + + // Insert several ordered blocks for the current epoch + let current_epoch = 0; + let num_ordered_blocks = max_num_pending_blocks * 2; // Insert more than the maximum + create_and_add_ordered_blocks(&mut ordered_block_store, num_ordered_blocks, current_epoch); + + // Verify the ordered blocks were inserted up to the maximum + let all_ordered_blocks = ordered_block_store.get_all_ordered_blocks(); + assert_eq!(all_ordered_blocks.len(), max_num_pending_blocks); + + // Insert several ordered blocks for the next epoch + let next_epoch = current_epoch + 1; + let num_ordered_blocks = max_num_pending_blocks - 1; // Insert one less than the maximum + let ordered_blocks = + create_and_add_ordered_blocks(&mut ordered_block_store, num_ordered_blocks, next_epoch); + + // Verify the ordered blocks were not inserted (they should have just been dropped) + for ordered_block in &ordered_blocks { + let block_info = ordered_block.last_block().block_info(); + let fetched_ordered_block = + 
ordered_block_store.get_ordered_block(block_info.epoch(), block_info.round()); + assert!(fetched_ordered_block.is_none()); + } + + // Verify the ordered blocks don't exceed the maximum + let num_ordered_blocks = ordered_block_store.get_all_ordered_blocks().len(); + assert_eq!(num_ordered_blocks, max_num_pending_blocks); + } + + #[test] + fn test_remove_blocks_for_commit() { + // Create a new ordered block store + let mut ordered_block_store = OrderedBlockStore::new(ConsensusObserverConfig::default()); + + // Insert several ordered blocks for the current epoch + let current_epoch = 10; + let num_ordered_blocks = 10; + let ordered_blocks = create_and_add_ordered_blocks( + &mut ordered_block_store, + num_ordered_blocks, + current_epoch, + ); + + // Insert several ordered blocks for the next epoch + let next_epoch = current_epoch + 1; + let num_ordered_blocks_next_epoch = 20; + let ordered_blocks_next_epoch = create_and_add_ordered_blocks( + &mut ordered_block_store, + num_ordered_blocks_next_epoch, + next_epoch, + ); + + // Insert several ordered blocks for a future epoch + let future_epoch = next_epoch + 1; + let num_ordered_blocks_future_epoch = 30; + create_and_add_ordered_blocks( + &mut ordered_block_store, + num_ordered_blocks_future_epoch, + future_epoch, + ); + + // Create a commit decision for the first ordered block + let first_ordered_block = ordered_blocks.first().unwrap(); + let first_ordered_block_info = first_ordered_block.last_block().block_info(); + let commit_decision = CommitDecision::new(LedgerInfoWithSignatures::new( + LedgerInfo::new(first_ordered_block_info.clone(), HashValue::random()), + AggregateSignature::empty(), + )); + + // Remove the ordered blocks for the commit decision + ordered_block_store.remove_blocks_for_commit(commit_decision.commit_proof()); + + // Verify the first ordered block was removed + let all_ordered_blocks = ordered_block_store.get_all_ordered_blocks(); + assert!(!all_ordered_blocks.contains_key(&( + 
first_ordered_block_info.epoch(), + first_ordered_block_info.round() + ))); + assert_eq!( + all_ordered_blocks.len(), + num_ordered_blocks + num_ordered_blocks_next_epoch + num_ordered_blocks_future_epoch + - 1 + ); + + // Create a commit decision for the last ordered block (in the current epoch) + let last_ordered_block = ordered_blocks.last().unwrap(); + let last_ordered_block_info = last_ordered_block.last_block().block_info(); + let commit_decision = CommitDecision::new(LedgerInfoWithSignatures::new( + LedgerInfo::new(last_ordered_block_info.clone(), HashValue::random()), + AggregateSignature::empty(), + )); + + // Remove the ordered blocks for the commit decision + ordered_block_store.remove_blocks_for_commit(commit_decision.commit_proof()); + + // Verify the ordered blocks for the current epoch were removed + let all_ordered_blocks = ordered_block_store.get_all_ordered_blocks(); + for ordered_block in ordered_blocks { + let block_info = ordered_block.last_block().block_info(); + assert!(!all_ordered_blocks.contains_key(&(block_info.epoch(), block_info.round()))); + } + assert_eq!( + all_ordered_blocks.len(), + num_ordered_blocks_next_epoch + num_ordered_blocks_future_epoch + ); + + // Create a commit decision for the last ordered block (in the next epoch) + let last_ordered_block = ordered_blocks_next_epoch.last().unwrap(); + let last_ordered_block_info = last_ordered_block.last_block().block_info(); + let commit_decision = CommitDecision::new(LedgerInfoWithSignatures::new( + LedgerInfo::new(last_ordered_block_info.clone(), HashValue::random()), + AggregateSignature::empty(), + )); + + // Remove the ordered blocks for the commit decision + ordered_block_store.remove_blocks_for_commit(commit_decision.commit_proof()); + + // Verify the ordered blocks for the next epoch were removed + let all_ordered_blocks = ordered_block_store.get_all_ordered_blocks(); + for ordered_block in ordered_blocks_next_epoch { + let block_info = 
ordered_block.last_block().block_info(); + assert!(!all_ordered_blocks.contains_key(&(block_info.epoch(), block_info.round()))); + } + assert_eq!(all_ordered_blocks.len(), num_ordered_blocks_future_epoch); + } + + #[test] + fn test_update_commit_decision() { + // Create a new ordered block store + let mut ordered_block_store = OrderedBlockStore::new(ConsensusObserverConfig::default()); + + // Insert several ordered blocks for the current epoch + let current_epoch = 0; + let num_ordered_blocks = 10; + let ordered_blocks = create_and_add_ordered_blocks( + &mut ordered_block_store, + num_ordered_blocks, + current_epoch, + ); + + // Insert several ordered blocks for the next epoch + let next_epoch = current_epoch + 1; + let num_ordered_blocks_next_epoch = 20; + let ordered_blocks_next_epoch = create_and_add_ordered_blocks( + &mut ordered_block_store, + num_ordered_blocks_next_epoch, + next_epoch, + ); + + // Ensure the ordered blocks were all inserted + let all_ordered_blocks = ordered_block_store.get_all_ordered_blocks(); + assert_eq!( + all_ordered_blocks.len(), + num_ordered_blocks + num_ordered_blocks_next_epoch + ); + + // Verify the ordered blocks don't have any commit decisions + for (_, (_, commit_decision)) in all_ordered_blocks.iter() { + assert!(commit_decision.is_none()); + } + + // Create a commit decision for the first ordered block + let first_ordered_block = ordered_blocks.first().unwrap(); + let first_ordered_block_info = first_ordered_block.last_block().block_info(); + let commit_decision = CommitDecision::new(LedgerInfoWithSignatures::new( + LedgerInfo::new(first_ordered_block_info.clone(), HashValue::random()), + AggregateSignature::empty(), + )); + + // Update the commit decision for the first ordered block + ordered_block_store.update_commit_decision(&commit_decision); + + // Verify the commit decision was updated + verify_commit_decision( + &ordered_block_store, + &first_ordered_block_info, + commit_decision, + ); + + // Create a commit decision 
for the last ordered block (in the current epoch) + let last_ordered_block = ordered_blocks.last().unwrap(); + let last_ordered_block_info = last_ordered_block.last_block().block_info(); + let commit_decision = CommitDecision::new(LedgerInfoWithSignatures::new( + LedgerInfo::new(last_ordered_block_info.clone(), HashValue::random()), + AggregateSignature::empty(), + )); + + // Update the commit decision for the last ordered block + ordered_block_store.update_commit_decision(&commit_decision); + + // Verify the commit decision was updated + verify_commit_decision( + &ordered_block_store, + &last_ordered_block_info, + commit_decision, + ); + + // Verify the commit decisions for the remaining blocks are still missing + let all_ordered_blocks = ordered_block_store.get_all_ordered_blocks(); + for i in 1..9 { + let (_, commit_decision) = all_ordered_blocks.get(&(current_epoch, i as u64)).unwrap(); + assert!(commit_decision.is_none()); + } + + // Create a commit decision for the last ordered block (in the next epoch) + let last_ordered_block = ordered_blocks_next_epoch.last().unwrap(); + let last_ordered_block_info = last_ordered_block.last_block().block_info(); + let commit_decision = CommitDecision::new(LedgerInfoWithSignatures::new( + LedgerInfo::new(last_ordered_block_info.clone(), HashValue::random()), + AggregateSignature::empty(), + )); + + // Update the commit decision for the last ordered block + ordered_block_store.update_commit_decision(&commit_decision); + + // Verify the commit decision was updated + verify_commit_decision( + &ordered_block_store, + &last_ordered_block_info, + commit_decision, + ); + + // Verify the commit decisions for the remaining blocks are still missing + let all_ordered_blocks = ordered_block_store.get_all_ordered_blocks(); + for i in 1..19 { + let (_, commit_decision) = all_ordered_blocks.get(&(next_epoch, i as u64)).unwrap(); + assert!(commit_decision.is_none()); + } + } + + /// Creates and adds the specified number of ordered blocks 
to the ordered blocks + fn create_and_add_ordered_blocks( + ordered_block_store: &mut OrderedBlockStore, + num_ordered_blocks: usize, + epoch: u64, + ) -> Vec { + let mut ordered_blocks = vec![]; + for i in 0..num_ordered_blocks { + // Create a new block info + let block_info = BlockInfo::new( + epoch, + i as aptos_types::block_info::Round, + HashValue::random(), + HashValue::random(), + i as Version, + i as u64, + None, + ); + + // Create a pipelined block + let block_data = BlockData::new_for_testing( + block_info.epoch(), + block_info.round(), + block_info.timestamp_usecs(), + QuorumCert::dummy(), + BlockType::Genesis, + ); + let block = Block::new_for_testing(block_info.id(), block_data, None); + let pipelined_block = Arc::new(PipelinedBlock::new_ordered(block)); + + // Create an ordered block + let blocks = vec![pipelined_block]; + let ordered_proof = create_ledger_info(epoch, i as Round); + let ordered_block = OrderedBlock::new(blocks, ordered_proof); + + // Insert the block into the ordered block store + ordered_block_store.insert_ordered_block(ordered_block.clone()); + + // Add the block to the ordered blocks + ordered_blocks.push(ordered_block); + } + + ordered_blocks + } + + /// Creates and returns a new ledger info with the specified epoch and round + fn create_ledger_info(epoch: u64, round: Round) -> LedgerInfoWithSignatures { + LedgerInfoWithSignatures::new( + LedgerInfo::new( + BlockInfo::random_with_epoch(epoch, round), + HashValue::random(), + ), + AggregateSignature::empty(), + ) + } + + /// Verifies the commit decision for the specified block info + fn verify_commit_decision( + ordered_block_store: &OrderedBlockStore, + block_info: &BlockInfo, + commit_decision: CommitDecision, + ) { + // Get the commit decision for the block + let all_ordered_blocks = ordered_block_store.get_all_ordered_blocks(); + let (_, updated_commit_decision) = all_ordered_blocks + .get(&(block_info.epoch(), block_info.round())) + .unwrap(); + + // Verify the commit decision 
is expected + assert_eq!( + commit_decision, + updated_commit_decision.as_ref().unwrap().clone() + ); + } + + /// Verifies the highest committed epoch and round matches the given block info + fn verify_highest_committed_epoch_round( + ordered_block_store: &OrderedBlockStore, + block_info: &BlockInfo, + ) { + // Verify the highest committed epoch and round is the block info + let highest_committed_epoch_round = ordered_block_store + .get_highest_committed_epoch_round() + .unwrap(); + assert_eq!( + highest_committed_epoch_round, + (block_info.epoch(), block_info.round()) + ); + } +} diff --git a/consensus/src/consensus_observer/observer/payload_store.rs b/consensus/src/consensus_observer/observer/payload_store.rs new file mode 100644 index 0000000000000..59859ec0b82ea --- /dev/null +++ b/consensus/src/consensus_observer/observer/payload_store.rs @@ -0,0 +1,1160 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::consensus_observer::{ + common::{ + error::Error, + logging::{LogEntry, LogSchema}, + metrics, + }, + network::observer_message::{BlockPayload, OrderedBlock}, +}; +use aptos_config::config::ConsensusObserverConfig; +use aptos_consensus_types::{common::Round, pipelined_block::PipelinedBlock}; +use aptos_infallible::Mutex; +use aptos_logger::{error, warn}; +use aptos_types::epoch_state::EpochState; +use std::{ + collections::{btree_map::Entry, BTreeMap}, + sync::Arc, +}; + +/// The status of the block payload +pub enum BlockPayloadStatus { + AvailableAndVerified(BlockPayload), + AvailableAndUnverified(BlockPayload), +} + +/// A simple struct to store the block payloads of ordered and committed blocks +pub struct BlockPayloadStore { + // The configuration of the consensus observer + consensus_observer_config: ConsensusObserverConfig, + + // Block transaction payloads (indexed by epoch and round). + // This is directly accessed by the payload manager. 
+ block_payloads: Arc>>, +} + +impl BlockPayloadStore { + pub fn new(consensus_observer_config: ConsensusObserverConfig) -> Self { + Self { + consensus_observer_config, + block_payloads: Arc::new(Mutex::new(BTreeMap::new())), + } + } + + /// Returns true iff all the payloads for the given blocks + /// are available and have been verified. + pub fn all_payloads_exist(&self, blocks: &[Arc]) -> bool { + let block_payloads = self.block_payloads.lock(); + blocks.iter().all(|block| { + let epoch_and_round = (block.epoch(), block.round()); + matches!( + block_payloads.get(&epoch_and_round), + Some(BlockPayloadStatus::AvailableAndVerified(_)) + ) + }) + } + + /// Clears all the payloads from the block payload store + pub fn clear_all_payloads(&self) { + self.block_payloads.lock().clear(); + } + + /// Returns true iff we already have a payload entry for the given block + pub fn existing_payload_entry(&self, block_payload: &BlockPayload) -> bool { + // Get the epoch and round of the payload + let epoch_and_round = (block_payload.epoch(), block_payload.round()); + + // Check if a payload already exists in the store + self.block_payloads.lock().contains_key(&epoch_and_round) + } + + /// Returns a reference to the block payloads + pub fn get_block_payloads(&self) -> Arc>> { + self.block_payloads.clone() + } + + /// Inserts the given block payload data into the payload store + pub fn insert_block_payload( + &mut self, + block_payload: BlockPayload, + verified_payload_signatures: bool, + ) { + // Verify that the number of payloads doesn't exceed the maximum + let max_num_pending_blocks = self.consensus_observer_config.max_num_pending_blocks as usize; + if self.block_payloads.lock().len() >= max_num_pending_blocks { + warn!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Exceeded the maximum number of payloads: {:?}. 
Dropping block: {:?}!", + max_num_pending_blocks, + block_payload.block(), + )) + ); + return; // Drop the block if we've exceeded the maximum + } + + // Create the new payload status + let epoch_and_round = (block_payload.epoch(), block_payload.round()); + let payload_status = if verified_payload_signatures { + BlockPayloadStatus::AvailableAndVerified(block_payload) + } else { + BlockPayloadStatus::AvailableAndUnverified(block_payload) + }; + + // Insert the new payload status + self.block_payloads + .lock() + .insert(epoch_and_round, payload_status); + } + + /// Removes all blocks up to the specified epoch and round (inclusive) + pub fn remove_blocks_for_epoch_round(&self, epoch: u64, round: Round) { + // Determine the round to split off + let split_off_round = round.saturating_add(1); + + // Remove the blocks from the payload store + let mut block_payloads = self.block_payloads.lock(); + *block_payloads = block_payloads.split_off(&(epoch, split_off_round)); + } + + /// Removes the committed blocks from the payload store + pub fn remove_committed_blocks(&self, committed_blocks: &[Arc]) { + // Get the highest epoch and round for the committed blocks + let (highest_epoch, highest_round) = committed_blocks + .last() + .map_or((0, 0), |block| (block.epoch(), block.round())); + + // Remove the blocks + self.remove_blocks_for_epoch_round(highest_epoch, highest_round); + } + + /// Updates the metrics for the payload store + pub fn update_payload_store_metrics(&self) { + // Update the number of block payloads + let num_payloads = self.block_payloads.lock().len() as u64; + metrics::set_gauge_with_label( + &metrics::OBSERVER_NUM_PROCESSED_BLOCKS, + metrics::STORED_PAYLOADS_LABEL, + num_payloads, + ); + + // Update the highest round for the block payloads + let highest_round = self + .block_payloads + .lock() + .last_key_value() + .map(|((_, round), _)| *round) + .unwrap_or(0); + metrics::set_gauge_with_label( + &metrics::OBSERVER_PROCESSED_BLOCK_ROUNDS, + 
metrics::STORED_PAYLOADS_LABEL, + highest_round, + ); + } + + /// Verifies all block payloads against the given ordered block. + /// If verification fails, an error is returned. + pub fn verify_payloads_against_ordered_block( + &mut self, + ordered_block: &OrderedBlock, + ) -> Result<(), Error> { + // Verify each of the blocks in the ordered block + for ordered_block in ordered_block.blocks() { + // Get the block epoch and round + let block_epoch = ordered_block.epoch(); + let block_round = ordered_block.round(); + + // Fetch the block payload + match self.block_payloads.lock().entry((block_epoch, block_round)) { + Entry::Occupied(entry) => { + // Get the block transaction payload + let transaction_payload = match entry.get() { + BlockPayloadStatus::AvailableAndVerified(block_payload) => { + block_payload.transaction_payload() + }, + BlockPayloadStatus::AvailableAndUnverified(_) => { + // The payload should have already been verified + return Err(Error::InvalidMessageError(format!( + "Payload verification failed! Block payload for epoch: {:?} and round: {:?} is unverified.", + ordered_block.epoch(), + ordered_block.round() + ))); + }, + }; + + // Get the ordered block payload + let ordered_block_payload = match ordered_block.block().payload() { + Some(payload) => payload, + None => { + return Err(Error::InvalidMessageError(format!( + "Payload verification failed! Missing block payload for epoch: {:?} and round: {:?}", + ordered_block.epoch(), + ordered_block.round() + ))); + }, + }; + + // Verify the transaction payload against the ordered block payload + transaction_payload.verify_against_ordered_payload(ordered_block_payload)?; + }, + Entry::Vacant(_) => { + // The payload is missing (this should never happen) + return Err(Error::InvalidMessageError(format!( + "Payload verification failed! 
Missing block payload for epoch: {:?} and round: {:?}", + ordered_block.epoch(), + ordered_block.round() + ))); + }, + } + } + + Ok(()) + } + + /// Verifies the block payload signatures against the given epoch state. + /// If verification is successful, blocks are marked as verified. Each + /// new verified block is + pub fn verify_payload_signatures(&mut self, epoch_state: &EpochState) -> Vec { + // Get the current epoch + let current_epoch = epoch_state.epoch; + + // Gather the keys for the block payloads + let payload_epochs_and_rounds: Vec<(u64, Round)> = + self.block_payloads.lock().keys().cloned().collect(); + + // Go through all unverified blocks and attempt to verify the signatures + let mut verified_payloads_to_update = vec![]; + for (epoch, round) in payload_epochs_and_rounds { + // Check if we can break early (BtreeMaps are sorted by key) + if epoch > current_epoch { + break; + } + + // Otherwise, attempt to verify the payload signatures + if epoch == current_epoch { + if let Entry::Occupied(mut entry) = self.block_payloads.lock().entry((epoch, round)) + { + if let BlockPayloadStatus::AvailableAndUnverified(block_payload) = + entry.get_mut() + { + if let Err(error) = block_payload.verify_payload_signatures(epoch_state) { + // Log the verification failure + error!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Failed to verify the block payload signatures for epoch: {:?} and round: {:?}. Error: {:?}", + epoch, round, error + )) + ); + + // Remove the block payload from the store + entry.remove(); + } else { + // Save the block payload for reinsertion + verified_payloads_to_update.push(block_payload.clone()); + } + } + } + } + } + + // Collect the rounds of all newly verified blocks + let verified_payload_rounds: Vec = verified_payloads_to_update + .iter() + .map(|block_payload| block_payload.round()) + .collect(); + + // Update the verified block payloads. 
Note: this will cause + // notifications to be sent to any listeners that are waiting. + for verified_payload in verified_payloads_to_update { + self.insert_block_payload(verified_payload, true); + } + + // Return the newly verified payload rounds + verified_payload_rounds + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::consensus_observer::network::observer_message::BlockTransactionPayload; + use aptos_bitvec::BitVec; + use aptos_consensus_types::{ + block::Block, + block_data::{BlockData, BlockType}, + common::{Author, Payload, ProofWithData}, + proof_of_store::{BatchId, BatchInfo, ProofOfStore}, + quorum_cert::QuorumCert, + }; + use aptos_crypto::HashValue; + use aptos_types::{ + aggregate_signature::AggregateSignature, + block_info::{BlockInfo, Round}, + ledger_info::{LedgerInfo, LedgerInfoWithSignatures}, + transaction::Version, + validator_signer::ValidatorSigner, + validator_verifier::{ValidatorConsensusInfo, ValidatorVerifier}, + PeerId, + }; + use claims::assert_matches; + + #[test] + fn test_all_payloads_exist() { + // Create the consensus observer config + let max_num_pending_blocks = 1000; + let consensus_observer_config = ConsensusObserverConfig { + max_num_pending_blocks, + ..ConsensusObserverConfig::default() + }; + + // Create a new block payload store + let mut block_payload_store = BlockPayloadStore::new(consensus_observer_config); + + // Add some unverified blocks to the payload store + let num_blocks_in_store = 100; + let unverified_blocks = + create_and_add_blocks_to_store(&mut block_payload_store, num_blocks_in_store, 1, false); + + // Verify the payloads don't exist in the block payload store + assert!(!block_payload_store.all_payloads_exist(&unverified_blocks)); + assert_eq!(get_num_verified_payloads(&block_payload_store), 0); + assert_eq!( + get_num_unverified_payloads(&block_payload_store), + num_blocks_in_store + ); + + // Add some verified blocks to the payload store + let num_blocks_in_store = 100; + let verified_blocks = 
+ create_and_add_blocks_to_store(&mut block_payload_store, num_blocks_in_store, 0, true); + + // Check that all the payloads exist in the block payload store + assert!(block_payload_store.all_payloads_exist(&verified_blocks)); + + // Check that a subset of the payloads exist in the block payload store + let subset_verified_blocks = &verified_blocks[0..50]; + assert!(block_payload_store.all_payloads_exist(subset_verified_blocks)); + + // Remove some of the payloads from the block payload store + block_payload_store.remove_committed_blocks(subset_verified_blocks); + + // Check that the payloads no longer exist in the block payload store + assert!(!block_payload_store.all_payloads_exist(subset_verified_blocks)); + + // Check that the remaining payloads still exist in the block payload store + let subset_verified_blocks = &verified_blocks[50..100]; + assert!(block_payload_store.all_payloads_exist(subset_verified_blocks)); + + // Remove the remaining payloads from the block payload store + block_payload_store.remove_committed_blocks(subset_verified_blocks); + + // Check that the payloads no longer exist in the block payload store + assert!(!block_payload_store.all_payloads_exist(subset_verified_blocks)); + } + + #[test] + fn test_all_payloads_exist_unverified() { + // Create a new block payload store + let consensus_observer_config = ConsensusObserverConfig::default(); + let mut block_payload_store = BlockPayloadStore::new(consensus_observer_config); + + // Add several verified blocks to the payload store + let num_blocks_in_store = 10; + let verified_blocks = + create_and_add_blocks_to_store(&mut block_payload_store, num_blocks_in_store, 0, true); + + // Check that the payloads exists in the block payload store + assert!(block_payload_store.all_payloads_exist(&verified_blocks)); + + // Mark the payload of the first block as unverified + mark_payload_as_unverified(&block_payload_store, &verified_blocks[0]); + + // Check that the payload no longer exists in the block 
payload store + assert!(!block_payload_store.all_payloads_exist(&verified_blocks)); + + // Check that the remaining payloads still exist in the block payload store + assert!(block_payload_store.all_payloads_exist(&verified_blocks[1..10])); + } + + #[test] + fn test_clear_all_payloads() { + // Create a new block payload store + let consensus_observer_config = ConsensusObserverConfig::default(); + let mut block_payload_store = BlockPayloadStore::new(consensus_observer_config); + + // Add some unverified blocks to the payload store + let num_blocks_in_store = 30; + create_and_add_blocks_to_store(&mut block_payload_store, num_blocks_in_store, 1, false); + + // Add some verified blocks to the payload store + let verified_blocks = + create_and_add_blocks_to_store(&mut block_payload_store, num_blocks_in_store, 0, true); + + // Check that the payloads exist in the block payload store + assert!(block_payload_store.all_payloads_exist(&verified_blocks)); + + // Verify the number of blocks in the block payload store + check_num_unverified_payloads(&block_payload_store, num_blocks_in_store); + check_num_verified_payloads(&block_payload_store, num_blocks_in_store); + + // Clear all the payloads from the block payload store + block_payload_store.clear_all_payloads(); + + // Check that the payloads no longer exist in the block payload store + assert!(!block_payload_store.all_payloads_exist(&verified_blocks)); + + // Check that the block payload store is empty + check_num_unverified_payloads(&block_payload_store, 0); + check_num_verified_payloads(&block_payload_store, 0); + } + + #[test] + fn test_existing_payload_entry() { + // Create a new block payload store + let consensus_observer_config = ConsensusObserverConfig::default(); + let mut block_payload_store = BlockPayloadStore::new(consensus_observer_config); + + // Create a new block payload + let epoch = 10; + let round = 100; + let block_payload = create_block_payload(epoch, round); + + // Check that the payload doesn't exist 
in the block payload store + assert!(!block_payload_store.existing_payload_entry(&block_payload)); + + // Insert the verified block payload into the block payload store + block_payload_store.insert_block_payload(block_payload.clone(), true); + + // Check that the payload now exists in the block payload store + assert!(block_payload_store.existing_payload_entry(&block_payload)); + + // Create another block payload + let epoch = 5; + let round = 101; + let block_payload = create_block_payload(epoch, round); + + // Check that the payload doesn't exist in the block payload store + assert!(!block_payload_store.existing_payload_entry(&block_payload)); + + // Insert the unverified block payload into the block payload store + block_payload_store.insert_block_payload(block_payload.clone(), false); + + // Check that the payload now exists in the block payload store + assert!(block_payload_store.existing_payload_entry(&block_payload)); + } + + #[test] + fn test_insert_block_payload() { + // Create a new block payload store + let consensus_observer_config = ConsensusObserverConfig::default(); + let mut block_payload_store = BlockPayloadStore::new(consensus_observer_config); + + // Add some verified blocks to the payload store + let num_blocks_in_store = 20; + let verified_blocks = + create_and_add_blocks_to_store(&mut block_payload_store, num_blocks_in_store, 0, true); + + // Check that the block payload store contains the new block payloads + assert!(block_payload_store.all_payloads_exist(&verified_blocks)); + + // Verify the number of blocks in the block payload store + check_num_unverified_payloads(&block_payload_store, 0); + check_num_verified_payloads(&block_payload_store, num_blocks_in_store); + + // Mark the payload of the first block as unverified + mark_payload_as_unverified(&block_payload_store, &verified_blocks[0]); + + // Check that the payload no longer exists in the block payload store + assert!(!block_payload_store.all_payloads_exist(&verified_blocks)); + + // 
Verify the number of verified blocks in the block payload store + check_num_verified_payloads(&block_payload_store, num_blocks_in_store - 1); + + // Insert the same block payload into the block payload store (as verified) + let transaction_payload = BlockTransactionPayload::empty(); + let block_payload = BlockPayload::new(verified_blocks[0].block_info(), transaction_payload); + block_payload_store.insert_block_payload(block_payload, true); + + // Check that the block payload store now contains the requested block payload + assert!(block_payload_store.all_payloads_exist(&verified_blocks)); + } + + #[test] + fn test_insert_block_payload_limit_verified() { + // Create a new config observer config + let max_num_pending_blocks = 10; + let consensus_observer_config = ConsensusObserverConfig { + max_num_pending_blocks, + ..ConsensusObserverConfig::default() + }; + + // Create a new block payload store + let mut block_payload_store = BlockPayloadStore::new(consensus_observer_config); + + // Add the maximum number of verified blocks to the payload store + let num_blocks_in_store = max_num_pending_blocks as usize; + create_and_add_blocks_to_store(&mut block_payload_store, num_blocks_in_store, 0, true); + + // Verify the number of blocks in the block payload store + check_num_verified_payloads(&block_payload_store, num_blocks_in_store); + check_num_unverified_payloads(&block_payload_store, 0); + + // Add more blocks to the payload store + let num_blocks_to_add = 5; + create_and_add_blocks_to_store(&mut block_payload_store, num_blocks_to_add, 0, true); + + // Verify the number of blocks in the block payload store + check_num_verified_payloads(&block_payload_store, max_num_pending_blocks as usize); + check_num_unverified_payloads(&block_payload_store, 0); + + // Add a large number of blocks to the payload store + let num_blocks_to_add = 100; + create_and_add_blocks_to_store(&mut block_payload_store, num_blocks_to_add, 0, true); + + // Verify the number of blocks in the block 
payload store + check_num_verified_payloads(&block_payload_store, max_num_pending_blocks as usize); + check_num_unverified_payloads(&block_payload_store, 0); + } + + #[test] + fn test_insert_block_payload_limit_unverified() { + // Create a new config observer config + let max_num_pending_blocks = 10; + let consensus_observer_config = ConsensusObserverConfig { + max_num_pending_blocks, + ..ConsensusObserverConfig::default() + }; + + // Create a new block payload store + let mut block_payload_store = BlockPayloadStore::new(consensus_observer_config); + + // Add the maximum number of unverified blocks to the payload store + let num_blocks_in_store = max_num_pending_blocks as usize; + create_and_add_blocks_to_store(&mut block_payload_store, num_blocks_in_store, 0, false); + + // Verify the number of blocks in the block payload store + check_num_unverified_payloads(&block_payload_store, num_blocks_in_store); + check_num_verified_payloads(&block_payload_store, 0); + + // Add more blocks to the payload store + let num_blocks_to_add = 5; + create_and_add_blocks_to_store(&mut block_payload_store, num_blocks_to_add, 0, false); + + // Verify the number of blocks in the block payload store + check_num_unverified_payloads(&block_payload_store, max_num_pending_blocks as usize); + check_num_verified_payloads(&block_payload_store, 0); + + // Add a large number of blocks to the payload store + let num_blocks_to_add = 100; + create_and_add_blocks_to_store(&mut block_payload_store, num_blocks_to_add, 0, false); + + // Verify the number of blocks in the block payload store + check_num_unverified_payloads(&block_payload_store, max_num_pending_blocks as usize); + check_num_verified_payloads(&block_payload_store, 0); + } + + #[test] + fn test_remove_blocks_for_epoch_round_verified() { + // Create a new block payload store + let consensus_observer_config = ConsensusObserverConfig::default(); + let mut block_payload_store = BlockPayloadStore::new(consensus_observer_config); + + // Add some 
verified blocks to the payload store for the current epoch + let current_epoch = 0; + let num_blocks_in_store = 100; + let verified_blocks = create_and_add_blocks_to_store( + &mut block_payload_store, + num_blocks_in_store, + current_epoch, + true, + ); + + // Remove all the blocks for the given epoch and round + block_payload_store.remove_blocks_for_epoch_round(current_epoch, 49); + + // Check that the block payload store no longer contains the removed blocks + let block_payloads = block_payload_store.get_block_payloads(); + for verified_block in verified_blocks.iter().take(50) { + assert!(!block_payloads + .lock() + .contains_key(&(verified_block.epoch(), verified_block.round()))); + } + + // Verify the number of blocks in the block payload store + check_num_verified_payloads(&block_payload_store, num_blocks_in_store - 50); + + // Remove all the blocks for the given epoch and round + block_payload_store + .remove_blocks_for_epoch_round(current_epoch, num_blocks_in_store as Round); + + // Check that the block payload store no longer contains any blocks + let block_payloads = block_payload_store.get_block_payloads(); + assert!(block_payloads.lock().is_empty()); + + // Verify the number of blocks in the block payload store + check_num_verified_payloads(&block_payload_store, 0); + + // Add some verified blocks to the payload store for the next epoch + let next_epoch = current_epoch + 1; + create_and_add_blocks_to_store( + &mut block_payload_store, + num_blocks_in_store, + next_epoch, + true, + ); + + // Remove all the blocks for the future epoch and round + let future_epoch = next_epoch + 1; + block_payload_store.remove_blocks_for_epoch_round(future_epoch, 0); + + // Verify the store is now empty + check_num_verified_payloads(&block_payload_store, 0); + } + + #[test] + fn test_remove_blocks_for_epoch_round_unverified() { + // Create a new block payload store + let consensus_observer_config = ConsensusObserverConfig::default(); + let mut block_payload_store = 
BlockPayloadStore::new(consensus_observer_config); + + // Add some unverified blocks to the payload store for the current epoch + let current_epoch = 10; + let num_blocks_in_store = 100; + let unverified_blocks = create_and_add_blocks_to_store( + &mut block_payload_store, + num_blocks_in_store, + current_epoch, + false, + ); + + // Remove all the blocks for the given epoch and round + block_payload_store.remove_blocks_for_epoch_round(current_epoch, 49); + + // Check that the block payload store no longer contains the removed blocks + for unverified_block in unverified_blocks.iter().take(50) { + assert!(!block_payload_store + .block_payloads + .lock() + .contains_key(&(unverified_block.epoch(), unverified_block.round()))); + } + + // Verify the number of blocks in the block payload store + check_num_unverified_payloads(&block_payload_store, num_blocks_in_store - 50); + + // Remove all the blocks for the given epoch and round + block_payload_store + .remove_blocks_for_epoch_round(current_epoch, num_blocks_in_store as Round); + + // Check that the block payload store no longer contains any blocks + assert!(block_payload_store.block_payloads.lock().is_empty()); + + // Verify the number of blocks in the block payload store + check_num_unverified_payloads(&block_payload_store, 0); + + // Add some unverified blocks to the payload store for the next epoch + let next_epoch = current_epoch + 1; + create_and_add_blocks_to_store( + &mut block_payload_store, + num_blocks_in_store, + next_epoch, + false, + ); + + // Remove all the blocks for the future epoch and round + let future_epoch = next_epoch + 10; + block_payload_store.remove_blocks_for_epoch_round(future_epoch, 0); + + // Verify the store is now empty + check_num_unverified_payloads(&block_payload_store, 0); + } + + #[test] + fn test_remove_committed_blocks_verified() { + // Create a new block payload store + let consensus_observer_config = ConsensusObserverConfig::default(); + let mut block_payload_store = 
BlockPayloadStore::new(consensus_observer_config); + + // Add some blocks to the payload store for the current epoch + let current_epoch = 0; + let num_blocks_in_store = 100; + let verified_blocks = create_and_add_blocks_to_store( + &mut block_payload_store, + num_blocks_in_store, + current_epoch, + true, + ); + + // Remove the first block from the block payload store + block_payload_store.remove_committed_blocks(&verified_blocks[0..1]); + + // Check that the block payload store no longer contains the removed block + let block_payloads = block_payload_store.get_block_payloads(); + let removed_block = &verified_blocks[0]; + assert!(!block_payloads + .lock() + .contains_key(&(removed_block.epoch(), removed_block.round()))); + + // Verify the number of blocks in the block payload store + check_num_verified_payloads(&block_payload_store, num_blocks_in_store - 1); + + // Remove the last 5 blocks from the block payload store + block_payload_store.remove_committed_blocks(&verified_blocks[5..10]); + + // Check that the block payload store no longer contains the removed blocks + let block_payloads = block_payload_store.get_block_payloads(); + for verified_block in verified_blocks.iter().take(10).skip(5) { + assert!(!block_payloads + .lock() + .contains_key(&(verified_block.epoch(), verified_block.round()))); + } + + // Verify the number of blocks in the block payload store + check_num_verified_payloads(&block_payload_store, num_blocks_in_store - 10); + + // Remove all the blocks from the block payload store (including some that don't exist) + block_payload_store.remove_committed_blocks(&verified_blocks[0..num_blocks_in_store]); + + // Check that the block payload store no longer contains any blocks + let block_payloads = block_payload_store.get_block_payloads(); + assert!(block_payloads.lock().is_empty()); + + // Verify the number of blocks in the block payload store + check_num_verified_payloads(&block_payload_store, 0); + + // Add some blocks to the payload store for the 
next epoch + let next_epoch = 1; + let verified_blocks = create_and_add_blocks_to_store( + &mut block_payload_store, + num_blocks_in_store, + next_epoch, + true, + ); + + // Remove the last committed block from the future epoch + block_payload_store.remove_committed_blocks(&verified_blocks[99..100]); + + // Check that the block payload store is now empty + check_num_verified_payloads(&block_payload_store, 0); + } + + #[test] + fn test_remove_committed_blocks_unverified() { + // Create a new block payload store + let consensus_observer_config = ConsensusObserverConfig::default(); + let mut block_payload_store = BlockPayloadStore::new(consensus_observer_config); + + // Add some blocks to the payload store for the current epoch + let current_epoch = 10; + let num_blocks_in_store = 100; + let unverified_blocks = create_and_add_blocks_to_store( + &mut block_payload_store, + num_blocks_in_store, + current_epoch, + false, + ); + + // Remove the first block from the block payload store + block_payload_store.remove_committed_blocks(&unverified_blocks[0..1]); + + // Check that the block payload store no longer contains the removed block + let removed_block = &unverified_blocks[0]; + assert!(!block_payload_store + .block_payloads + .lock() + .contains_key(&(removed_block.epoch(), removed_block.round()))); + + // Verify the number of blocks in the block payload store + check_num_unverified_payloads(&block_payload_store, num_blocks_in_store - 1); + + // Remove the last 5 blocks from the block payload store + block_payload_store.remove_committed_blocks(&unverified_blocks[5..10]); + + // Check that the block payload store no longer contains the removed blocks + for verified_block in unverified_blocks.iter().take(10).skip(5) { + assert!(!block_payload_store + .block_payloads + .lock() + .contains_key(&(verified_block.epoch(), verified_block.round()))); + } + + // Verify the number of blocks in the block payload store + check_num_unverified_payloads(&block_payload_store, 
num_blocks_in_store - 10); + + // Remove all the blocks from the block payload store (including some that don't exist) + block_payload_store.remove_committed_blocks(&unverified_blocks[0..num_blocks_in_store]); + + // Check that the block payload store no longer contains any blocks + assert!(block_payload_store.block_payloads.lock().is_empty()); + + // Verify the number of blocks in the block payload store + check_num_unverified_payloads(&block_payload_store, 0); + + // Add some blocks to the payload store for the next epoch + let next_epoch = 11; + let unverified_blocks = create_and_add_blocks_to_store( + &mut block_payload_store, + num_blocks_in_store, + next_epoch, + false, + ); + + // Remove the last committed block from the future epoch + block_payload_store.remove_committed_blocks(&unverified_blocks[99..100]); + + // Check that the block payload store is now empty + check_num_unverified_payloads(&block_payload_store, 0); + } + + #[test] + fn test_verify_payload_signatures() { + // Create a new block payload store + let consensus_observer_config = ConsensusObserverConfig::default(); + let mut block_payload_store = BlockPayloadStore::new(consensus_observer_config); + + // Add some verified blocks for the current epoch + let current_epoch = 0; + let num_verified_blocks = 10; + create_and_add_blocks_to_store( + &mut block_payload_store, + num_verified_blocks, + current_epoch, + true, + ); + + // Add some unverified blocks for the next epoch + let next_epoch = current_epoch + 1; + let num_unverified_blocks = 20; + let unverified_blocks = create_and_add_blocks_to_store( + &mut block_payload_store, + num_unverified_blocks, + next_epoch, + false, + ); + + // Add some unverified blocks for a future epoch + let future_epoch = current_epoch + 30; + let num_future_blocks = 30; + let future_unverified_blocks = create_and_add_blocks_to_store( + &mut block_payload_store, + num_future_blocks, + future_epoch, + false, + ); + + // Create an epoch state for the next epoch (with 
an empty verifier) + let epoch_state = EpochState::new(next_epoch, ValidatorVerifier::new(vec![])); + + // Verify the block payload signatures + let verified_rounds = block_payload_store.verify_payload_signatures(&epoch_state); + + // Verify the unverified payloads were moved to the verified store + assert!(block_payload_store.all_payloads_exist(&unverified_blocks)); + assert_eq!( + get_num_verified_payloads(&block_payload_store), + num_verified_blocks + num_unverified_blocks + ); + assert_eq!( + get_num_unverified_payloads(&block_payload_store), + num_future_blocks + ); + + // Check the rounds of the newly verified payloads + let expected_verified_rounds = unverified_blocks + .iter() + .map(|block| block.round()) + .collect::>(); + assert_eq!(verified_rounds, expected_verified_rounds); + + // Clear the verified blocks and check the verified blocks are empty + block_payload_store.remove_committed_blocks(&unverified_blocks); + assert_eq!(get_num_verified_payloads(&block_payload_store), 0); + + // Create an epoch state for the future epoch (with an empty verifier) + let epoch_state = EpochState::new(future_epoch, ValidatorVerifier::new(vec![])); + + // Verify the block payload signatures for a future epoch + let verified_rounds = block_payload_store.verify_payload_signatures(&epoch_state); + + // Verify the future unverified payloads were moved to the verified store + assert!(block_payload_store.all_payloads_exist(&future_unverified_blocks)); + assert_eq!( + get_num_verified_payloads(&block_payload_store), + num_future_blocks + ); + assert_eq!(get_num_unverified_payloads(&block_payload_store), 0); + + // Check the rounds of the newly verified payloads + let expected_verified_rounds = future_unverified_blocks + .iter() + .map(|block| block.round()) + .collect::>(); + assert_eq!(verified_rounds, expected_verified_rounds); + } + + #[test] + fn test_verify_payloads_against_ordered_block() { + // Create a new block payload store + let consensus_observer_config = 
ConsensusObserverConfig::default(); + let mut block_payload_store = BlockPayloadStore::new(consensus_observer_config); + + // Add some verified blocks for the current epoch + let current_epoch = 0; + let num_verified_blocks = 10; + let verified_blocks = create_and_add_blocks_to_store( + &mut block_payload_store, + num_verified_blocks, + current_epoch, + true, + ); + + // Create an ordered block using the verified blocks + let ordered_block = OrderedBlock::new( + verified_blocks.clone(), + create_empty_ledger_info(current_epoch), + ); + + // Verify the ordered block and ensure it passes + block_payload_store + .verify_payloads_against_ordered_block(&ordered_block) + .unwrap(); + + // Mark the first block payload as unverified + mark_payload_as_unverified(&block_payload_store, &verified_blocks[0]); + + // Verify the ordered block and ensure it fails (since the payloads are unverified) + let error = block_payload_store + .verify_payloads_against_ordered_block(&ordered_block) + .unwrap_err(); + assert_matches!(error, Error::InvalidMessageError(_)); + + // Clear the block payload store + block_payload_store.clear_all_payloads(); + + // Verify the ordered block and ensure it fails (since the payloads are missing) + let error = block_payload_store + .verify_payloads_against_ordered_block(&ordered_block) + .unwrap_err(); + assert_matches!(error, Error::InvalidMessageError(_)); + } + + #[test] + fn test_verify_payload_signatures_failure() { + // Create a new block payload store + let consensus_observer_config = ConsensusObserverConfig::default(); + let mut block_payload_store = BlockPayloadStore::new(consensus_observer_config); + + // Add some verified blocks for the current epoch + let current_epoch = 10; + let num_verified_blocks = 6; + create_and_add_blocks_to_store( + &mut block_payload_store, + num_verified_blocks, + current_epoch, + true, + ); + + // Add some unverified blocks for the next epoch + let next_epoch = current_epoch + 1; + let num_unverified_blocks = 15; + 
let unverified_blocks = create_and_add_blocks_to_store( + &mut block_payload_store, + num_unverified_blocks, + next_epoch, + false, + ); + + // Add some unverified blocks for a future epoch + let future_epoch = next_epoch + 1; + let num_future_blocks = 10; + let unverified_future_blocks = create_and_add_blocks_to_store( + &mut block_payload_store, + num_future_blocks, + future_epoch, + false, + ); + + // Create an epoch state for the next epoch (with a non-empty verifier) + let validator_signer = ValidatorSigner::random(None); + let validator_consensus_info = ValidatorConsensusInfo::new( + validator_signer.author(), + validator_signer.public_key(), + 100, + ); + let validator_verifier = ValidatorVerifier::new(vec![validator_consensus_info]); + let epoch_state = EpochState::new(next_epoch, validator_verifier.clone()); + + // Verify the block payload signatures (for this epoch) + block_payload_store.verify_payload_signatures(&epoch_state); + + // Ensure the unverified payloads were not verified + assert!(!block_payload_store.all_payloads_exist(&unverified_blocks)); + + // Ensure the unverified payloads were all removed (for this epoch) + assert_eq!( + get_num_unverified_payloads(&block_payload_store), + num_future_blocks + ); + + // Create an epoch state for the future epoch (with a non-empty verifier) + let epoch_state = EpochState::new(future_epoch, validator_verifier); + + // Verify the block payload signatures (for the future epoch) + block_payload_store.verify_payload_signatures(&epoch_state); + + // Ensure the future unverified payloads were not verified + assert!(!block_payload_store.all_payloads_exist(&unverified_future_blocks)); + + // Ensure the future unverified payloads were all removed (for the future epoch) + assert_eq!(get_num_unverified_payloads(&block_payload_store), 0); + } + + /// Creates and adds the given number of blocks to the block payload store + fn create_and_add_blocks_to_store( + block_payload_store: &mut BlockPayloadStore, + num_blocks: 
usize, + epoch: u64, + verified_payload_signatures: bool, + ) -> Vec> { + let mut pipelined_blocks = vec![]; + for i in 0..num_blocks { + // Create the block info + let block_info = BlockInfo::new( + epoch, + i as Round, + HashValue::random(), + HashValue::random(), + i as Version, + i as u64, + None, + ); + + // Create the block transaction payload with proofs of store + let mut proofs_of_store = vec![]; + for _ in 0..10 { + let batch_info = BatchInfo::new( + PeerId::random(), + BatchId::new(0), + epoch, + 0, + HashValue::random(), + 0, + 0, + 0, + ); + proofs_of_store.push(ProofOfStore::new(batch_info, AggregateSignature::empty())); + } + let block_transaction_payload = BlockTransactionPayload::new_quorum_store_inline_hybrid( + vec![], + proofs_of_store.clone(), + None, + vec![], + ); + + // Insert the block payload into the store + let block_payload = BlockPayload::new(block_info.clone(), block_transaction_payload); + block_payload_store.insert_block_payload(block_payload, verified_payload_signatures); + + // Create the block type + let payload = Payload::InQuorumStore(ProofWithData::new(proofs_of_store)); + let block_type = BlockType::DAGBlock { + author: Author::random(), + failed_authors: vec![], + validator_txns: vec![], + payload, + node_digests: vec![], + parent_block_id: HashValue::random(), + parents_bitvec: BitVec::with_num_bits(0), + }; + + // Create the equivalent pipelined block + let block_data = BlockData::new_for_testing( + block_info.epoch(), + block_info.round(), + block_info.timestamp_usecs(), + QuorumCert::dummy(), + block_type, + ); + let block = Block::new_for_testing(block_info.id(), block_data, None); + let pipelined_block = Arc::new(PipelinedBlock::new_ordered(block)); + + // Add the pipelined block to the list + pipelined_blocks.push(pipelined_block.clone()); + } + + pipelined_blocks + } + + /// Creates a new block payload with the given epoch and round + fn create_block_payload(epoch: u64, round: Round) -> BlockPayload { + let 
block_info = BlockInfo::random_with_epoch(epoch, round); + BlockPayload::new(block_info, BlockTransactionPayload::empty()) + } + + /// Checks the number of unverified payloads in the block payload store + fn check_num_unverified_payloads( + block_payload_store: &BlockPayloadStore, + expected_num_payloads: usize, + ) { + let num_payloads = get_num_unverified_payloads(block_payload_store); + assert_eq!(num_payloads, expected_num_payloads); + } + + /// Checks the number of verified payloads in the block payload store + fn check_num_verified_payloads( + block_payload_store: &BlockPayloadStore, + expected_num_payloads: usize, + ) { + let num_payloads = get_num_verified_payloads(block_payload_store); + assert_eq!(num_payloads, expected_num_payloads); + } + + /// Creates and returns a new ledger info with an empty signature set + fn create_empty_ledger_info(epoch: u64) -> LedgerInfoWithSignatures { + LedgerInfoWithSignatures::new( + LedgerInfo::new(BlockInfo::random_with_epoch(epoch, 0), HashValue::random()), + AggregateSignature::empty(), + ) + } + + /// Returns the number of unverified payloads in the block payload store + fn get_num_unverified_payloads(block_payload_store: &BlockPayloadStore) -> usize { + let mut num_unverified_payloads = 0; + for (_, block_payload_status) in block_payload_store.block_payloads.lock().iter() { + if let BlockPayloadStatus::AvailableAndUnverified(_) = block_payload_status { + num_unverified_payloads += 1; + } + } + num_unverified_payloads + } + + /// Returns the number of verified payloads in the block payload store + fn get_num_verified_payloads(block_payload_store: &BlockPayloadStore) -> usize { + let mut num_verified_payloads = 0; + for (_, block_payload_status) in block_payload_store.block_payloads.lock().iter() { + if let BlockPayloadStatus::AvailableAndVerified(_) = block_payload_status { + num_verified_payloads += 1; + } + } + num_verified_payloads + } + + /// Marks the payload of the given block as unverified + fn 
mark_payload_as_unverified( + block_payload_store: &BlockPayloadStore, + block: &Arc, + ) { + // Get the payload entry for the given block + let block_payloads = block_payload_store.get_block_payloads(); + let mut block_payloads = block_payloads.lock(); + let block_payload = block_payloads + .get_mut(&(block.epoch(), block.round())) + .unwrap(); + + // Mark the block payload as unverified + *block_payload = BlockPayloadStatus::AvailableAndUnverified(BlockPayload::new( + block.block_info(), + BlockTransactionPayload::empty(), + )); + } +} diff --git a/consensus/src/consensus_observer/observer/pending_blocks.rs b/consensus/src/consensus_observer/observer/pending_blocks.rs new file mode 100644 index 0000000000000..2a7ebbde0519f --- /dev/null +++ b/consensus/src/consensus_observer/observer/pending_blocks.rs @@ -0,0 +1,890 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::consensus_observer::{ + common::{ + logging::{LogEntry, LogSchema}, + metrics, + }, + network::observer_message::OrderedBlock, + observer::payload_store::BlockPayloadStore, +}; +use aptos_config::config::ConsensusObserverConfig; +use aptos_infallible::Mutex; +use aptos_logger::{info, warn}; +use aptos_types::block_info::Round; +use std::{ + collections::{btree_map::Entry, BTreeMap}, + sync::Arc, +}; + +/// A simple struct to hold blocks that are waiting for payloads +pub struct PendingBlockStore { + // The configuration of the consensus observer + consensus_observer_config: ConsensusObserverConfig, + + // A map of ordered blocks that are without payloads. The key is + // the (epoch, round) of the first block in the ordered block. 
+ blocks_without_payloads: BTreeMap<(u64, Round), OrderedBlock>, +} + +impl PendingBlockStore { + pub fn new(consensus_observer_config: ConsensusObserverConfig) -> Self { + Self { + consensus_observer_config, + blocks_without_payloads: BTreeMap::new(), + } + } + + /// Clears all missing blocks from the store + pub fn clear_missing_blocks(&mut self) { + self.blocks_without_payloads.clear(); + } + + /// Returns true iff the store contains an entry for the given ordered block + pub fn existing_pending_block(&self, ordered_block: &OrderedBlock) -> bool { + // Get the epoch and round of the first block + let first_block = ordered_block.first_block(); + let first_block_epoch_round = (first_block.epoch(), first_block.round()); + + // Check if the block is already in the store + self.blocks_without_payloads + .contains_key(&first_block_epoch_round) + } + + /// Inserts a block (without payloads) into the store + pub fn insert_pending_block(&mut self, ordered_block: OrderedBlock) { + // Get the epoch and round of the first block + let first_block = ordered_block.first_block(); + let first_block_epoch_round = (first_block.epoch(), first_block.round()); + + // Insert the block into the store using the round of the first block + match self.blocks_without_payloads.entry(first_block_epoch_round) { + Entry::Occupied(_) => { + // The block is already in the store + warn!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "A pending block was already found for the given epoch and round: {:?}", + first_block_epoch_round + )) + ); + }, + Entry::Vacant(entry) => { + // Insert the block into the store + entry.insert(ordered_block); + }, + } + + // Perform garbage collection if the store is too large + self.garbage_collect_pending_blocks(); + } + + /// Garbage collects the pending blocks store by removing + /// the oldest blocks if the store is too large. 
+ fn garbage_collect_pending_blocks(&mut self) { + // Calculate the number of blocks to remove + let num_pending_blocks = self.blocks_without_payloads.len() as u64; + let max_pending_blocks = self.consensus_observer_config.max_num_pending_blocks; + let num_blocks_to_remove = num_pending_blocks.saturating_sub(max_pending_blocks); + + // Remove the oldest blocks if the store is too large + for _ in 0..num_blocks_to_remove { + if let Some((oldest_epoch_round, _)) = self.blocks_without_payloads.pop_first() { + warn!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "The pending block store is too large: {:?} blocks. Removing the block for the oldest epoch and round: {:?}", + num_pending_blocks, oldest_epoch_round + )) + ); + } + } + } + + /// Removes and returns the block from the store that is now ready + /// to be processed (after the new payload has been received). + pub fn remove_ready_block( + &mut self, + received_payload_epoch: u64, + received_payload_round: Round, + block_payload_store: Arc>, + ) -> Option { + // Calculate the round at which to split the blocks + let split_round = received_payload_round.saturating_add(1); + + // Split the blocks at the epoch and round + let mut blocks_at_higher_rounds = self + .blocks_without_payloads + .split_off(&(received_payload_epoch, split_round)); + + // Check if the last block is ready (this should be the only ready block). + // Any earlier blocks are considered out-of-date and will be dropped. 
+ let mut ready_block = None; + if let Some((epoch_and_round, ordered_block)) = self.blocks_without_payloads.pop_last() { + // If all payloads exist for the block, then the block is ready + if block_payload_store + .lock() + .all_payloads_exist(ordered_block.blocks()) + { + ready_block = Some(ordered_block); + } else { + // Otherwise, check if we're still waiting for higher payloads for the block + if ordered_block.last_block().round() > received_payload_round { + blocks_at_higher_rounds.insert(epoch_and_round, ordered_block); + } + } + } + + // Check if any out-of-date blocks were dropped + if !self.blocks_without_payloads.is_empty() { + info!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Dropped {:?} out-of-date pending blocks before epoch and round: {:?}", + self.blocks_without_payloads.len(), + (received_payload_epoch, received_payload_round) + )) + ); + } + + // Update the pending blocks to only include the blocks at higher rounds + self.blocks_without_payloads = blocks_at_higher_rounds; + + // Return the ready block (if one exists) + ready_block + } + + /// Updates the metrics for the pending blocks + pub fn update_pending_blocks_metrics(&self) { + // Update the number of pending block entries + let num_entries = self.blocks_without_payloads.len() as u64; + metrics::set_gauge_with_label( + &metrics::OBSERVER_NUM_PROCESSED_BLOCKS, + metrics::PENDING_BLOCK_ENTRIES_LABEL, + num_entries, + ); + + // Update the total number of pending blocks + let num_pending_blocks = self + .blocks_without_payloads + .values() + .map(|block| block.blocks().len() as u64) + .sum(); + metrics::set_gauge_with_label( + &metrics::OBSERVER_NUM_PROCESSED_BLOCKS, + metrics::PENDING_BLOCKS_LABEL, + num_pending_blocks, + ); + + // Update the highest round for the pending blocks + let highest_pending_round = self + .blocks_without_payloads + .last_key_value() + .map(|(_, pending_block)| pending_block.last_block().round()) + .unwrap_or(0); + 
metrics::set_gauge_with_label( + &metrics::OBSERVER_PROCESSED_BLOCK_ROUNDS, + metrics::PENDING_BLOCKS_LABEL, + highest_pending_round, + ); + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::consensus_observer::{ + network::observer_message::{BlockPayload, BlockTransactionPayload}, + observer::payload_store::BlockPayloadStore, + }; + use aptos_consensus_types::{ + block::Block, + block_data::{BlockData, BlockType}, + pipelined_block::PipelinedBlock, + quorum_cert::QuorumCert, + }; + use aptos_crypto::HashValue; + use aptos_types::{ + aggregate_signature::AggregateSignature, + block_info::BlockInfo, + ledger_info::{LedgerInfo, LedgerInfoWithSignatures}, + }; + use rand::Rng; + + #[test] + fn test_clear_missing_blocks() { + // Create a new pending block store + let max_num_pending_blocks = 10; + let consensus_observer_config = ConsensusObserverConfig { + max_num_pending_blocks: max_num_pending_blocks as u64, + ..ConsensusObserverConfig::default() + }; + let pending_block_store = Arc::new(Mutex::new(PendingBlockStore::new( + consensus_observer_config, + ))); + + // Insert the maximum number of blocks into the store + let current_epoch = 0; + let starting_round = 0; + let missing_blocks = create_and_add_pending_blocks( + pending_block_store.clone(), + max_num_pending_blocks, + current_epoch, + starting_round, + 5, + ); + + // Verify that the store is not empty + verify_pending_blocks( + pending_block_store.clone(), + max_num_pending_blocks, + &missing_blocks, + ); + + // Clear the missing blocks from the store + pending_block_store.lock().clear_missing_blocks(); + + // Verify that the store is now empty + assert!(pending_block_store + .lock() + .blocks_without_payloads + .is_empty()); + } + + #[test] + fn test_existing_pending_block() { + // Create a new pending block store + let max_num_pending_blocks = 10; + let consensus_observer_config = ConsensusObserverConfig { + max_num_pending_blocks: max_num_pending_blocks as u64, + 
..ConsensusObserverConfig::default() + }; + let pending_block_store = Arc::new(Mutex::new(PendingBlockStore::new( + ConsensusObserverConfig::default(), + ))); + + // Insert the maximum number of blocks into the store + let current_epoch = 10; + let starting_round = 100; + let pending_blocks = create_and_add_pending_blocks( + pending_block_store.clone(), + max_num_pending_blocks, + current_epoch, + starting_round, + 5, + ); + + // Verify that all blocks were inserted correctly + for pending_block in &pending_blocks { + assert!(pending_block_store + .lock() + .existing_pending_block(pending_block)); + } + + // Create a new block payload store and insert payloads for the second block + let block_payload_store = Arc::new(Mutex::new(BlockPayloadStore::new( + consensus_observer_config, + ))); + let second_block = pending_blocks[1].clone(); + insert_payloads_for_ordered_block(block_payload_store.clone(), &second_block); + + // Remove the second block (which is now ready) + let payload_round = second_block.first_block().round(); + let ready_block = pending_block_store.lock().remove_ready_block( + current_epoch, + payload_round, + block_payload_store.clone(), + ); + assert_eq!(ready_block, Some(second_block)); + + // Verify that the first and second blocks were removed + verify_pending_blocks( + pending_block_store.clone(), + max_num_pending_blocks - 2, + &pending_blocks[2..].to_vec(), + ); + + // Verify that the first and second blocks are no longer in the store + for pending_block in &pending_blocks[..2] { + assert!(!pending_block_store + .lock() + .existing_pending_block(pending_block)); + } + } + + #[test] + fn test_insert_pending_block() { + // Create a new pending block store + let max_num_pending_blocks = 10; + let consensus_observer_config = ConsensusObserverConfig { + max_num_pending_blocks: max_num_pending_blocks as u64, + ..ConsensusObserverConfig::default() + }; + let pending_block_store = Arc::new(Mutex::new(PendingBlockStore::new( + consensus_observer_config, 
+ ))); + + // Insert the maximum number of blocks into the store + let current_epoch = 0; + let starting_round = 0; + let pending_blocks = create_and_add_pending_blocks( + pending_block_store.clone(), + max_num_pending_blocks, + current_epoch, + starting_round, + 5, + ); + + // Verify that all blocks were inserted correctly + verify_pending_blocks( + pending_block_store.clone(), + max_num_pending_blocks, + &pending_blocks, + ); + + // Insert the maximum number of blocks into the store again + let starting_round = (max_num_pending_blocks * 100) as Round; + let pending_blocks = create_and_add_pending_blocks( + pending_block_store.clone(), + max_num_pending_blocks, + current_epoch, + starting_round, + 5, + ); + + // Verify that all blocks were inserted correctly + verify_pending_blocks( + pending_block_store.clone(), + max_num_pending_blocks, + &pending_blocks, + ); + + // Insert one more block into the store (for the next epoch) + let next_epoch = 1; + let starting_round = 0; + let new_pending_block = create_and_add_pending_blocks( + pending_block_store.clone(), + 1, + next_epoch, + starting_round, + 5, + ); + + // Verify the new block was inserted correctly + verify_pending_blocks( + pending_block_store.clone(), + max_num_pending_blocks, + &new_pending_block, + ); + } + + #[test] + fn test_garbage_collect_pending_blocks() { + // Create a new pending block store + let max_num_pending_blocks = 100; + let consensus_observer_config = ConsensusObserverConfig { + max_num_pending_blocks: max_num_pending_blocks as u64, + ..ConsensusObserverConfig::default() + }; + let pending_block_store = Arc::new(Mutex::new(PendingBlockStore::new( + consensus_observer_config, + ))); + + // Insert the maximum number of blocks into the store + let current_epoch = 0; + let starting_round = 200; + let mut pending_blocks = create_and_add_pending_blocks( + pending_block_store.clone(), + max_num_pending_blocks, + current_epoch, + starting_round, + 5, + ); + + // Verify that all blocks were 
inserted correctly + verify_pending_blocks( + pending_block_store.clone(), + max_num_pending_blocks, + &pending_blocks, + ); + + // Insert multiple blocks into the store (one at a time) and + // verify that the oldest block is garbage collected each time. + for i in 0..20 { + // Insert one more block into the store + let starting_round = ((max_num_pending_blocks * 10) + (i * 100)) as Round; + let new_pending_block = create_and_add_pending_blocks( + pending_block_store.clone(), + 1, + current_epoch, + starting_round, + 5, + ); + + // Verify the new block was inserted correctly + verify_pending_blocks( + pending_block_store.clone(), + max_num_pending_blocks, + &new_pending_block, + ); + + // Get the round of the oldest block (that was garbage collected) + let oldest_block = pending_blocks.remove(0); + let oldest_block_round = oldest_block.first_block().round(); + + // Verify that the oldest block was garbage collected + let blocks_without_payloads = + pending_block_store.lock().blocks_without_payloads.clone(); + assert!(!blocks_without_payloads.contains_key(&(current_epoch, oldest_block_round))); + } + + // Insert multiple blocks into the store (for the next epoch) and + // verify that the oldest block is garbage collected each time. 
+ let next_epoch = 1; + for i in 0..20 { + // Insert one more block into the store + let starting_round = i; + let new_pending_block = create_and_add_pending_blocks( + pending_block_store.clone(), + 1, + next_epoch, + starting_round, + 5, + ); + + // Verify the new block was inserted correctly + verify_pending_blocks( + pending_block_store.clone(), + max_num_pending_blocks, + &new_pending_block, + ); + + // Get the round of the oldest block (that was garbage collected) + let oldest_block = pending_blocks.remove(0); + let oldest_block_round = oldest_block.first_block().round(); + + // Verify that the oldest block was garbage collected + let blocks_without_payloads = + pending_block_store.lock().blocks_without_payloads.clone(); + assert!(!blocks_without_payloads.contains_key(&(current_epoch, oldest_block_round))); + } + } + + #[test] + fn test_remove_ready_block_multiple_blocks() { + // Create a new pending block store + let max_num_pending_blocks = 40; + let consensus_observer_config = ConsensusObserverConfig { + max_num_pending_blocks: max_num_pending_blocks as u64, + ..ConsensusObserverConfig::default() + }; + let pending_block_store = Arc::new(Mutex::new(PendingBlockStore::new( + consensus_observer_config, + ))); + + // Insert the maximum number of blocks into the store + let current_epoch = 0; + let starting_round = 0; + let pending_blocks = create_and_add_pending_blocks( + pending_block_store.clone(), + max_num_pending_blocks, + current_epoch, + starting_round, + 5, + ); + + // Create a new block payload store and insert payloads for the second block + let block_payload_store = Arc::new(Mutex::new(BlockPayloadStore::new( + consensus_observer_config, + ))); + let second_block = pending_blocks[1].clone(); + insert_payloads_for_ordered_block(block_payload_store.clone(), &second_block); + + // Remove the second block (which is now ready) + let payload_round = second_block.first_block().round(); + let ready_block = pending_block_store.lock().remove_ready_block( + 
current_epoch, + payload_round, + block_payload_store.clone(), + ); + assert_eq!(ready_block, Some(second_block)); + + // Verify that the first and second blocks were removed + verify_pending_blocks( + pending_block_store.clone(), + max_num_pending_blocks - 2, + &pending_blocks[2..].to_vec(), + ); + + // Insert payloads for the last block + let last_block = pending_blocks.last().unwrap().clone(); + insert_payloads_for_ordered_block(block_payload_store.clone(), &last_block); + + // Remove the last block (which is now ready) + let payload_round = last_block.first_block().round(); + let ready_block = pending_block_store.lock().remove_ready_block( + current_epoch, + payload_round, + block_payload_store.clone(), + ); + + // Verify that the last block was removed + assert_eq!(ready_block, Some(last_block)); + + // Verify that the store is empty + verify_pending_blocks(pending_block_store.clone(), 0, &vec![]); + } + + #[test] + fn test_remove_ready_block_multiple_blocks_missing() { + // Create a new pending block store + let max_num_pending_blocks = 10; + let consensus_observer_config = ConsensusObserverConfig { + max_num_pending_blocks: max_num_pending_blocks as u64, + ..ConsensusObserverConfig::default() + }; + let pending_block_store = Arc::new(Mutex::new(PendingBlockStore::new( + consensus_observer_config, + ))); + + // Insert the maximum number of blocks into the store + let current_epoch = 10; + let starting_round = 100; + let pending_blocks = create_and_add_pending_blocks( + pending_block_store.clone(), + max_num_pending_blocks, + current_epoch, + starting_round, + 5, + ); + + // Create an empty block payload store + let block_payload_store = Arc::new(Mutex::new(BlockPayloadStore::new( + consensus_observer_config, + ))); + + // Incrementally insert and process each payload for the first block + let first_block = pending_blocks.first().unwrap().clone(); + for block in first_block.blocks().clone() { + // Insert the block + let block_payload = + 
BlockPayload::new(block.block_info(), BlockTransactionPayload::empty()); + block_payload_store + .lock() + .insert_block_payload(block_payload, true); + + // Attempt to remove the block (which might not be ready) + let payload_round = block.round(); + let ready_block = pending_block_store.lock().remove_ready_block( + current_epoch, + payload_round, + block_payload_store.clone(), + ); + + // If the block is ready, verify that it was removed. + // Otherwise, verify that the block still remains. + if payload_round == first_block.last_block().round() { + // The block should be ready + assert_eq!(ready_block, Some(first_block.clone())); + + // Verify that the block was removed + verify_pending_blocks( + pending_block_store.clone(), + max_num_pending_blocks - 1, + &pending_blocks[1..].to_vec(), + ); + } else { + // The block should not be ready + assert!(ready_block.is_none()); + + // Verify that the block still remains + verify_pending_blocks( + pending_block_store.clone(), + max_num_pending_blocks, + &pending_blocks, + ); + } + } + + // Incrementally insert and process payloads for the last block (except one) + let last_block = pending_blocks.last().unwrap().clone(); + for block in last_block.blocks().clone() { + // Insert the block only if this is not the first block + let payload_round = block.round(); + if payload_round != last_block.first_block().round() { + let block_payload = + BlockPayload::new(block.block_info(), BlockTransactionPayload::empty()); + block_payload_store + .lock() + .insert_block_payload(block_payload, true); + } + + // Attempt to remove the block (which might not be ready) + let ready_block = pending_block_store.lock().remove_ready_block( + current_epoch, + payload_round, + block_payload_store.clone(), + ); + + // The block should not be ready + assert!(ready_block.is_none()); + + // Verify that the block still remains or has been removed on the last insert + if payload_round == last_block.last_block().round() { + 
verify_pending_blocks(pending_block_store.clone(), 0, &vec![]); + } else { + verify_pending_blocks(pending_block_store.clone(), 1, &vec![last_block.clone()]); + } + } + + // Verify that the store is now empty + verify_pending_blocks(pending_block_store.clone(), 0, &vec![]); + } + + #[test] + fn test_remove_ready_block_singular_blocks() { + // Create a new pending block store + let max_num_pending_blocks = 10; + let consensus_observer_config = ConsensusObserverConfig { + max_num_pending_blocks: max_num_pending_blocks as u64, + ..ConsensusObserverConfig::default() + }; + let pending_block_store = Arc::new(Mutex::new(PendingBlockStore::new( + consensus_observer_config, + ))); + + // Insert the maximum number of blocks into the store + let current_epoch = 0; + let starting_round = 0; + let pending_blocks = create_and_add_pending_blocks( + pending_block_store.clone(), + max_num_pending_blocks, + current_epoch, + starting_round, + 1, + ); + + // Create a new block payload store and insert payloads for the first block + let block_payload_store = Arc::new(Mutex::new(BlockPayloadStore::new( + consensus_observer_config, + ))); + let first_block = pending_blocks.first().unwrap().clone(); + insert_payloads_for_ordered_block(block_payload_store.clone(), &first_block); + + // Remove the first block (which is now ready) + let payload_round = first_block.first_block().round(); + let ready_block = pending_block_store.lock().remove_ready_block( + current_epoch, + payload_round, + block_payload_store.clone(), + ); + assert_eq!(ready_block, Some(first_block)); + + // Verify that the first block was removed + verify_pending_blocks( + pending_block_store.clone(), + max_num_pending_blocks - 1, + &pending_blocks[1..].to_vec(), + ); + + // Insert payloads for the second block + let second_block = pending_blocks[1].clone(); + insert_payloads_for_ordered_block(block_payload_store.clone(), &second_block); + + // Remove the second block (which is now ready) + let payload_round = 
second_block.first_block().round(); + let ready_block = pending_block_store.lock().remove_ready_block( + current_epoch, + payload_round, + block_payload_store.clone(), + ); + assert_eq!(ready_block, Some(second_block)); + + // Verify that the first and second blocks were removed + verify_pending_blocks( + pending_block_store.clone(), + max_num_pending_blocks - 2, + &pending_blocks[2..].to_vec(), + ); + + // Insert payloads for the last block + let last_block = pending_blocks.last().unwrap().clone(); + insert_payloads_for_ordered_block(block_payload_store.clone(), &last_block); + + // Remove the last block (which is now ready) + let payload_round = last_block.first_block().round(); + let ready_block = pending_block_store.lock().remove_ready_block( + current_epoch, + payload_round, + block_payload_store.clone(), + ); + + // Verify that the last block was removed + assert_eq!(ready_block, Some(last_block)); + + // Verify that the store is empty + verify_pending_blocks(pending_block_store.clone(), 0, &vec![]); + } + + #[test] + fn test_remove_ready_block_singular_blocks_missing() { + // Create a new pending block store + let max_num_pending_blocks = 100; + let consensus_observer_config = ConsensusObserverConfig { + max_num_pending_blocks: max_num_pending_blocks as u64, + ..ConsensusObserverConfig::default() + }; + let pending_block_store = Arc::new(Mutex::new(PendingBlockStore::new( + consensus_observer_config, + ))); + + // Insert the maximum number of blocks into the store + let current_epoch = 10; + let starting_round = 100; + let pending_blocks = create_and_add_pending_blocks( + pending_block_store.clone(), + max_num_pending_blocks, + current_epoch, + starting_round, + 1, + ); + + // Create an empty block payload store + let block_payload_store = Arc::new(Mutex::new(BlockPayloadStore::new( + consensus_observer_config, + ))); + + // Remove the third block (which is not ready) + let third_block = pending_blocks[2].clone(); + let third_block_round = 
third_block.first_block().round(); + let ready_block = pending_block_store.lock().remove_ready_block( + current_epoch, + third_block_round, + block_payload_store.clone(), + ); + assert!(ready_block.is_none()); + + // Verify that the first three blocks were removed + verify_pending_blocks( + pending_block_store.clone(), + max_num_pending_blocks - 3, + &pending_blocks[3..].to_vec(), + ); + + // Remove the last block (which is not ready) + let last_block = pending_blocks.last().unwrap().clone(); + let last_block_round = last_block.first_block().round(); + let ready_block = pending_block_store.lock().remove_ready_block( + current_epoch, + last_block_round, + block_payload_store.clone(), + ); + assert!(ready_block.is_none()); + + // Verify that the store is now empty + verify_pending_blocks(pending_block_store.clone(), 0, &vec![]); + } + + /// Creates and adds the specified number of blocks to the pending block store + fn create_and_add_pending_blocks( + pending_block_store: Arc>, + num_pending_blocks: usize, + epoch: u64, + starting_round: Round, + max_pipelined_blocks: u64, + ) -> Vec { + let mut pending_blocks = vec![]; + for i in 0..num_pending_blocks { + // Create the pipelined blocks + let num_pipelined_blocks = rand::thread_rng().gen_range(1, max_pipelined_blocks + 1); + let mut pipelined_blocks = vec![]; + for j in 0..num_pipelined_blocks { + // Calculate the block round + let round = starting_round + ((i as Round) * max_pipelined_blocks) + j; // Ensure gaps between blocks + + // Create a new block info + let block_info = BlockInfo::new( + epoch, + round, + HashValue::random(), + HashValue::random(), + round, + i as u64, + None, + ); + + // Create the pipelined block + let block_data = BlockData::new_for_testing( + block_info.epoch(), + block_info.round(), + block_info.timestamp_usecs(), + QuorumCert::dummy(), + BlockType::Genesis, + ); + let block = Block::new_for_testing(block_info.id(), block_data, None); + let pipelined_block = 
Arc::new(PipelinedBlock::new_ordered(block)); + + // Add the pipelined block to the list + pipelined_blocks.push(pipelined_block); + } + + // Create an ordered block + let ordered_proof = LedgerInfoWithSignatures::new( + LedgerInfo::new( + BlockInfo::random_with_epoch(epoch, starting_round), + HashValue::random(), + ), + AggregateSignature::empty(), + ); + let ordered_block = OrderedBlock::new(pipelined_blocks, ordered_proof.clone()); + + // Insert the ordered block into the pending block store + pending_block_store + .lock() + .insert_pending_block(ordered_block.clone()); + + // Add the ordered block to the pending blocks + pending_blocks.push(ordered_block); + } + + pending_blocks + } + + /// Inserts payloads into the payload store for the ordered block + fn insert_payloads_for_ordered_block( + block_payload_store: Arc>, + ordered_block: &OrderedBlock, + ) { + for block in ordered_block.blocks() { + let block_payload = + BlockPayload::new(block.block_info(), BlockTransactionPayload::empty()); + block_payload_store + .lock() + .insert_block_payload(block_payload, true); + } + } + + /// Verifies that the pending block store contains the expected blocks + fn verify_pending_blocks( + pending_block_store: Arc>, + num_expected_blocks: usize, + pending_blocks: &Vec, + ) { + // Check the number of pending blocks + assert_eq!( + pending_block_store.lock().blocks_without_payloads.len(), + num_expected_blocks + ); + + // Check that all pending blocks are in the store + for pending_block in pending_blocks { + let first_block = pending_block.first_block(); + assert_eq!( + pending_block_store + .lock() + .blocks_without_payloads + .get(&(first_block.epoch(), first_block.round())) + .unwrap(), + pending_block + ); + } + } +} diff --git a/consensus/src/consensus_observer/observer/subscription.rs b/consensus/src/consensus_observer/observer/subscription.rs new file mode 100644 index 0000000000000..5d9ae4d43def1 --- /dev/null +++ 
b/consensus/src/consensus_observer/observer/subscription.rs @@ -0,0 +1,917 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::consensus_observer::{common::error::Error, observer::subscription_utils}; +use aptos_config::{config::ConsensusObserverConfig, network_id::PeerNetworkId}; +use aptos_network::application::metadata::PeerMetadata; +use aptos_storage_interface::DbReader; +use aptos_time_service::{TimeService, TimeServiceTrait}; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, + time::{Duration, Instant}, +}; + +/// A single consensus observer subscription +pub struct ConsensusObserverSubscription { + // The configuration of the consensus observer + consensus_observer_config: ConsensusObserverConfig, + + // A handle to storage (used to read the latest state and check progress) + db_reader: Arc, + + // The peer network id of the active subscription + peer_network_id: PeerNetworkId, + + // The timestamp of the last message received for the subscription + last_message_receive_time: Instant, + + // The timestamp and connected peers for the last optimality check + last_optimality_check_time_and_peers: (Instant, HashSet), + + // The highest synced version we've seen from storage, along with the time at which it was seen + highest_synced_version_and_time: (u64, Instant), + + // The time service (used to check the last message receive time) + time_service: TimeService, +} + +impl ConsensusObserverSubscription { + pub fn new( + consensus_observer_config: ConsensusObserverConfig, + db_reader: Arc, + peer_network_id: PeerNetworkId, + time_service: TimeService, + ) -> Self { + // Get the current time + let time_now = time_service.now(); + + // Create a new subscription + Self { + consensus_observer_config, + db_reader, + peer_network_id, + last_message_receive_time: time_now, + last_optimality_check_time_and_peers: (time_now, HashSet::new()), + highest_synced_version_and_time: (0, time_now), + time_service, + } + } + + /// 
Checks if the subscription is still healthy. If not, an error + /// is returned indicating the reason for the subscription failure. + pub fn check_subscription_health( + &mut self, + connected_peers_and_metadata: &HashMap, + ) -> Result<(), Error> { + // Verify the subscription peer is still connected + let peer_network_id = self.get_peer_network_id(); + if !connected_peers_and_metadata.contains_key(&peer_network_id) { + return Err(Error::SubscriptionDisconnected(format!( + "The peer: {:?} is no longer connected!", + peer_network_id + ))); + } + + // Verify the subscription has not timed out + self.check_subscription_timeout()?; + + // Verify that the DB is continuing to sync and commit new data + self.check_syncing_progress()?; + + // Verify that the subscription peer is still optimal + self.check_subscription_peer_optimality(connected_peers_and_metadata)?; + + // The subscription seems healthy + Ok(()) + } + + /// Verifies that the peer currently selected for the subscription is + /// optimal. This is only done if: (i) the peers have changed since the + /// last check; or (ii) enough time has elapsed to force a refresh. + fn check_subscription_peer_optimality( + &mut self, + peers_and_metadata: &HashMap, + ) -> Result<(), Error> { + // Get the last optimality check time and connected peers + let (last_optimality_check_time, last_optimality_check_peers) = + self.last_optimality_check_time_and_peers.clone(); + + // Determine if enough time has elapsed to force a refresh + let time_now = self.time_service.now(); + let duration_since_last_check = time_now.duration_since(last_optimality_check_time); + let refresh_interval = Duration::from_millis( + self.consensus_observer_config + .subscription_refresh_interval_ms, + ); + let force_refresh = duration_since_last_check >= refresh_interval; + + // Determine if the peers have changed since the last check. 
+ // Note: we only check for peer changes periodically to avoid + // excessive subscription churn due to peer connects/disconnects. + let current_connected_peers = peers_and_metadata.keys().cloned().collect(); + let peer_check_interval = Duration::from_millis( + self.consensus_observer_config + .subscription_peer_change_interval_ms, + ); + let peers_changed = duration_since_last_check >= peer_check_interval + && current_connected_peers != last_optimality_check_peers; + + // Determine if we should perform the optimality check + if !force_refresh && !peers_changed { + return Ok(()); // We don't need to check optimality yet + } + + // Otherwise, update the last peer optimality check time and peers + self.last_optimality_check_time_and_peers = (time_now, current_connected_peers); + + // Sort the peers by subscription optimality + let sorted_peers = + subscription_utils::sort_peers_by_subscription_optimality(peers_and_metadata); + + // Verify that this peer is one of the most optimal peers + let max_concurrent_subscriptions = + self.consensus_observer_config.max_concurrent_subscriptions as usize; + if !sorted_peers + .iter() + .take(max_concurrent_subscriptions) + .any(|peer| peer == &self.peer_network_id) + { + return Err(Error::SubscriptionSuboptimal(format!( + "Subscription to peer: {} is no longer optimal! New optimal peers: {:?}", + self.peer_network_id, sorted_peers + ))); + } + + Ok(()) + } + + /// Verifies that the subscription has not timed out based + /// on the last received message time. 
+ fn check_subscription_timeout(&self) -> Result<(), Error> { + // Calculate the duration since the last message + let time_now = self.time_service.now(); + let duration_since_last_message = time_now.duration_since(self.last_message_receive_time); + + // Check if the subscription has timed out + if duration_since_last_message + > Duration::from_millis(self.consensus_observer_config.max_subscription_timeout_ms) + { + return Err(Error::SubscriptionTimeout(format!( + "Subscription to peer: {} has timed out! No message received for: {:?}", + self.peer_network_id, duration_since_last_message + ))); + } + + Ok(()) + } + + /// Verifies that the DB is continuing to sync and commit new data + fn check_syncing_progress(&mut self) -> Result<(), Error> { + // Get the current synced version from storage + let current_synced_version = + self.db_reader + .get_latest_ledger_info_version() + .map_err(|error| { + Error::UnexpectedError(format!( + "Failed to read highest synced version: {:?}", + error + )) + })?; + + // Verify that the synced version is increasing appropriately + let (highest_synced_version, highest_version_timestamp) = + self.highest_synced_version_and_time; + if current_synced_version <= highest_synced_version { + // The synced version hasn't increased. Check if we should terminate + // the subscription based on the last time the highest synced version was seen. + let time_now = self.time_service.now(); + let duration_since_highest_seen = time_now.duration_since(highest_version_timestamp); + if duration_since_highest_seen + > Duration::from_millis( + self.consensus_observer_config.max_synced_version_timeout_ms, + ) + { + return Err(Error::SubscriptionProgressStopped(format!( + "The DB is not making sync progress! 
Highest synced version: {}, elapsed: {:?}", + highest_synced_version, duration_since_highest_seen + ))); + } + } + + // Update the highest synced version and time + self.highest_synced_version_and_time = (current_synced_version, self.time_service.now()); + + Ok(()) + } + + /// Returns the peer network id of the subscription + pub fn get_peer_network_id(&self) -> PeerNetworkId { + self.peer_network_id + } + + /// Updates the last message receive time to the current time + pub fn update_last_message_receive_time(&mut self) { + self.last_message_receive_time = self.time_service.now(); + } +} + +#[cfg(test)] +mod test { + use super::*; + use aptos_config::config::PeerRole; + use aptos_netcore::transport::ConnectionOrigin; + use aptos_network::{ + protocols::wire::handshake::v1::{MessagingProtocolVersion, ProtocolIdSet}, + transport::{ConnectionId, ConnectionMetadata}, + ProtocolId, + }; + use aptos_peer_monitoring_service_types::PeerMonitoringMetadata; + use aptos_storage_interface::Result; + use aptos_types::{network_address::NetworkAddress, transaction::Version}; + use claims::assert_matches; + use mockall::mock; + + // This is a simple mock of the DbReader (it generates a MockDatabaseReader) + mock! 
{ + pub DatabaseReader {} + impl DbReader for DatabaseReader { + fn get_latest_ledger_info_version(&self) -> Result; + } + } + + #[test] + fn test_check_subscription_health_connected_and_timeout() { + // Create a consensus observer config + let consensus_observer_config = ConsensusObserverConfig { + max_synced_version_timeout_ms: 100_000_000, // Use a large value so that we don't get DB progress errors + ..ConsensusObserverConfig::default() + }; + + // Create a new observer subscription + let time_service = TimeService::mock(); + let peer_network_id = PeerNetworkId::random(); + let mut subscription = ConsensusObserverSubscription::new( + consensus_observer_config, + Arc::new(MockDatabaseReader::new()), + peer_network_id, + time_service.clone(), + ); + + // Verify that the subscription is unhealthy (the peer is not connected) + assert_matches!( + subscription.check_subscription_health(&HashMap::new()), + Err(Error::SubscriptionDisconnected(_)) + ); + + // Create a peers and metadata map for the subscription + let mut peers_and_metadata = HashMap::new(); + add_metadata_for_peer(&mut peers_and_metadata, peer_network_id, true, false); + + // Elapse enough time to timeout the subscription + let mock_time_service = time_service.into_mock(); + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.max_subscription_timeout_ms + 1, + )); + + // Verify that the subscription has timed out + assert_matches!( + subscription.check_subscription_health(&peers_and_metadata), + Err(Error::SubscriptionTimeout(_)) + ); + } + + #[test] + fn test_check_subscription_health_progress() { + // Create a consensus observer config with a large timeout + let consensus_observer_config = ConsensusObserverConfig { + max_subscription_timeout_ms: 100_000_000, // Use a large value so that we don't time out + ..ConsensusObserverConfig::default() + }; + + // Create a mock DB reader with expectations + let first_synced_version = 1; + let second_synced_version = 2; + let mut 
mock_db_reader = MockDatabaseReader::new(); + mock_db_reader + .expect_get_latest_ledger_info_version() + .returning(move || Ok(first_synced_version)) + .times(1); // Only allow one call for the first version + mock_db_reader + .expect_get_latest_ledger_info_version() + .returning(move || Ok(second_synced_version)); // Allow multiple calls for the second version + + // Create a new observer subscription + let peer_network_id = PeerNetworkId::random(); + let time_service = TimeService::mock(); + let mut subscription = ConsensusObserverSubscription::new( + consensus_observer_config, + Arc::new(mock_db_reader), + peer_network_id, + time_service.clone(), + ); + + // Verify that the DB is making sync progress and that the highest synced version is updated + let mock_time_service = time_service.into_mock(); + verify_subscription_syncing_progress( + &mut subscription, + first_synced_version, + mock_time_service.now(), + ); + + // Elapse enough time to timeout the subscription + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.max_synced_version_timeout_ms + 1, + )); + + // Verify that the DB is still making sync progress (the next version is higher) + verify_subscription_syncing_progress( + &mut subscription, + second_synced_version, + mock_time_service.now(), + ); + + // Elapse enough time to timeout the subscription + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.max_synced_version_timeout_ms + 1, + )); + + // Verify that the DB is not making sync progress and that the subscription has timed out + assert_matches!( + subscription.check_syncing_progress(), + Err(Error::SubscriptionProgressStopped(_)) + ); + } + + #[test] + fn test_check_subscription_health_optimality() { + // Create a consensus observer config with a single subscription and large timeouts + let consensus_observer_config = ConsensusObserverConfig { + max_concurrent_subscriptions: 1, + max_subscription_timeout_ms: 100_000_000, // Use a large value 
so that we don't time out + max_synced_version_timeout_ms: 100_000_000, // Use a large value so that we don't get DB progress errors + ..ConsensusObserverConfig::default() + }; + + // Create a mock DB reader with expectations + let mut mock_db_reader = MockDatabaseReader::new(); + mock_db_reader + .expect_get_latest_ledger_info_version() + .returning(move || Ok(1)); + + // Create a new observer subscription + let time_service = TimeService::mock(); + let peer_network_id = PeerNetworkId::random(); + let mut subscription = ConsensusObserverSubscription::new( + consensus_observer_config, + Arc::new(mock_db_reader), + peer_network_id, + time_service.clone(), + ); + + // Create a peers and metadata map for the subscription + let mut peers_and_metadata = HashMap::new(); + add_metadata_for_peer(&mut peers_and_metadata, peer_network_id, true, false); + + // Verify that the subscription is healthy + assert!(subscription + .check_subscription_health(&peers_and_metadata) + .is_ok()); + + // Add a more optimal peer to the set of peers + let new_optimal_peer = PeerNetworkId::random(); + add_metadata_for_peer(&mut peers_and_metadata, new_optimal_peer, true, true); + + // Elapse enough time for a peer optimality check + let mock_time_service = time_service.into_mock(); + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.subscription_peer_change_interval_ms + 1, + )); + + // Verify that the subscription is no longer optimal + assert_matches!( + subscription.check_subscription_health(&peers_and_metadata), + Err(Error::SubscriptionSuboptimal(_)) + ); + } + + #[test] + fn test_check_subscription_peer_optimality_single() { + // Create a consensus observer config with a maximum of 1 subscription + let consensus_observer_config = create_observer_config(1); + + // Create a new observer subscription + let time_service = TimeService::mock(); + let peer_network_id = PeerNetworkId::random(); + let mut subscription = ConsensusObserverSubscription::new( + 
consensus_observer_config, + Arc::new(MockDatabaseReader::new()), + peer_network_id, + time_service.clone(), + ); + + // Verify the time and peers for the last optimality check + let mock_time_service = time_service.into_mock(); + verify_last_check_time_and_peers(&subscription, mock_time_service.now(), HashSet::new()); + + // Create a peers and metadata map for the subscription + let mut peers_and_metadata = HashMap::new(); + add_metadata_for_peer(&mut peers_and_metadata, peer_network_id, true, false); + + // Add a more optimal peer to the set of peers + let new_optimal_peer = PeerNetworkId::random(); + add_metadata_for_peer(&mut peers_and_metadata, new_optimal_peer, true, true); + + // Verify that the peer is optimal (not enough time has elapsed to check) + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, true); + + // Elapse some amount of time (but not enough to check optimality) + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.subscription_peer_change_interval_ms / 2, + )); + + // Verify that the peer is still optimal (not enough time has elapsed to check) + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, true); + + // Elapse enough time to check the peer optimality + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.subscription_peer_change_interval_ms + 1, + )); + + // Verify that the peer is no longer optimal (a more optimal peer has been added) + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, false); + + // Verify the time of the last peer optimality check + verify_last_check_time_and_peers( + &subscription, + mock_time_service.now(), + peers_and_metadata.keys().cloned().collect(), + ); + + // Elapse enough time to check the peer optimality + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.subscription_peer_change_interval_ms + 1, + )); + + // Verify that the peer is now optimal (the peers 
haven't changed) + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, true); + + // Remove the current peer from the list of peers + peers_and_metadata.remove(&peer_network_id); + + // Verify that the peer is not optimal (the peers have changed) + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, false); + + // Verify the time of the last peer optimality check + verify_last_check_time_and_peers( + &subscription, + mock_time_service.now(), + peers_and_metadata.keys().cloned().collect(), + ); + } + + #[test] + fn test_check_subscription_peer_optimality_multiple() { + // Create a consensus observer config with a maximum of 2 subscriptions + let consensus_observer_config = create_observer_config(2); + + // Create a new observer subscription + let time_service = TimeService::mock(); + let peer_network_id = PeerNetworkId::random(); + let mut subscription = ConsensusObserverSubscription::new( + consensus_observer_config, + Arc::new(MockDatabaseReader::new()), + peer_network_id, + time_service.clone(), + ); + + // Create a peers and metadata map for the subscription + let mut peers_and_metadata = HashMap::new(); + add_metadata_for_peer(&mut peers_and_metadata, peer_network_id, true, false); + + // Add a more optimal peer to the set of peers + let new_optimal_peer = PeerNetworkId::random(); + add_metadata_for_peer(&mut peers_and_metadata, new_optimal_peer, true, true); + + // Elapse enough time to check the peer optimality + let mock_time_service = time_service.into_mock(); + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.subscription_peer_change_interval_ms + 1, + )); + + // Verify that the peer is optimal (it's in the top 2 most optimal peers) + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, true); + + // Add another more optimal peer to the set of peers + let another_optimal_peer = PeerNetworkId::random(); + add_metadata_for_peer(&mut peers_and_metadata, 
another_optimal_peer, true, true); + + // Elapse enough time to check the peer optimality + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.subscription_peer_change_interval_ms + 1, + )); + + // Verify that the peer is no longer optimal (it's not in the top 2 most optimal peers) + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, false); + + // Remove the previous optimal peer from the list of peers + peers_and_metadata.remove(&new_optimal_peer); + + // Elapse enough time to check the peer optimality + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.subscription_peer_change_interval_ms + 1, + )); + + // Verify that the peer is optimal (it's in the top 2 most optimal peers) + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, true); + } + + #[test] + fn test_check_subscription_peer_optimality_refresh() { + // Create a consensus observer config with a maximum of 1 subscription + let consensus_observer_config = create_observer_config(1); + + // Create a new observer subscription + let time_service = TimeService::mock(); + let peer_network_id = PeerNetworkId::random(); + let mut subscription = ConsensusObserverSubscription::new( + consensus_observer_config, + Arc::new(MockDatabaseReader::new()), + peer_network_id, + time_service.clone(), + ); + + // Create a peers and metadata map for the subscription + let mut peers_and_metadata = HashMap::new(); + add_metadata_for_peer(&mut peers_and_metadata, peer_network_id, true, false); + + // Verify that the peer is optimal (not enough time has elapsed to refresh) + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, true); + + // Add a more optimal peer to the set of peers + let new_optimal_peer = PeerNetworkId::random(); + add_metadata_for_peer(&mut peers_and_metadata, new_optimal_peer, true, true); + + // Verify that the peer is still optimal (not enough time has elapsed to refresh) + 
verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, true); + + // Elapse enough time to refresh optimality + let mock_time_service = time_service.into_mock(); + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.subscription_refresh_interval_ms + 1, + )); + + // Verify that the peer is no longer optimal + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, false); + + // Elapse some amount of time (but not enough to refresh) + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.subscription_refresh_interval_ms / 2, + )); + + // Verify that the peer is now optimal (not enough time has elapsed to refresh) + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, true); + + // Remove the more optimal peer from the list of peers + peers_and_metadata.remove(&new_optimal_peer); + + // Elapse enough time to refresh optimality + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.subscription_refresh_interval_ms + 1, + )); + + // Verify that the peer is optimal + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, true); + + // Verify the time of the last peer optimality check + verify_last_check_time_and_peers( + &subscription, + mock_time_service.now(), + peers_and_metadata.keys().cloned().collect(), + ); + } + + #[test] + fn test_check_subscription_peer_optimality_supported() { + // Create a consensus observer config with a maximum of 1 subscription + let consensus_observer_config = create_observer_config(1); + + // Create a new observer subscription + let time_service = TimeService::mock(); + let peer_network_id = PeerNetworkId::random(); + let mut subscription = ConsensusObserverSubscription::new( + consensus_observer_config, + Arc::new(MockDatabaseReader::new()), + peer_network_id, + time_service.clone(), + ); + + // Insert empty metadata for the subscription peer + let mut peers_and_metadata = 
HashMap::new(); + add_metadata_for_peer(&mut peers_and_metadata, peer_network_id, true, false); + + // Elapse enough time to check optimality + let mock_time_service = time_service.into_mock(); + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.subscription_peer_change_interval_ms + 1, + )); + + // Verify that the peer is still optimal (there are no other peers) + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, true); + + // Add a more optimal peer without consensus observer support + let unsupported_peer = PeerNetworkId::random(); + add_metadata_for_peer(&mut peers_and_metadata, unsupported_peer, false, false); + + // Elapse enough time to check optimality + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.subscription_peer_change_interval_ms + 1, + )); + + // Verify that the peer is still optimal (the unsupported peer is ignored) + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, true); + + // Add another more optimal peer with consensus observer support + let supported_peer = PeerNetworkId::random(); + add_metadata_for_peer(&mut peers_and_metadata, supported_peer, true, true); + + // Elapse enough time to check optimality + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.subscription_peer_change_interval_ms + 1, + )); + + // Verify that the peer is no longer optimal + verify_subscription_peer_optimality(&mut subscription, &peers_and_metadata, false); + } + + #[test] + fn test_check_subscription_timeout() { + // Create a new observer subscription + let consensus_observer_config = ConsensusObserverConfig::default(); + let peer_network_id = PeerNetworkId::random(); + let time_service = TimeService::mock(); + let mut subscription = ConsensusObserverSubscription::new( + consensus_observer_config, + Arc::new(MockDatabaseReader::new()), + peer_network_id, + time_service.clone(), + ); + + // Verify that the subscription has not 
timed out and that the last message time is updated + let current_time = time_service.now(); + verify_subscription_time_out(&subscription, false); + assert_eq!(subscription.last_message_receive_time, current_time); + + // Elapse some amount of time (but not enough to timeout) + let mock_time_service = time_service.into_mock(); + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.max_subscription_timeout_ms / 2, + )); + + // Verify that the subscription has not timed out + verify_subscription_time_out(&subscription, false); + + // Update the last message receive time + let current_time = mock_time_service.now(); + subscription.update_last_message_receive_time(); + assert_eq!(subscription.last_message_receive_time, current_time); + + // Verify that the subscription has not timed out + verify_subscription_time_out(&subscription, false); + + // Elapse enough time to timeout the subscription + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.max_subscription_timeout_ms + 1, + )); + + // Verify that the subscription has timed out + verify_subscription_time_out(&subscription, true); + } + + #[test] + fn test_check_syncing_progress() { + // Create a mock DB reader with expectations + let first_synced_version = 10; + let second_synced_version = 20; + let mut mock_db_reader = MockDatabaseReader::new(); + mock_db_reader + .expect_get_latest_ledger_info_version() + .returning(move || Ok(first_synced_version)) + .times(2); // Only allow two calls for the first version + mock_db_reader + .expect_get_latest_ledger_info_version() + .returning(move || Ok(second_synced_version)); // Allow multiple calls for the second version + + // Create a new observer subscription + let consensus_observer_config = ConsensusObserverConfig::default(); + let peer_network_id = PeerNetworkId::random(); + let time_service = TimeService::mock(); + let mut subscription = ConsensusObserverSubscription::new( + consensus_observer_config, + 
Arc::new(mock_db_reader), + peer_network_id, + time_service.clone(), + ); + + // Verify that the DB is making sync progress and that the highest synced version is updated + let mock_time_service = time_service.into_mock(); + verify_subscription_syncing_progress( + &mut subscription, + first_synced_version, + mock_time_service.now(), + ); + + // Elapse some amount of time (not enough to timeout) + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.max_synced_version_timeout_ms / 2, + )); + + // Verify that the DB is still making sync progress + verify_subscription_syncing_progress( + &mut subscription, + first_synced_version, + mock_time_service.now(), + ); + + // Elapse enough time to timeout the subscription + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.max_synced_version_timeout_ms + 1, + )); + + // Verify that the DB is still making sync progress (the next version is higher) + verify_subscription_syncing_progress( + &mut subscription, + second_synced_version, + mock_time_service.now(), + ); + + // Elapse enough time to timeout the subscription + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.max_synced_version_timeout_ms + 1, + )); + + // Verify that the DB is not making sync progress and that the subscription has timed out + assert_matches!( + subscription.check_syncing_progress(), + Err(Error::SubscriptionProgressStopped(_)) + ); + } + + #[test] + fn test_get_peer_network_id() { + // Create a new observer subscription + let consensus_observer_config = ConsensusObserverConfig::default(); + let peer_network_id = PeerNetworkId::random(); + let time_service = TimeService::mock(); + let subscription = ConsensusObserverSubscription::new( + consensus_observer_config, + Arc::new(MockDatabaseReader::new()), + peer_network_id, + time_service.clone(), + ); + + // Verify that the peer network id matches the expected value + assert_eq!(subscription.get_peer_network_id(), 
peer_network_id);
    }

    #[test]
    fn test_update_last_message_receive_time() {
        // Create a new observer subscription
        let consensus_observer_config = ConsensusObserverConfig::default();
        let peer_network_id = PeerNetworkId::random();
        let time_service = TimeService::mock();
        let mut subscription = ConsensusObserverSubscription::new(
            consensus_observer_config,
            Arc::new(MockDatabaseReader::new()),
            peer_network_id,
            time_service.clone(),
        );

        // Verify the initial last message time
        assert_eq!(subscription.last_message_receive_time, time_service.now());

        // Elapse some amount of time
        let mock_time_service = time_service.into_mock();
        mock_time_service.advance(Duration::from_secs(10));

        // Update the last message time
        let current_time = mock_time_service.now();
        subscription.update_last_message_receive_time();

        // Verify that the last message time is updated
        assert_eq!(subscription.last_message_receive_time, current_time);
    }

    /// Adds metadata for the specified peer to the map of peers and metadata
    fn add_metadata_for_peer(
        peers_and_metadata: &mut HashMap<PeerNetworkId, PeerMetadata>,
        peer_network_id: PeerNetworkId,
        support_consensus_observer: bool,
        set_ping_latency: bool,
    ) {
        // Determine the ping latency to use for the peer
        let average_ping_latency = if set_ping_latency { Some(0.1) } else { None };

        // Add the peer and metadata to the map
        peers_and_metadata.insert(
            peer_network_id,
            PeerMetadata::new_for_test(
                create_connection_metadata(peer_network_id, support_consensus_observer),
                PeerMonitoringMetadata::new(average_ping_latency, None, None, None, None),
            ),
        );
    }

    /// Creates a new connection metadata for testing
    fn create_connection_metadata(
        peer_network_id: PeerNetworkId,
        support_consensus_observer: bool,
    ) -> ConnectionMetadata {
        if support_consensus_observer {
            // Create a protocol set that supports consensus observer
            let protocol_set = ProtocolIdSet::from_iter(vec![
                ProtocolId::ConsensusObserver,
                ProtocolId::ConsensusObserverRpc,
            ]);

            // Create the connection metadata with the protocol set
            ConnectionMetadata::new(
                peer_network_id.peer_id(),
                ConnectionId::default(),
                NetworkAddress::mock(),
                ConnectionOrigin::Inbound,
                MessagingProtocolVersion::V1,
                protocol_set,
                PeerRole::PreferredUpstream,
            )
        } else {
            ConnectionMetadata::mock(peer_network_id.peer_id())
        }
    }

    /// Creates a consensus observer config with the given max concurrent subscriptions
    fn create_observer_config(max_concurrent_subscriptions: u64) -> ConsensusObserverConfig {
        ConsensusObserverConfig {
            max_concurrent_subscriptions,
            ..ConsensusObserverConfig::default()
        }
    }

    /// Verifies that the last check time and peers are as expected
    fn verify_last_check_time_and_peers(
        subscription: &ConsensusObserverSubscription,
        expected_last_check_time: Instant,
        expected_last_check_peers: HashSet<PeerNetworkId>,
    ) {
        // Get the last check time and peers from the subscription
        let (last_check_time, last_check_peers) =
            subscription.last_optimality_check_time_and_peers.clone();

        // Verify the last check time and peers match the expected values
        assert_eq!(last_check_time, expected_last_check_time);
        assert_eq!(last_check_peers, expected_last_check_peers);
    }

    /// Verifies that the subscription time out matches the expected value
    fn verify_subscription_time_out(subscription: &ConsensusObserverSubscription, timed_out: bool) {
        // Check if the subscription has timed out
        let result = subscription.check_subscription_timeout();

        // Verify the result
        if timed_out {
            assert_matches!(result, Err(Error::SubscriptionTimeout(_)));
        } else {
            assert!(result.is_ok());
        }
    }

    /// Verifies that the peer optimality matches the expected value
    fn verify_subscription_peer_optimality(
        subscription: &mut ConsensusObserverSubscription,
        peers_and_metadata: &HashMap<PeerNetworkId, PeerMetadata>,
        is_optimal: bool,
    ) {
        // Check the subscription peer optimality
        let result = subscription.check_subscription_peer_optimality(peers_and_metadata);

        // Verify the result
        if is_optimal {
            assert!(result.is_ok());
        } else {
            assert_matches!(result, Err(Error::SubscriptionSuboptimal(_)));
        }
    }

    /// Verifies that the syncing progress is as expected
    fn verify_subscription_syncing_progress(
        subscription: &mut ConsensusObserverSubscription,
        first_synced_version: Version,
        time: Instant,
    ) {
        assert!(subscription.check_syncing_progress().is_ok());
        assert_eq!(
            subscription.highest_synced_version_and_time,
            (first_synced_version, time)
        );
    }
}
diff --git a/consensus/src/consensus_observer/observer/subscription_manager.rs b/consensus/src/consensus_observer/observer/subscription_manager.rs
new file mode 100644
index 0000000000000..24ae1f7d321b4
--- /dev/null
+++ b/consensus/src/consensus_observer/observer/subscription_manager.rs
@@ -0,0 +1,1273 @@
// Copyright © Aptos Foundation
// SPDX-License-Identifier: Apache-2.0

use crate::consensus_observer::{
    common::{
        error::Error,
        logging::{LogEntry, LogSchema},
        metrics,
    },
    network::{
        observer_client::ConsensusObserverClient,
        observer_message::{
            ConsensusObserverMessage, ConsensusObserverRequest, ConsensusObserverResponse,
        },
    },
    observer::{subscription::ConsensusObserverSubscription, subscription_utils},
    publisher::consensus_publisher::ConsensusPublisher,
};
use aptos_config::{config::ConsensusObserverConfig, network_id::PeerNetworkId};
use aptos_infallible::Mutex;
use aptos_logger::{info, warn};
use aptos_network::application::{interface::NetworkClient, metadata::PeerMetadata};
use aptos_storage_interface::DbReader;
use aptos_time_service::TimeService;
use itertools::Itertools;
use std::{collections::HashMap, sync::Arc};
use tokio::task::JoinHandle;

/// The manager for consensus observer subscriptions
pub struct SubscriptionManager {
    // The currently active set of consensus observer subscriptions
    active_observer_subscriptions:
Arc>>, + + // The active subscription creation task (if one is currently running) + active_subscription_creation_task: Arc>>>, + + // The consensus observer client to send network messages + consensus_observer_client: + Arc>>, + + // The consensus observer configuration + consensus_observer_config: ConsensusObserverConfig, + + // The consensus publisher + consensus_publisher: Option>, + + // A handle to storage (used to read the latest state and check progress) + db_reader: Arc, + + // The time service (used to check progress) + time_service: TimeService, +} + +impl SubscriptionManager { + pub fn new( + consensus_observer_client: Arc< + ConsensusObserverClient>, + >, + consensus_observer_config: ConsensusObserverConfig, + consensus_publisher: Option>, + db_reader: Arc, + time_service: TimeService, + ) -> Self { + Self { + active_observer_subscriptions: Arc::new(Mutex::new(HashMap::new())), + active_subscription_creation_task: Arc::new(Mutex::new(None)), + consensus_observer_client, + consensus_observer_config, + consensus_publisher, + db_reader, + time_service, + } + } + + /// Checks if the subscription to the given peer is still healthy. + /// If not, an error explaining why it is unhealthy is returned. + fn check_subscription_health( + &mut self, + connected_peers_and_metadata: &HashMap, + peer_network_id: PeerNetworkId, + ) -> Result<(), Error> { + // Get the active subscription for the peer + let mut active_observer_subscriptions = self.active_observer_subscriptions.lock(); + let active_subscription = active_observer_subscriptions.get_mut(&peer_network_id); + + // Check the health of the subscription + match active_subscription { + Some(active_subscription) => { + active_subscription.check_subscription_health(connected_peers_and_metadata) + }, + None => Err(Error::UnexpectedError(format!( + "The subscription to peer: {:?} is not active!", + peer_network_id + ))), + } + } + + /// Checks the health of the active subscriptions. 
If any subscription is + /// unhealthy, it will be terminated and new subscriptions will be created. + /// This returns an error iff all subscriptions were unhealthy and terminated. + pub async fn check_and_manage_subscriptions(&mut self) -> Result<(), Error> { + // Get the subscription and connected peers + let initial_subscription_peers = self.get_active_subscription_peers(); + let connected_peers_and_metadata = self.get_connected_peers_and_metadata(); + + // Terminate any unhealthy subscriptions + let terminated_subscriptions = + self.terminate_unhealthy_subscriptions(&connected_peers_and_metadata); + + // Check if all subscriptions were terminated + let num_terminated_subscriptions = terminated_subscriptions.len(); + let all_subscriptions_terminated = num_terminated_subscriptions > 0 + && num_terminated_subscriptions == initial_subscription_peers.len(); + + // Calculate the number of new subscriptions to create + let remaining_subscription_peers = self.get_active_subscription_peers(); + let max_concurrent_subscriptions = + self.consensus_observer_config.max_concurrent_subscriptions as usize; + let num_subscriptions_to_create = + max_concurrent_subscriptions.saturating_sub(remaining_subscription_peers.len()); + + // Update the total subscription metrics + update_total_subscription_metrics(&remaining_subscription_peers); + + // Spawn a task to create the new subscriptions (asynchronously) + self.spawn_subscription_creation_task( + num_subscriptions_to_create, + remaining_subscription_peers, + terminated_subscriptions, + connected_peers_and_metadata, + ) + .await; + + // Return an error if all subscriptions were terminated + if all_subscriptions_terminated { + Err(Error::SubscriptionsReset(format!( + "All {:?} subscriptions were unhealthy and terminated!", + num_terminated_subscriptions, + ))) + } else { + Ok(()) + } + } + + /// Returns the currently active subscription peers + fn get_active_subscription_peers(&self) -> Vec { + let active_observer_subscriptions = 
self.active_observer_subscriptions.lock(); + active_observer_subscriptions.keys().cloned().collect() + } + + /// Gets the connected peers and metadata. If an error + /// occurred, it is logged and an empty map is returned. + fn get_connected_peers_and_metadata(&self) -> HashMap { + self.consensus_observer_client + .get_peers_and_metadata() + .get_connected_peers_and_metadata() + .unwrap_or_else(|error| { + // Log the error + warn!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Failed to get connected peers and metadata! Error: {:?}", + error + )) + ); + + // Return an empty map + HashMap::new() + }) + } + + /// Spawns a new subscription creation task to create + /// the specified number of new subscriptions. + async fn spawn_subscription_creation_task( + &mut self, + num_subscriptions_to_create: usize, + active_subscription_peers: Vec, + terminated_subscriptions: Vec<(PeerNetworkId, Error)>, + connected_peers_and_metadata: HashMap, + ) { + // If there are no new subscriptions to create, return early + if num_subscriptions_to_create == 0 { + return; + } + + // If there is an active subscription creation task, return early + if let Some(subscription_creation_task) = &*self.active_subscription_creation_task.lock() { + if !subscription_creation_task.is_finished() { + return; // The task is still running + } + } + + // Clone the shared state for the task + let active_observer_subscriptions = self.active_observer_subscriptions.clone(); + let consensus_observer_config = self.consensus_observer_config; + let consensus_observer_client = self.consensus_observer_client.clone(); + let consensus_publisher = self.consensus_publisher.clone(); + let db_reader = self.db_reader.clone(); + let time_service = self.time_service.clone(); + + // Spawn a new subscription creation task + let subscription_creation_task = tokio::spawn(async move { + // Identify the terminated subscription peers + let terminated_subscription_peers = terminated_subscriptions + .iter() + 
.map(|(peer, _)| *peer) + .collect(); + + // Create the new subscriptions + let new_subscriptions = subscription_utils::create_new_subscriptions( + consensus_observer_config, + consensus_observer_client, + consensus_publisher, + db_reader, + time_service, + connected_peers_and_metadata, + num_subscriptions_to_create, + active_subscription_peers, + terminated_subscription_peers, + ) + .await; + + // Identify the new subscription peers + let new_subscription_peers = new_subscriptions + .iter() + .map(|subscription| subscription.get_peer_network_id()) + .collect::>(); + + // Add the new subscriptions to the list of active subscriptions + for subscription in new_subscriptions { + active_observer_subscriptions + .lock() + .insert(subscription.get_peer_network_id(), subscription); + } + + // Log a warning if we failed to create as many subscriptions as requested + let num_subscriptions_created = new_subscription_peers.len(); + if num_subscriptions_created < num_subscriptions_to_create { + warn!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Failed to create the requested number of subscriptions! 
Number of subscriptions \ + requested: {:?}, number of subscriptions created: {:?}.", + num_subscriptions_to_create, + num_subscriptions_created + )) + ); + } + + // Update the subscription change metrics + update_subscription_change_metrics(new_subscription_peers, terminated_subscriptions); + }); + + // Update the active subscription creation task + *self.active_subscription_creation_task.lock() = Some(subscription_creation_task); + } + + /// Terminates any unhealthy subscriptions and returns the list of terminated subscriptions + fn terminate_unhealthy_subscriptions( + &mut self, + connected_peers_and_metadata: &HashMap, + ) -> Vec<(PeerNetworkId, Error)> { + let mut terminated_subscriptions = vec![]; + for subscription_peer in self.get_active_subscription_peers() { + // Check the health of the subscription and terminate it if needed + if let Err(error) = + self.check_subscription_health(connected_peers_and_metadata, subscription_peer) + { + // Log the subscription termination error + warn!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Terminating subscription to peer: {:?}! Termination reason: {:?}", + subscription_peer, error + )) + ); + + // Unsubscribe from the peer and remove the subscription + self.unsubscribe_from_peer(subscription_peer); + + // Add the peer to the list of terminated subscriptions + terminated_subscriptions.push((subscription_peer, error)); + } + } + + terminated_subscriptions + } + + /// Unsubscribes from the given peer by sending an unsubscribe request + fn unsubscribe_from_peer(&mut self, peer_network_id: PeerNetworkId) { + // Remove the peer from the active subscriptions + self.active_observer_subscriptions + .lock() + .remove(&peer_network_id); + + // Send an unsubscribe request to the peer and process the response. + // Note: we execute this asynchronously, as we don't need to wait for the response. 
+ let consensus_observer_client = self.consensus_observer_client.clone(); + let consensus_observer_config = self.consensus_observer_config; + tokio::spawn(async move { + // Send the unsubscribe request to the peer + let unsubscribe_request = ConsensusObserverRequest::Unsubscribe; + let response = consensus_observer_client + .send_rpc_request_to_peer( + &peer_network_id, + unsubscribe_request, + consensus_observer_config.network_request_timeout_ms, + ) + .await; + + // Process the response + match response { + Ok(ConsensusObserverResponse::UnsubscribeAck) => { + info!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Successfully unsubscribed from peer: {}!", + peer_network_id + )) + ); + }, + Ok(response) => { + // We received an invalid response + warn!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Got unexpected response type: {:?}", + response.get_label() + )) + ); + }, + Err(error) => { + // We encountered an error while sending the request + warn!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Failed to send unsubscribe request to peer: {}! Error: {:?}", + peer_network_id, error + )) + ); + }, + } + }); + } + + /// Verifies that the message is from an active + /// subscription. If not, an error is returned. + pub fn verify_message_for_subscription( + &mut self, + message_sender: PeerNetworkId, + ) -> Result<(), Error> { + // Check if the message is from an active subscription + if let Some(active_subscription) = self + .active_observer_subscriptions + .lock() + .get_mut(&message_sender) + { + // Update the last message receive time and return early + active_subscription.update_last_message_receive_time(); + return Ok(()); + } + + // Otherwise, the message is not from an active subscription. + // Send another unsubscribe request, and return an error. 
+ self.unsubscribe_from_peer(message_sender); + Err(Error::InvalidMessageError(format!( + "Received message from unexpected peer, and not an active subscription: {}!", + message_sender + ))) + } +} + +/// Updates the subscription creation and termination metrics +fn update_subscription_change_metrics( + new_subscription_peers: Vec, + terminated_subscription_peers: Vec<(PeerNetworkId, Error)>, +) { + // Update the created subscriptions metrics + for peer_network_id in new_subscription_peers { + metrics::increment_counter( + &metrics::OBSERVER_CREATED_SUBSCRIPTIONS, + metrics::CREATED_SUBSCRIPTION_LABEL, + &peer_network_id, + ); + } + + // Update the terminated subscriptions metrics + for (peer_network_id, termination_reason) in terminated_subscription_peers { + metrics::increment_counter( + &metrics::OBSERVER_TERMINATED_SUBSCRIPTIONS, + termination_reason.get_label(), + &peer_network_id, + ); + } +} + +/// Updates the total subscription metrics (grouped by network ID) +fn update_total_subscription_metrics(active_subscription_peers: &[PeerNetworkId]) { + for (network_id, active_subscription_peers) in &active_subscription_peers + .iter() + .chunk_by(|peer_network_id| peer_network_id.network_id()) + { + metrics::set_gauge( + &metrics::OBSERVER_NUM_ACTIVE_SUBSCRIPTIONS, + &network_id, + active_subscription_peers.collect::>().len() as i64, + ); + } +} + +#[cfg(test)] +mod test { + use super::*; + use aptos_config::{config::PeerRole, network_id::NetworkId}; + use aptos_netcore::transport::ConnectionOrigin; + use aptos_network::{ + application::storage::PeersAndMetadata, + protocols::wire::handshake::v1::{MessagingProtocolVersion, ProtocolId, ProtocolIdSet}, + transport::{ConnectionId, ConnectionMetadata}, + }; + use aptos_peer_monitoring_service_types::{ + response::NetworkInformationResponse, PeerMonitoringMetadata, + }; + use aptos_types::{network_address::NetworkAddress, transaction::Version, PeerId}; + use claims::assert_matches; + use maplit::hashmap; + use 
mockall::mock; + use std::{collections::BTreeMap, time::Duration}; + + // This is a simple mock of the DbReader (it generates a MockDatabaseReader) + mock! { + pub DatabaseReader {} + impl DbReader for DatabaseReader { + fn get_latest_ledger_info_version(&self) -> aptos_storage_interface::Result; + } + } + + #[tokio::test] + async fn test_check_and_manage_subscriptions() { + // Create a consensus observer client + let network_id = NetworkId::Public; + let (peers_and_metadata, consensus_observer_client) = + create_consensus_observer_client(&[network_id]); + + // Create a new subscription manager + let consensus_observer_config = ConsensusObserverConfig::default(); + let db_reader = create_mock_db_reader(); + let time_service = TimeService::mock(); + let mut subscription_manager = SubscriptionManager::new( + consensus_observer_client, + consensus_observer_config, + None, + db_reader.clone(), + time_service.clone(), + ); + + // Verify that no subscriptions are active + verify_active_subscription_peers(&subscription_manager, vec![]); + + // Check and manage the subscriptions + let result = subscription_manager.check_and_manage_subscriptions().await; + + // Verify that no subscriptions were terminated + assert!(result.is_ok()); + verify_active_subscription_peers(&subscription_manager, vec![]); + + // Add a new connected peer and subscription + let connected_peer_1 = + create_peer_and_connection(network_id, peers_and_metadata.clone(), 1, None, true); + create_observer_subscription( + &mut subscription_manager, + consensus_observer_config, + db_reader.clone(), + connected_peer_1, + time_service.clone(), + ); + + // Add another connected peer and subscription + let connected_peer_2 = + create_peer_and_connection(network_id, peers_and_metadata.clone(), 2, None, true); + create_observer_subscription( + &mut subscription_manager, + consensus_observer_config, + db_reader.clone(), + connected_peer_2, + TimeService::mock(), // Use a different time service (to avoid timeouts!) 
+ ); + + // Check and manage the subscriptions + subscription_manager + .check_and_manage_subscriptions() + .await + .unwrap(); + + // Verify that the subscriptions are still active + verify_active_subscription_peers(&subscription_manager, vec![ + connected_peer_1, + connected_peer_2, + ]); + + // Elapse time to simulate a timeout for peer 1 + let mock_time_service = time_service.into_mock(); + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.max_subscription_timeout_ms + 1, + )); + + // Check and manage the subscriptions + subscription_manager + .check_and_manage_subscriptions() + .await + .unwrap(); + + // Verify that the first subscription was terminated + verify_active_subscription_peers(&subscription_manager, vec![connected_peer_2]); + + // Disconnect the second peer + remove_peer_and_connection(peers_and_metadata.clone(), connected_peer_2); + + // Check and manage the subscriptions + let result = subscription_manager.check_and_manage_subscriptions().await; + + // Verify that the second subscription was terminated and an error was returned + verify_active_subscription_peers(&subscription_manager, vec![]); + assert_matches!(result, Err(Error::SubscriptionsReset(_))); + } + + #[tokio::test] + async fn test_check_subscription_health_connected() { + // Create a consensus observer client + let network_id = NetworkId::Public; + let (peers_and_metadata, consensus_observer_client) = + create_consensus_observer_client(&[network_id]); + + // Create a new subscription manager + let consensus_observer_config = ConsensusObserverConfig::default(); + let db_reader = create_mock_db_reader(); + let mut subscription_manager = SubscriptionManager::new( + consensus_observer_client, + consensus_observer_config, + None, + db_reader.clone(), + TimeService::mock(), + ); + + // Create a new subscription + let peer_network_id = PeerNetworkId::random(); + create_observer_subscription( + &mut subscription_manager, + consensus_observer_config, + 
db_reader.clone(), + peer_network_id, + TimeService::mock(), + ); + + // Check the active subscription and verify that it unhealthy (the peer is not connected) + check_subscription_connection(&mut subscription_manager, peer_network_id, false); + + // Terminate unhealthy subscriptions and verify the subscription was removed + verify_terminated_unhealthy_subscriptions(&mut subscription_manager, vec![peer_network_id]); + + // Add a new connected peer + let connected_peer = + create_peer_and_connection(network_id, peers_and_metadata.clone(), 1, None, true); + + // Create a subscription to the new peer + create_observer_subscription( + &mut subscription_manager, + consensus_observer_config, + db_reader, + connected_peer, + TimeService::mock(), + ); + + // Check the active subscription is still healthy + check_subscription_connection(&mut subscription_manager, connected_peer, true); + + // Terminate unhealthy subscriptions and verify none are removed + verify_terminated_unhealthy_subscriptions(&mut subscription_manager, vec![]); + + // Verify that the active subscription is still present + verify_active_subscription_peers(&subscription_manager, vec![connected_peer]); + } + + #[tokio::test] + async fn test_check_subscription_health_progress_stopped() { + // Create a consensus observer config + let consensus_observer_config = ConsensusObserverConfig { + max_subscription_timeout_ms: 100_000_000, // Use a large value so that we don't time out + ..ConsensusObserverConfig::default() + }; + + // Create a consensus observer client + let network_id = NetworkId::Public; + let (peers_and_metadata, consensus_observer_client) = + create_consensus_observer_client(&[network_id]); + + // Create a new subscription manager + let db_reader = create_mock_db_reader(); + let time_service = TimeService::mock(); + let mut subscription_manager = SubscriptionManager::new( + consensus_observer_client, + consensus_observer_config, + None, + db_reader.clone(), + time_service.clone(), + ); + + // Add 
a new connected peer + let connected_peer = + create_peer_and_connection(network_id, peers_and_metadata.clone(), 1, None, true); + + // Create a subscription to the new peer + create_observer_subscription( + &mut subscription_manager, + consensus_observer_config, + db_reader.clone(), + connected_peer, + time_service.clone(), + ); + + // Check the active subscription and verify that it is healthy + check_subscription_progress(&mut subscription_manager, connected_peer, true); + + // Terminate unhealthy subscriptions and verify none are removed + verify_terminated_unhealthy_subscriptions(&mut subscription_manager, vec![]); + + // Elapse time to simulate a DB progress error + let mock_time_service = time_service.clone().into_mock(); + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.max_synced_version_timeout_ms + 1, + )); + + // Check the active subscription and verify that it is unhealthy (the DB is not syncing) + check_subscription_progress(&mut subscription_manager, connected_peer, false); + + // Terminate unhealthy subscriptions and verify the subscription was removed + verify_terminated_unhealthy_subscriptions(&mut subscription_manager, vec![connected_peer]); + + // Verify the active subscription is no longer present + verify_active_subscription_peers(&subscription_manager, vec![]); + } + + #[tokio::test] + async fn test_check_subscription_health_timeout() { + // Create a consensus observer client + let network_id = NetworkId::Public; + let (peers_and_metadata, consensus_observer_client) = + create_consensus_observer_client(&[network_id]); + + // Create a new subscription manager + let consensus_observer_config = ConsensusObserverConfig::default(); + let db_reader = create_mock_db_reader(); + let time_service = TimeService::mock(); + let mut subscription_manager = SubscriptionManager::new( + consensus_observer_client, + consensus_observer_config, + None, + db_reader.clone(), + time_service.clone(), + ); + + // Add a new connected peer 
+ let connected_peer = + create_peer_and_connection(network_id, peers_and_metadata.clone(), 1, None, true); + + // Create a subscription to the new peer + create_observer_subscription( + &mut subscription_manager, + consensus_observer_config, + db_reader.clone(), + connected_peer, + time_service.clone(), + ); + + // Check the active subscription and verify that it is healthy + check_subscription_timeout(&mut subscription_manager, connected_peer, true); + + // Terminate unhealthy subscriptions and verify none are removed + verify_terminated_unhealthy_subscriptions(&mut subscription_manager, vec![]); + + // Elapse time to simulate a timeout + let mock_time_service = time_service.clone().into_mock(); + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.max_subscription_timeout_ms + 1, + )); + + // Check the active subscription and verify that it is unhealthy (the subscription timed out) + check_subscription_timeout(&mut subscription_manager, connected_peer, false); + + // Terminate unhealthy subscriptions and verify the subscription was removed + verify_terminated_unhealthy_subscriptions(&mut subscription_manager, vec![connected_peer]); + + // Verify the active subscription is no longer present + verify_active_subscription_peers(&subscription_manager, vec![]); + } + + #[tokio::test] + async fn test_check_subscription_health_suboptimal() { + // Create a consensus observer config + let consensus_observer_config = ConsensusObserverConfig { + max_subscription_timeout_ms: 100_000_000, // Use a large value so that we don't time out + max_concurrent_subscriptions: 1, // Only allow one subscription + max_synced_version_timeout_ms: 100_000_000, // Use a large value so that we don't get DB progress errors + ..ConsensusObserverConfig::default() + }; + + // Create a consensus observer client + let network_id = NetworkId::Validator; + let (peers_and_metadata, consensus_observer_client) = + create_consensus_observer_client(&[network_id]); + + // Create a 
new subscription manager + let db_reader = create_mock_db_reader(); + let time_service = TimeService::mock(); + let mut subscription_manager = SubscriptionManager::new( + consensus_observer_client, + consensus_observer_config, + None, + db_reader.clone(), + time_service.clone(), + ); + + // Add an optimal validator peer + create_peer_and_connection(network_id, peers_and_metadata.clone(), 0, Some(0.1), true); + + // Add a suboptimal validator peer + let suboptimal_peer = + create_peer_and_connection(network_id, peers_and_metadata.clone(), 1, None, true); + + // Create a new subscription to the suboptimal peer + create_observer_subscription( + &mut subscription_manager, + consensus_observer_config, + db_reader.clone(), + suboptimal_peer, + time_service.clone(), + ); + + // Check the active subscription and verify that it is healthy + check_subscription_optimality(&mut subscription_manager, suboptimal_peer, true); + + // Terminate unhealthy subscriptions and verify none are removed + verify_terminated_unhealthy_subscriptions(&mut subscription_manager, vec![]); + + // Elapse enough time to trigger the peer optimality check + let mock_time_service = time_service.clone().into_mock(); + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.subscription_peer_change_interval_ms + 1, + )); + + // Check the active subscription and verify that it is unhealthy (the peer is suboptimal) + check_subscription_optimality(&mut subscription_manager, suboptimal_peer, false); + + // Elapse enough time to trigger the peer optimality check again + let mock_time_service = time_service.clone().into_mock(); + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.subscription_refresh_interval_ms + 1, + )); + + // Terminate any unhealthy subscriptions and verify the subscription was removed + verify_terminated_unhealthy_subscriptions(&mut subscription_manager, vec![suboptimal_peer]); + + // Verify the active subscription is no longer present + 
verify_active_subscription_peers(&subscription_manager, vec![]); + } + + #[tokio::test] + #[allow(clippy::await_holding_lock)] // Required to wait on the subscription creation task + async fn test_spawn_subscription_creation_task() { + // Create a consensus observer client + let network_id = NetworkId::Public; + let (_, consensus_observer_client) = create_consensus_observer_client(&[network_id]); + + // Create a new subscription manager + let consensus_observer_config = ConsensusObserverConfig::default(); + let db_reader = create_mock_db_reader(); + let time_service = TimeService::mock(); + let mut subscription_manager = SubscriptionManager::new( + consensus_observer_client, + consensus_observer_config, + None, + db_reader.clone(), + time_service.clone(), + ); + + // Verify that the active subscription creation task is empty + verify_subscription_creation_task(&subscription_manager, false); + + // Spawn a subscription creation task with 0 subscriptions to create + subscription_manager + .spawn_subscription_creation_task(0, vec![], vec![], hashmap![]) + .await; + + // Verify that the active subscription creation task is still empty (no task was spawned) + verify_subscription_creation_task(&subscription_manager, false); + + // Spawn a subscription creation task with 1 subscription to create + subscription_manager + .spawn_subscription_creation_task(1, vec![], vec![], hashmap![]) + .await; + + // Verify that the active subscription creation task is now populated + verify_subscription_creation_task(&subscription_manager, true); + + // Wait for the active subscription creation task to finish + if let Some(active_task) = subscription_manager + .active_subscription_creation_task + .lock() + .as_mut() + { + active_task.await.unwrap(); + } + + // Verify that the active subscription creation task is still present + verify_subscription_creation_task(&subscription_manager, true); + + // Verify that the active subscription creation task is finished + if let Some(active_task) = 
subscription_manager + .active_subscription_creation_task + .lock() + .as_ref() + { + assert!(active_task.is_finished()); + } + + // Spawn a subscription creation task with 2 subscriptions to create + subscription_manager + .spawn_subscription_creation_task(2, vec![], vec![], hashmap![]) + .await; + + // Verify the new active subscription creation task is not finished + if let Some(active_task) = subscription_manager + .active_subscription_creation_task + .lock() + .as_ref() + { + assert!(!active_task.is_finished()); + }; + } + + #[tokio::test] + async fn test_terminate_unhealthy_subscriptions_multiple() { + // Create a consensus observer client + let network_id = NetworkId::Public; + let (peers_and_metadata, consensus_observer_client) = + create_consensus_observer_client(&[network_id]); + + // Create a new subscription manager + let consensus_observer_config = ConsensusObserverConfig::default(); + let db_reader = create_mock_db_reader(); + let time_service = TimeService::mock(); + let mut subscription_manager = SubscriptionManager::new( + consensus_observer_client, + consensus_observer_config, + None, + db_reader.clone(), + time_service.clone(), + ); + + // Create two new subscriptions + let subscription_peer_1 = + create_peer_and_connection(network_id, peers_and_metadata.clone(), 1, None, true); + let subscription_peer_2 = + create_peer_and_connection(network_id, peers_and_metadata.clone(), 1, None, true); + for peer in &[subscription_peer_1, subscription_peer_2] { + // Create the subscription + create_observer_subscription( + &mut subscription_manager, + consensus_observer_config, + db_reader.clone(), + *peer, + time_service.clone(), + ); + } + + // Terminate unhealthy subscriptions and verify that both subscriptions are still healthy + verify_terminated_unhealthy_subscriptions(&mut subscription_manager, vec![]); + + // Create another subscription + let subscription_peer_3 = + create_peer_and_connection(network_id, peers_and_metadata.clone(), 1, None, true); + 
create_observer_subscription( + &mut subscription_manager, + consensus_observer_config, + db_reader.clone(), + subscription_peer_3, + TimeService::mock(), // Use a different time service (to avoid timeouts) + ); + + // Elapse time to simulate a timeout (on the first two subscriptions) + let mock_time_service = time_service.into_mock(); + mock_time_service.advance(Duration::from_millis( + consensus_observer_config.max_subscription_timeout_ms + 1, + )); + + // Terminate unhealthy subscriptions and verify the first two subscriptions were terminated + verify_terminated_unhealthy_subscriptions(&mut subscription_manager, vec![ + subscription_peer_1, + subscription_peer_2, + ]); + + // Verify the third subscription is still active + verify_active_subscription_peers(&subscription_manager, vec![subscription_peer_3]); + } + + #[tokio::test] + async fn test_unsubscribe_from_peer() { + // Create a consensus observer client + let network_id = NetworkId::Public; + let (_, consensus_observer_client) = create_consensus_observer_client(&[network_id]); + + // Create a new subscription manager + let consensus_observer_config = ConsensusObserverConfig::default(); + let db_reader = Arc::new(MockDatabaseReader::new()); + let mut subscription_manager = SubscriptionManager::new( + consensus_observer_client, + consensus_observer_config, + None, + db_reader.clone(), + TimeService::mock(), + ); + + // Verify that no subscriptions are active + verify_active_subscription_peers(&subscription_manager, vec![]); + + // Create a new subscription + let subscription_peer_1 = PeerNetworkId::random(); + create_observer_subscription( + &mut subscription_manager, + consensus_observer_config, + db_reader.clone(), + subscription_peer_1, + TimeService::mock(), + ); + + // Verify the subscription is active + verify_active_subscription_peers(&subscription_manager, vec![subscription_peer_1]); + + // Create another subscription + let subscription_peer_2 = PeerNetworkId::random(); + create_observer_subscription( 
+ &mut subscription_manager, + consensus_observer_config, + db_reader.clone(), + subscription_peer_2, + TimeService::mock(), + ); + + // Verify the second subscription is active + verify_active_subscription_peers(&subscription_manager, vec![ + subscription_peer_1, + subscription_peer_2, + ]); + + // Unsubscribe from the first peer + subscription_manager.unsubscribe_from_peer(subscription_peer_1); + + // Verify that the first subscription is no longer active + verify_active_subscription_peers(&subscription_manager, vec![subscription_peer_2]); + } + + #[tokio::test] + async fn test_verify_message_for_subscription() { + // Create a consensus observer client + let network_id = NetworkId::Public; + let (_, consensus_observer_client) = create_consensus_observer_client(&[network_id]); + + // Create a new subscription manager + let consensus_observer_config = ConsensusObserverConfig::default(); + let db_reader = Arc::new(MockDatabaseReader::new()); + let mut subscription_manager = SubscriptionManager::new( + consensus_observer_client, + consensus_observer_config, + None, + db_reader.clone(), + TimeService::mock(), + ); + + // Check that message verification fails (we have no active subscriptions) + check_message_verification_result( + &mut subscription_manager, + PeerNetworkId::random(), + false, + ); + + // Create a new subscription + let subscription_peer = PeerNetworkId::random(); + create_observer_subscription( + &mut subscription_manager, + consensus_observer_config, + db_reader.clone(), + subscription_peer, + TimeService::mock(), + ); + + // Check that message verification passes for the subscription + check_message_verification_result(&mut subscription_manager, subscription_peer, true); + + // Create another subscription + let second_subscription_peer = PeerNetworkId::random(); + create_observer_subscription( + &mut subscription_manager, + consensus_observer_config, + db_reader.clone(), + second_subscription_peer, + TimeService::mock(), + ); + + // Check that 
message verification passes for the second subscription + check_message_verification_result( + &mut subscription_manager, + second_subscription_peer, + true, + ); + + // Check that message verification fails if the peer doesn't match either subscription + check_message_verification_result( + &mut subscription_manager, + PeerNetworkId::random(), + false, + ); + } + + /// Checks the result of verifying a message from a given peer + fn check_message_verification_result( + subscription_manager: &mut SubscriptionManager, + peer_network_id: PeerNetworkId, + pass_verification: bool, + ) { + // Verify the message for the given peer + let result = subscription_manager.verify_message_for_subscription(peer_network_id); + + // Ensure the result matches the expected value + if pass_verification { + assert!(result.is_ok()); + } else { + assert_matches!(result, Err(Error::InvalidMessageError(_))); + } + } + + /// Checks the health of a subscription and verifies the connection status + fn check_subscription_connection( + subscription_manager: &mut SubscriptionManager, + subscription_peer: PeerNetworkId, + expect_connected: bool, + ) { + // Check the health of the subscription + let connected_peers_and_metadata = subscription_manager.get_connected_peers_and_metadata(); + let result = subscription_manager + .check_subscription_health(&connected_peers_and_metadata, subscription_peer); + + // Check the result based on the expected connection status + if expect_connected { + assert!(result.is_ok()); + } else { + assert_matches!(result, Err(Error::SubscriptionDisconnected(_))); + } + } + + /// Checks the health of a subscription and verifies the optimality status + fn check_subscription_optimality( + subscription_manager: &mut SubscriptionManager, + subscription_peer: PeerNetworkId, + expect_optimal: bool, + ) { + // Check the health of the subscription + let connected_peers_and_metadata = subscription_manager.get_connected_peers_and_metadata(); + let result = subscription_manager + 
.check_subscription_health(&connected_peers_and_metadata, subscription_peer); + + // Check the result based on the expected optimality status + if expect_optimal { + assert!(result.is_ok()); + } else { + assert_matches!(result, Err(Error::SubscriptionSuboptimal(_))); + } + } + + /// Checks the health of a subscription and verifies the progress status + fn check_subscription_progress( + subscription_manager: &mut SubscriptionManager, + subscription_peer: PeerNetworkId, + expect_progress: bool, + ) { + // Check the health of the subscription + let connected_peers_and_metadata = subscription_manager.get_connected_peers_and_metadata(); + let result = subscription_manager + .check_subscription_health(&connected_peers_and_metadata, subscription_peer); + + // Check the result based on the expected progress status + if expect_progress { + assert!(result.is_ok()); + } else { + assert_matches!(result, Err(Error::SubscriptionProgressStopped(_))); + } + } + + /// Checks the health of a subscription and verifies the timeout status + fn check_subscription_timeout( + subscription_manager: &mut SubscriptionManager, + subscription_peer: PeerNetworkId, + expect_timeout: bool, + ) { + // Check the health of the subscription + let connected_peers_and_metadata = subscription_manager.get_connected_peers_and_metadata(); + let result = subscription_manager + .check_subscription_health(&connected_peers_and_metadata, subscription_peer); + + // Check the result based on the expected timeout status + if expect_timeout { + assert!(result.is_ok()); + } else { + assert_matches!(result, Err(Error::SubscriptionTimeout(_))); + } + } + + /// Creates a new consensus observer client and a peers and metadata container + fn create_consensus_observer_client( + network_ids: &[NetworkId], + ) -> ( + Arc, + Arc>>, + ) { + let peers_and_metadata = PeersAndMetadata::new(network_ids); + let network_client = + NetworkClient::new(vec![], vec![], hashmap![], peers_and_metadata.clone()); + let 
consensus_observer_client = Arc::new(ConsensusObserverClient::new(network_client)); + + (peers_and_metadata, consensus_observer_client) + } + + /// Creates a mock DB reader that always returns 0 for the latest version + fn create_mock_db_reader() -> Arc { + let mut mock_db_reader = MockDatabaseReader::new(); + mock_db_reader + .expect_get_latest_ledger_info_version() + .returning(move || Ok(0)); + Arc::new(mock_db_reader) + } + + /// Creates a new observer subscription for the specified peer + fn create_observer_subscription( + subscription_manager: &mut SubscriptionManager, + consensus_observer_config: ConsensusObserverConfig, + db_reader: Arc, + subscription_peer: PeerNetworkId, + time_service: TimeService, + ) { + let observer_subscription = ConsensusObserverSubscription::new( + consensus_observer_config, + db_reader.clone(), + subscription_peer, + time_service, + ); + subscription_manager + .active_observer_subscriptions + .lock() + .insert(subscription_peer, observer_subscription); + } + + /// Creates a new peer with the specified connection metadata + fn create_peer_and_connection( + network_id: NetworkId, + peers_and_metadata: Arc, + distance_from_validators: u64, + ping_latency_secs: Option, + support_consensus_observer: bool, + ) -> PeerNetworkId { + // Create the connection metadata + let peer_network_id = PeerNetworkId::new(network_id, PeerId::random()); + let connection_metadata = if support_consensus_observer { + // Create a protocol set that supports consensus observer + let protocol_set = ProtocolIdSet::from_iter(vec![ + ProtocolId::ConsensusObserver, + ProtocolId::ConsensusObserverRpc, + ]); + + // Create the connection metadata with the protocol set + ConnectionMetadata::new( + peer_network_id.peer_id(), + ConnectionId::default(), + NetworkAddress::mock(), + ConnectionOrigin::Inbound, + MessagingProtocolVersion::V1, + protocol_set, + PeerRole::PreferredUpstream, + ) + } else { + ConnectionMetadata::mock(peer_network_id.peer_id()) + }; + + // Insert 
the connection into peers and metadata + peers_and_metadata + .insert_connection_metadata(peer_network_id, connection_metadata.clone()) + .unwrap(); + + // Update the peer monitoring metadata + let latest_network_info_response = NetworkInformationResponse { + connected_peers: BTreeMap::new(), + distance_from_validators, + }; + let monitoring_metdata = PeerMonitoringMetadata::new( + ping_latency_secs, + ping_latency_secs, + Some(latest_network_info_response), + None, + None, + ); + peers_and_metadata + .update_peer_monitoring_metadata(peer_network_id, monitoring_metdata.clone()) + .unwrap(); + + peer_network_id + } + + /// Removes the peer and connection metadata for the given peer + fn remove_peer_and_connection( + peers_and_metadata: Arc, + peer_network_id: PeerNetworkId, + ) { + let peer_metadata = peers_and_metadata + .get_metadata_for_peer(peer_network_id) + .unwrap(); + let connection_id = peer_metadata.get_connection_metadata().connection_id; + peers_and_metadata + .remove_peer_metadata(peer_network_id, connection_id) + .unwrap(); + } + + /// Verifies the active subscription peers + fn verify_active_subscription_peers( + subscription_manager: &SubscriptionManager, + expected_active_peers: Vec, + ) { + // Get the active subscription peers + let active_peers = subscription_manager.get_active_subscription_peers(); + + // Verify the active subscription peers + for peer in &expected_active_peers { + assert!(active_peers.contains(peer)); + } + assert_eq!(active_peers.len(), expected_active_peers.len()); + } + + /// Verifies the status of the active subscription creation task + fn verify_subscription_creation_task( + subscription_manager: &SubscriptionManager, + expect_active_task: bool, + ) { + let current_active_task = subscription_manager + .active_subscription_creation_task + .lock() + .is_some(); + assert_eq!(current_active_task, expect_active_task); + } + + /// Verifies the list of terminated unhealthy subscriptions + fn 
verify_terminated_unhealthy_subscriptions( + subscription_manager: &mut SubscriptionManager, + expected_terminated_peers: Vec, + ) { + // Get the connected peers and metadata + let connected_peers_and_metadata = subscription_manager.get_connected_peers_and_metadata(); + + // Terminate any unhealthy subscriptions + let terminated_subscriptions = + subscription_manager.terminate_unhealthy_subscriptions(&connected_peers_and_metadata); + + // Verify the terminated subscriptions + for (terminated_subscription_peer, _) in &terminated_subscriptions { + assert!(expected_terminated_peers.contains(terminated_subscription_peer)); + } + assert_eq!( + terminated_subscriptions.len(), + expected_terminated_peers.len() + ); + } +} diff --git a/consensus/src/consensus_observer/observer/subscription_utils.rs b/consensus/src/consensus_observer/observer/subscription_utils.rs new file mode 100644 index 0000000000000..0bca7c61b007d --- /dev/null +++ b/consensus/src/consensus_observer/observer/subscription_utils.rs @@ -0,0 +1,1186 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::consensus_observer::{ + common::logging::{LogEntry, LogSchema}, + network::{ + observer_client::ConsensusObserverClient, + observer_message::{ + ConsensusObserverMessage, ConsensusObserverRequest, ConsensusObserverResponse, + }, + }, + observer::subscription::ConsensusObserverSubscription, + publisher::consensus_publisher::ConsensusPublisher, +}; +use aptos_config::{config::ConsensusObserverConfig, network_id::PeerNetworkId}; +use aptos_logger::{error, info, warn}; +use aptos_network::{ + application::{interface::NetworkClient, metadata::PeerMetadata}, + ProtocolId, +}; +use aptos_storage_interface::DbReader; +use aptos_time_service::TimeService; +use ordered_float::OrderedFloat; +use std::{ + collections::{BTreeMap, HashMap}, + sync::Arc, +}; + +// A useful constant for representing the maximum ping latency +const MAX_PING_LATENCY_SECS: f64 = 10_000.0; + +/// Attempts to 
create the given number of new subscriptions +/// from the connected peers and metadata. Any active or unhealthy +/// subscriptions are excluded from the selection process. +pub async fn create_new_subscriptions( + consensus_observer_config: ConsensusObserverConfig, + consensus_observer_client: Arc< + ConsensusObserverClient>, + >, + consensus_publisher: Option>, + db_reader: Arc, + time_service: TimeService, + connected_peers_and_metadata: HashMap, + num_subscriptions_to_create: usize, + active_subscription_peers: Vec, + unhealthy_subscription_peers: Vec, +) -> Vec { + // Sort the potential peers for subscription requests + let mut sorted_potential_peers = match sort_peers_for_subscriptions( + connected_peers_and_metadata, + unhealthy_subscription_peers, + active_subscription_peers, + consensus_publisher, + ) { + Some(sorted_peers) => sorted_peers, + None => { + error!(LogSchema::new(LogEntry::ConsensusObserver) + .message("Failed to sort peers for subscription requests!")); + return vec![]; + }, + }; + + // Verify that we have potential peers to subscribe to + if sorted_potential_peers.is_empty() { + warn!(LogSchema::new(LogEntry::ConsensusObserver) + .message("There are no potential peers to subscribe to!")); + return vec![]; + } + + // Go through the potential peers and attempt to create new subscriptions + let mut created_subscriptions = vec![]; + for _ in 0..num_subscriptions_to_create { + // If there are no peers left to subscribe to, return early + if sorted_potential_peers.is_empty() { + info!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "There are no more potential peers to subscribe to! 
\ + Num created subscriptions: {:?}", + created_subscriptions.len() + )) + ); + break; + } + + // Attempt to create a new subscription + let (observer_subscription, failed_subscription_peers) = create_single_subscription( + consensus_observer_config, + consensus_observer_client.clone(), + db_reader.clone(), + sorted_potential_peers.clone(), + time_service.clone(), + ) + .await; + + // Remove the failed peers from the sorted list + sorted_potential_peers.retain(|peer| !failed_subscription_peers.contains(peer)); + + // Process a successful subscription creation + if let Some(observer_subscription) = observer_subscription { + // Remove the peer from the sorted list (for the next selection) + sorted_potential_peers + .retain(|peer| *peer != observer_subscription.get_peer_network_id()); + + // Add the newly created subscription to the subscription list + created_subscriptions.push(observer_subscription); + } + } + + // Return the list of created subscriptions + created_subscriptions +} + +/// Attempts to create a new subscription to a single peer from the +/// sorted list of potential peers. If successful, the new subscription +/// is returned, alongside any peers with failed attempts. 
+async fn create_single_subscription( + consensus_observer_config: ConsensusObserverConfig, + consensus_observer_client: Arc< + ConsensusObserverClient>, + >, + db_reader: Arc, + sorted_potential_peers: Vec, + time_service: TimeService, +) -> (Option, Vec) { + let mut peers_with_failed_attempts = vec![]; + for potential_peer in sorted_potential_peers { + // Log the subscription attempt + info!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Attempting to subscribe to potential peer: {}!", + potential_peer + )) + ); + + // Send a subscription request to the peer and wait for the response + let subscription_request = ConsensusObserverRequest::Subscribe; + let request_timeout_ms = consensus_observer_config.network_request_timeout_ms; + let response = consensus_observer_client + .send_rpc_request_to_peer(&potential_peer, subscription_request, request_timeout_ms) + .await; + + // Process the response and update the active subscription + match response { + Ok(ConsensusObserverResponse::SubscribeAck) => { + // Log the successful subscription + info!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Successfully subscribed to peer: {}!", + potential_peer + )) + ); + + // Create the new subscription + let subscription = ConsensusObserverSubscription::new( + consensus_observer_config, + db_reader.clone(), + potential_peer, + time_service.clone(), + ); + + // Return the successful subscription + return (Some(subscription), peers_with_failed_attempts); + }, + Ok(response) => { + // We received an invalid response + warn!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Got unexpected response type for subscription request: {:?}", + response.get_label() + )) + ); + + // Add the peer to the list of failed attempts + peers_with_failed_attempts.push(potential_peer); + }, + Err(error) => { + // We encountered an error while sending the request + warn!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Failed 
to send subscription request to peer: {}! Error: {:?}", + potential_peer, error + )) + ); + + // Add the peer to the list of failed attempts + peers_with_failed_attempts.push(potential_peer); + }, + } + } + + // We failed to create a new subscription + (None, peers_with_failed_attempts) +} + +/// Gets the distance from the validators for the specified peer from the peer metadata +fn get_distance_for_peer( + peer_network_id: &PeerNetworkId, + peer_metadata: &PeerMetadata, +) -> Option { + // Get the distance for the peer + let peer_monitoring_metadata = peer_metadata.get_peer_monitoring_metadata(); + let distance = peer_monitoring_metadata + .latest_network_info_response + .as_ref() + .map(|response| response.distance_from_validators); + + // If the distance is missing, log a warning + if distance.is_none() { + warn!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Unable to get distance for peer! Peer: {:?}", + peer_network_id + )) + ); + } + + distance +} + +/// Gets the latency for the specified peer from the peer metadata +fn get_latency_for_peer( + peer_network_id: &PeerNetworkId, + peer_metadata: &PeerMetadata, +) -> Option { + // Get the latency for the peer + let peer_monitoring_metadata = peer_metadata.get_peer_monitoring_metadata(); + let latency = peer_monitoring_metadata.average_ping_latency_secs; + + // If the latency is missing, log a warning + if latency.is_none() { + warn!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Unable to get latency for peer! Peer: {:?}", + peer_network_id + )) + ); + } + + latency +} + +/// Produces a list of sorted peers to service the subscription requests. +/// Any active or unhealthy subscriptions are excluded from the selection process. +/// Likewise, any peers currently subscribed to us are also excluded. 
+fn sort_peers_for_subscriptions( + mut connected_peers_and_metadata: HashMap, + active_subscription_peers: Vec, + unhealthy_subscription_peers: Vec, + consensus_publisher: Option>, +) -> Option> { + // Remove any peers we're already subscribed to + for active_subscription_peer in active_subscription_peers { + let _ = connected_peers_and_metadata.remove(&active_subscription_peer); + } + + // Remove any unhealthy subscription peers + for unhealthy_peer in unhealthy_subscription_peers { + let _ = connected_peers_and_metadata.remove(&unhealthy_peer); + } + + // Remove any peers that are currently subscribed to us + if let Some(consensus_publisher) = consensus_publisher { + for peer_network_id in consensus_publisher.get_active_subscribers() { + let _ = connected_peers_and_metadata.remove(&peer_network_id); + } + } + + // Sort the peers by subscription optimality + let sorted_peers = sort_peers_by_subscription_optimality(&connected_peers_and_metadata); + + // Return the sorted peers + Some(sorted_peers) +} + +/// Sorts the peers by subscription optimality (in descending order of +/// optimality). This requires: (i) sorting the peers by distance from the +/// validator set and ping latency (lower values are more optimal); and (ii) +/// filtering out peers that don't support consensus observer. +/// +/// Note: we prioritize distance over latency as we want to avoid close +/// but not up-to-date peers. If peers don't have sufficient metadata +/// for sorting, they are given a lower priority. 
+pub fn sort_peers_by_subscription_optimality( + peers_and_metadata: &HashMap, +) -> Vec { + // Group peers and latencies by validator distance, i.e., distance -> [(peer, latency)] + let mut unsupported_peers = Vec::new(); + let mut peers_and_latencies_by_distance = BTreeMap::new(); + for (peer_network_id, peer_metadata) in peers_and_metadata { + // Verify that the peer supports consensus observer + if !supports_consensus_observer(peer_metadata) { + unsupported_peers.push(*peer_network_id); + continue; // Skip the peer + } + + // Get the distance and latency for the peer + let distance = get_distance_for_peer(peer_network_id, peer_metadata); + let latency = get_latency_for_peer(peer_network_id, peer_metadata); + + // If the distance is not found, use the maximum distance + let distance = + distance.unwrap_or(aptos_peer_monitoring_service_types::MAX_DISTANCE_FROM_VALIDATORS); + + // If the latency is not found, use a large latency + let latency = latency.unwrap_or(MAX_PING_LATENCY_SECS); + + // Add the peer and latency to the distance group + peers_and_latencies_by_distance + .entry(distance) + .or_insert_with(Vec::new) + .push((*peer_network_id, OrderedFloat(latency))); + } + + // If there are peers that don't support consensus observer, log them + if !unsupported_peers.is_empty() { + info!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Found {} peers that don't support consensus observer! Peers: {:?}", + unsupported_peers.len(), + unsupported_peers + )) + ); + } + + // Sort the peers by distance and latency. Note: BTreeMaps are + // sorted by key, so the entries will be sorted by distance in ascending order. 
+ let mut sorted_peers = Vec::new(); + for (_, mut peers_and_latencies) in peers_and_latencies_by_distance { + // Sort the peers by latency + peers_and_latencies.sort_by_key(|(_, latency)| *latency); + + // Add the peers to the sorted list (in sorted order) + sorted_peers.extend( + peers_and_latencies + .into_iter() + .map(|(peer_network_id, _)| peer_network_id), + ); + } + + // Log the sorted peers + info!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Sorted {} peers by subscription optimality! Peers: {:?}", + sorted_peers.len(), + sorted_peers + )) + ); + + sorted_peers +} + +/// Returns true iff the peer metadata indicates support for consensus observer +fn supports_consensus_observer(peer_metadata: &PeerMetadata) -> bool { + peer_metadata.supports_protocol(ProtocolId::ConsensusObserver) + && peer_metadata.supports_protocol(ProtocolId::ConsensusObserverRpc) +} + +#[cfg(test)] +mod tests { + use super::*; + use aptos_channels::{aptos_channel, message_queues::QueueStyle}; + use aptos_config::{config::PeerRole, network_id::NetworkId}; + use aptos_netcore::transport::ConnectionOrigin; + use aptos_network::{ + application::storage::PeersAndMetadata, + peer_manager::{ConnectionRequestSender, PeerManagerRequest, PeerManagerRequestSender}, + protocols::{ + network::{NetworkSender, NewNetworkSender}, + wire::handshake::v1::{MessagingProtocolVersion, ProtocolIdSet}, + }, + transport::{ConnectionId, ConnectionMetadata}, + }; + use aptos_peer_monitoring_service_types::{ + response::NetworkInformationResponse, PeerMonitoringMetadata, + }; + use aptos_storage_interface::Result; + use aptos_types::{network_address::NetworkAddress, transaction::Version, PeerId}; + use bytes::Bytes; + use futures::StreamExt; + use mockall::mock; + use std::collections::HashSet; + + // This is a simple mock of the DbReader (it generates a MockDatabaseReader) + mock! 
{ + pub DatabaseReader {} + impl DbReader for DatabaseReader { + fn get_latest_ledger_info_version(&self) -> Result; + } + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_create_new_subscriptions() { + // Create a consensus observer config and client + let consensus_observer_config = ConsensusObserverConfig::default(); + let network_ids = &[NetworkId::Validator, NetworkId::Vfn, NetworkId::Public]; + let (peers_and_metadata, consensus_observer_client, mut peer_manager_request_receivers) = + create_consensus_observer_client(network_ids); + + // Create a list of connected peers (one per network) + let mut connected_peers = vec![]; + for network_id in &[NetworkId::Validator, NetworkId::Vfn, NetworkId::Public] { + // Create a new peer + let peer_network_id = create_peer_and_connection( + *network_id, + peers_and_metadata.clone(), + get_distance_from_validators(network_id), + None, + true, + ); + + // Add the peer to the list of sorted peers + connected_peers.push(peer_network_id); + } + + // Get the connected peers and metadata + let connected_peers_and_metadata = peers_and_metadata + .get_connected_peers_and_metadata() + .unwrap(); + + // Spawn the subscription creation task to create 2 subscriptions + let num_subscriptions_to_create = 2; + let subscription_creation_handle = tokio::spawn(async move { + create_new_subscriptions( + consensus_observer_config, + consensus_observer_client.clone(), + None, + Arc::new(MockDatabaseReader::new()), + TimeService::mock(), + connected_peers_and_metadata, + num_subscriptions_to_create, + vec![], + vec![], + ) + .await + }); + + // Handle the peer manager requests made by the subscription creation task. + // The VFN peer should fail the subscription request. 
+ for connected_peer in &connected_peers { + let network_id = connected_peer.network_id(); + handle_next_subscription_request( + network_id, + &mut peer_manager_request_receivers, + network_id != NetworkId::Vfn, // The VFN peer should fail the subscription request + ) + .await; + } + + // Wait for the subscription creation task to complete + let consensus_observer_subscriptions = subscription_creation_handle.await.unwrap(); + + // Verify the number of created subscriptions + assert_eq!( + consensus_observer_subscriptions.len(), + num_subscriptions_to_create + ); + + // Verify the created subscription peers + let first_peer = *connected_peers.first().unwrap(); + let last_peer = *connected_peers.last().unwrap(); + let expected_subscription_peers = [first_peer, last_peer]; + for consensus_observer_subscription in consensus_observer_subscriptions { + let peer_network_id = consensus_observer_subscription.get_peer_network_id(); + assert!(expected_subscription_peers.contains(&peer_network_id)); + } + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_create_new_subscriptions_multiple() { + // Create a consensus observer config and client + let consensus_observer_config = ConsensusObserverConfig::default(); + let network_ids = &[NetworkId::Validator, NetworkId::Vfn, NetworkId::Public]; + let (peers_and_metadata, consensus_observer_client, mut peer_manager_request_receivers) = + create_consensus_observer_client(network_ids); + + // Create a list of connected peers (one per network) + let mut connected_peers = vec![]; + for network_id in &[NetworkId::Validator, NetworkId::Vfn, NetworkId::Public] { + // Create a new peer + let peer_network_id = create_peer_and_connection( + *network_id, + peers_and_metadata.clone(), + get_distance_from_validators(network_id), + None, + true, + ); + + // Add the peer to the list of sorted peers + connected_peers.push(peer_network_id); + } + + // Create multiple sets of subscriptions and verify the results + for 
num_subscriptions_to_create in [0, 1, 2, 3, 10] { + // Determine the expected subscription peers + let expected_subscription_peers = connected_peers + .iter() + .take(num_subscriptions_to_create) + .cloned() + .collect(); + + // Create the subscriptions and verify the result + create_and_verify_subscriptions( + consensus_observer_config, + peers_and_metadata.clone(), + consensus_observer_client.clone(), + &mut peer_manager_request_receivers, + num_subscriptions_to_create, + expected_subscription_peers, + ) + .await; + } + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_create_single_subscription() { + // Create a consensus observer config and client + let consensus_observer_config = ConsensusObserverConfig::default(); + let network_ids = &[NetworkId::Validator, NetworkId::Vfn, NetworkId::Public]; + let (peers_and_metadata, consensus_observer_client, mut peer_manager_request_receivers) = + create_consensus_observer_client(network_ids); + + // Create a list of connected peers (one per network) + let mut connected_peers = vec![]; + for network_id in &[NetworkId::Validator, NetworkId::Vfn, NetworkId::Public] { + // Create a new peer + let peer_network_id = + create_peer_and_connection(*network_id, peers_and_metadata.clone(), 0, None, true); + + // Add the peer to the list of sorted peers + connected_peers.push(peer_network_id); + } + + // Spawn the subscription creation task + let sorted_potential_peers = connected_peers.clone(); + let subscription_creation_handle = tokio::spawn(async move { + create_single_subscription( + consensus_observer_config, + consensus_observer_client.clone(), + Arc::new(MockDatabaseReader::new()), + sorted_potential_peers, + TimeService::mock(), + ) + .await + }); + + // Handle the peer manager requests made by the subscription creation task. + // We should only respond successfully to the peer on the public network. 
+ handle_next_subscription_request( + NetworkId::Validator, + &mut peer_manager_request_receivers, + false, + ) + .await; + handle_next_subscription_request( + NetworkId::Vfn, + &mut peer_manager_request_receivers, + false, + ) + .await; + handle_next_subscription_request( + NetworkId::Public, + &mut peer_manager_request_receivers, + true, + ) + .await; + + // Wait for the subscription creation task to complete + let (observer_subscription, failed_subscription_peers) = + subscription_creation_handle.await.unwrap(); + + // Verify that the public peer was successfully subscribed to + assert_eq!( + &observer_subscription.unwrap().get_peer_network_id(), + connected_peers.last().unwrap() + ); + + // Verify that the other peers failed our subscription attempts + let expected_failed_peers = connected_peers.iter().take(2).cloned().collect::>(); + assert_eq!(failed_subscription_peers, expected_failed_peers); + } + + #[test] + fn test_sort_peers_by_distance_and_latency() { + // Sort an empty list of peers + let peers_and_metadata = HashMap::new(); + assert!(sort_peers_by_subscription_optimality(&peers_and_metadata).is_empty()); + + // Create a list of peers with empty metadata + let peers_and_metadata = create_peers_and_metadata(true, true, true, 10); + + // Sort the peers and verify the results + let sorted_peers = sort_peers_by_subscription_optimality(&peers_and_metadata); + assert_eq!(sorted_peers.len(), 10); + + // Create a list of peers with valid metadata + let peers_and_metadata = create_peers_and_metadata(false, false, true, 10); + + // Sort the peers + let sorted_peers = sort_peers_by_subscription_optimality(&peers_and_metadata); + + // Verify the order of the peers + verify_increasing_distance_latencies(&peers_and_metadata, &sorted_peers); + assert_eq!(sorted_peers.len(), 10); + + // Create a list of peers with and without metadata + let mut peers_and_metadata = create_peers_and_metadata(false, false, true, 10); + 
peers_and_metadata.extend(create_peers_and_metadata(true, false, true, 10)); + peers_and_metadata.extend(create_peers_and_metadata(false, true, true, 10)); + peers_and_metadata.extend(create_peers_and_metadata(true, true, true, 10)); + + // Sort the peers + let sorted_peers = sort_peers_by_subscription_optimality(&peers_and_metadata); + assert_eq!(sorted_peers.len(), 40); + + // Verify the order of the first 20 peers + let (first_20_peers, sorted_peers) = sorted_peers.split_at(20); + verify_increasing_distance_latencies(&peers_and_metadata, first_20_peers); + + // Verify that the next 10 peers only have latency metadata + let (next_10_peers, sorted_peers) = sorted_peers.split_at(10); + for sorted_peer in next_10_peers { + let peer_metadata = peers_and_metadata.get(sorted_peer).unwrap(); + assert!(get_distance_for_peer(sorted_peer, peer_metadata).is_none()); + assert!(get_latency_for_peer(sorted_peer, peer_metadata).is_some()); + } + + // Verify that the last 10 peers have no metadata + let (last_10_peers, remaining_peers) = sorted_peers.split_at(10); + for sorted_peer in last_10_peers { + let peer_metadata = peers_and_metadata.get(sorted_peer).unwrap(); + assert!(get_distance_for_peer(sorted_peer, peer_metadata).is_none()); + assert!(get_latency_for_peer(sorted_peer, peer_metadata).is_none()); + } + assert!(remaining_peers.is_empty()); + } + + #[test] + fn test_sort_peers_by_distance_and_latency_filter() { + // Sort an empty list of peers + let peers_and_metadata = HashMap::new(); + assert!(sort_peers_by_subscription_optimality(&peers_and_metadata).is_empty()); + + // Create a list of peers with empty metadata (with consensus observer support) + let peers_and_metadata = create_peers_and_metadata(true, true, true, 10); + + // Sort the peers and verify the results + let sorted_peers = sort_peers_by_subscription_optimality(&peers_and_metadata); + assert_eq!(sorted_peers.len(), 10); + + // Create a list of peers with empty metadata (without consensus observer support) 
+ let peers_and_metadata = create_peers_and_metadata(true, true, false, 10); + + // Sort the peers and verify the results + let sorted_peers = sort_peers_by_subscription_optimality(&peers_and_metadata); + assert!(sorted_peers.is_empty()); + + // Create a list of peers with valid metadata (without consensus observer support) + let peers_and_metadata = create_peers_and_metadata(false, false, false, 10); + + // Sort the peers and verify the results + let sorted_peers = sort_peers_by_subscription_optimality(&peers_and_metadata); + assert!(sorted_peers.is_empty()); + + // Create a list of peers with empty metadata (with and without consensus observer support) + let mut peers_and_metadata = create_peers_and_metadata(true, true, true, 5); + peers_and_metadata.extend(create_peers_and_metadata(true, true, false, 50)); + + // Sort the peers and verify the results (only the supported peers are sorted) + let sorted_peers = sort_peers_by_subscription_optimality(&peers_and_metadata); + assert_eq!(sorted_peers.len(), 5); + + // Create a list of peers with valid metadata (with and without consensus observer support) + let mut peers_and_metadata = create_peers_and_metadata(false, false, true, 50); + peers_and_metadata.extend(create_peers_and_metadata(false, false, false, 10)); + + // Sort the peers and verify the results (only the supported peers are sorted) + let sorted_peers = sort_peers_by_subscription_optimality(&peers_and_metadata); + assert_eq!(sorted_peers.len(), 50); + + // Create a list of peers with valid metadata (with and without consensus observer support) + let supported_peer_and_metadata = create_peers_and_metadata(false, false, true, 1); + let unsupported_peer_and_metadata = create_peers_and_metadata(false, false, false, 1); + let mut peers_and_metadata = HashMap::new(); + peers_and_metadata.extend(supported_peer_and_metadata.clone()); + peers_and_metadata.extend(unsupported_peer_and_metadata); + + // Sort the peers and verify the results (only the supported peer is 
sorted) + let supported_peer = supported_peer_and_metadata.keys().next().unwrap(); + let sorted_peers = sort_peers_by_subscription_optimality(&peers_and_metadata); + assert_eq!(sorted_peers, vec![*supported_peer]); + } + + #[tokio::test] + async fn test_sort_peers_for_subscriptions() { + // Create a consensus observer client + let network_ids = &[NetworkId::Validator, NetworkId::Vfn, NetworkId::Public]; + let (peers_and_metadata, consensus_observer_client, _) = + create_consensus_observer_client(network_ids); + + // Create a consensus publisher + let consensus_observer_config = ConsensusObserverConfig::default(); + let (consensus_publisher, _) = + ConsensusPublisher::new(consensus_observer_config, consensus_observer_client.clone()); + let consensus_publisher = Arc::new(consensus_publisher); + + // Sort the peers and verify that no peers are returned + let sorted_peers = sort_subscription_peers( + consensus_publisher.clone(), + peers_and_metadata.clone(), + vec![], + vec![], + ); + assert!(sorted_peers.is_empty()); + + // Add a connected validator peer, VFN peer and public peer + for network_id in network_ids { + create_peer_and_connection( + *network_id, + peers_and_metadata.clone(), + get_distance_from_validators(network_id), + None, + true, + ); + } + + // Sort the peers and verify the ordering (according to distance) + let sorted_peers = sort_subscription_peers( + consensus_publisher.clone(), + peers_and_metadata.clone(), + vec![], + vec![], + ); + assert_eq!(sorted_peers[0].network_id(), NetworkId::Validator); + assert_eq!(sorted_peers[1].network_id(), NetworkId::Vfn); + assert_eq!(sorted_peers[2].network_id(), NetworkId::Public); + assert_eq!(sorted_peers.len(), 3); + + // Sort the peers, but mark the validator as unhealthy (so it's ignored) + let sorted_peer_subset = sort_subscription_peers( + consensus_publisher.clone(), + peers_and_metadata.clone(), + vec![], + vec![sorted_peers[0]], + ); + assert_eq!(sorted_peer_subset[0].network_id(), NetworkId::Vfn); + 
assert_eq!(sorted_peer_subset[1].network_id(), NetworkId::Public); + assert_eq!(sorted_peer_subset.len(), 2); + + // Sort the peers, but mark the VFN and validator as active subscriptions (so they're ignored) + let sorted_peer_subset = sort_subscription_peers( + consensus_publisher.clone(), + peers_and_metadata.clone(), + vec![sorted_peers[0], sorted_peers[1]], + vec![], + ); + assert_eq!(sorted_peer_subset[0].network_id(), NetworkId::Public); + assert_eq!(sorted_peer_subset.len(), 1); + + // Create a consensus publisher with the PFN as an active subscriber + let consensus_publisher_with_subscribers = + Arc::new(ConsensusPublisher::new_with_active_subscribers( + consensus_observer_config, + consensus_observer_client.clone(), + HashSet::from_iter(vec![sorted_peers[2]]), + )); + + // Sort the peers, and verify the PFN is ignored (since it's an active subscriber) + let sorted_peer_subset = sort_subscription_peers( + consensus_publisher_with_subscribers, + peers_and_metadata.clone(), + vec![], + vec![], + ); + assert_eq!(sorted_peer_subset[0].network_id(), NetworkId::Validator); + assert_eq!(sorted_peer_subset[1].network_id(), NetworkId::Vfn); + assert_eq!(sorted_peer_subset.len(), 2); + + // Remove all the peers and verify that no peers are returned upon sorting + for peer_network_id in sorted_peers { + remove_peer_and_connection(peers_and_metadata.clone(), peer_network_id); + } + let sorted_peers = sort_subscription_peers( + consensus_publisher.clone(), + peers_and_metadata.clone(), + vec![], + vec![], + ); + assert!(sorted_peers.is_empty()); + + // Add multiple validator peers, with different latencies + let mut validator_peers = vec![]; + for ping_latency_secs in [0.9, 0.8, 0.5, 0.1, 0.05] { + let validator_peer = create_peer_and_connection( + NetworkId::Validator, + peers_and_metadata.clone(), + 0, + Some(ping_latency_secs), + true, + ); + validator_peers.push(validator_peer); + } + + // Sort the peers and verify the ordering (according to latency) + let 
sorted_peers = sort_subscription_peers( + consensus_publisher, + peers_and_metadata.clone(), + vec![], + vec![], + ); + let expected_peers = validator_peers.into_iter().rev().collect::>(); + assert_eq!(sorted_peers, expected_peers); + } + + /// Creates new subscriptions and verifies the results + async fn create_and_verify_subscriptions( + consensus_observer_config: ConsensusObserverConfig, + peers_and_metadata: Arc, + consensus_observer_client: Arc< + ConsensusObserverClient>, + >, + peer_manager_request_receivers: &mut HashMap< + NetworkId, + aptos_channel::Receiver<(PeerId, ProtocolId), PeerManagerRequest>, + >, + num_subscriptions_to_create: usize, + expected_subscription_peers: Vec, + ) { + // Get the connected peers and metadata + let connected_peers_and_metadata = peers_and_metadata + .get_connected_peers_and_metadata() + .unwrap(); + + // Spawn the subscription creation task + let subscription_creation_handle = tokio::spawn(async move { + create_new_subscriptions( + consensus_observer_config, + consensus_observer_client.clone(), + None, + Arc::new(MockDatabaseReader::new()), + TimeService::mock(), + connected_peers_and_metadata, + num_subscriptions_to_create, + vec![], + vec![], + ) + .await + }); + + // Handle the peer manager requests made by the subscription creation task + for expected_subscription_peer in &expected_subscription_peers { + handle_next_subscription_request( + expected_subscription_peer.network_id(), + peer_manager_request_receivers, + true, + ) + .await; + } + + // Wait for the subscription creation task to complete + let consensus_observer_subscriptions = subscription_creation_handle.await.unwrap(); + + // Verify the created subscriptions + assert_eq!( + consensus_observer_subscriptions.len(), + expected_subscription_peers.len() + ); + for subscription in consensus_observer_subscriptions { + assert!(expected_subscription_peers.contains(&subscription.get_peer_network_id())); + } + } + + /// Creates a new connection metadata for testing + 
fn create_connection_metadata( + peer_network_id: PeerNetworkId, + support_consensus_observer: bool, + ) -> ConnectionMetadata { + if support_consensus_observer { + // Create a protocol set that supports consensus observer + let protocol_set = ProtocolIdSet::from_iter(vec![ + ProtocolId::ConsensusObserver, + ProtocolId::ConsensusObserverRpc, + ]); + + // Create the connection metadata with the protocol set + ConnectionMetadata::new( + peer_network_id.peer_id(), + ConnectionId::default(), + NetworkAddress::mock(), + ConnectionOrigin::Inbound, + MessagingProtocolVersion::V1, + protocol_set, + PeerRole::PreferredUpstream, + ) + } else { + ConnectionMetadata::mock(peer_network_id.peer_id()) + } + } + + /// Creates a new consensus observer client, along with the + /// associated network senders and peers and metadata. + fn create_consensus_observer_client( + network_ids: &[NetworkId], + ) -> ( + Arc, + Arc>>, + HashMap>, + ) { + // Create the network senders and receivers for each network + let mut network_senders = HashMap::new(); + let mut peer_manager_request_receivers = HashMap::new(); + for network_id in network_ids { + // Create the request managers + let queue_cfg = aptos_channel::Config::new(10).queue_style(QueueStyle::FIFO); + let (peer_manager_request_sender, peer_manager_request_receiver) = queue_cfg.build(); + let (connected_request_sender, _) = queue_cfg.build(); + + // Create the network sender + let network_sender = NetworkSender::new( + PeerManagerRequestSender::new(peer_manager_request_sender), + ConnectionRequestSender::new(connected_request_sender), + ); + + // Save the network sender and the request receiver + network_senders.insert(*network_id, network_sender); + peer_manager_request_receivers.insert(*network_id, peer_manager_request_receiver); + } + + // Create the network client + let peers_and_metadata = PeersAndMetadata::new(network_ids); + let network_client = NetworkClient::new( + vec![ProtocolId::ConsensusObserver], + 
vec![ProtocolId::ConsensusObserverRpc], + network_senders, + peers_and_metadata.clone(), + ); + + // Create the consensus observer client + let consensus_observer_client = Arc::new(ConsensusObserverClient::new(network_client)); + + ( + peers_and_metadata, + consensus_observer_client, + peer_manager_request_receivers, + ) + } + + /// Creates a new peer with the specified connection metadata + fn create_peer_and_connection( + network_id: NetworkId, + peers_and_metadata: Arc, + distance_from_validators: u64, + ping_latency_secs: Option, + support_consensus_observer: bool, + ) -> PeerNetworkId { + // Create the connection metadata + let peer_network_id = PeerNetworkId::new(network_id, PeerId::random()); + let connection_metadata = if support_consensus_observer { + // Create a protocol set that supports consensus observer + let protocol_set = ProtocolIdSet::from_iter(vec![ + ProtocolId::ConsensusObserver, + ProtocolId::ConsensusObserverRpc, + ]); + + // Create the connection metadata with the protocol set + ConnectionMetadata::new( + peer_network_id.peer_id(), + ConnectionId::default(), + NetworkAddress::mock(), + ConnectionOrigin::Inbound, + MessagingProtocolVersion::V1, + protocol_set, + PeerRole::PreferredUpstream, + ) + } else { + ConnectionMetadata::mock(peer_network_id.peer_id()) + }; + + // Insert the connection into peers and metadata + peers_and_metadata + .insert_connection_metadata(peer_network_id, connection_metadata.clone()) + .unwrap(); + + // Update the peer monitoring metadata + let latest_network_info_response = NetworkInformationResponse { + connected_peers: BTreeMap::new(), + distance_from_validators, + }; + let monitoring_metdata = PeerMonitoringMetadata::new( + ping_latency_secs, + ping_latency_secs, + Some(latest_network_info_response), + None, + None, + ); + peers_and_metadata + .update_peer_monitoring_metadata(peer_network_id, monitoring_metdata.clone()) + .unwrap(); + + peer_network_id + } + + /// Creates a new peer and metadata for testing + fn 
create_peer_and_metadata( + latency: Option, + distance_from_validators: Option, + support_consensus_observer: bool, + ) -> (PeerNetworkId, PeerMetadata) { + // Create a random peer + let peer_network_id = PeerNetworkId::random(); + + // Create a new peer metadata with the given latency and distance + let connection_metadata = + create_connection_metadata(peer_network_id, support_consensus_observer); + let network_information_response = + distance_from_validators.map(|distance| NetworkInformationResponse { + connected_peers: BTreeMap::new(), + distance_from_validators: distance, + }); + let peer_monitoring_metadata = + PeerMonitoringMetadata::new(latency, None, network_information_response, None, None); + let peer_metadata = + PeerMetadata::new_for_test(connection_metadata, peer_monitoring_metadata); + + (peer_network_id, peer_metadata) + } + + /// Creates a list of peers and metadata for testing + fn create_peers_and_metadata( + empty_latency: bool, + empty_distance: bool, + support_consensus_observer: bool, + num_peers: u64, + ) -> HashMap { + let mut peers_and_metadata = HashMap::new(); + for i in 1..num_peers + 1 { + // Determine the distance for the peer + let distance = if empty_distance { None } else { Some(i) }; + + // Determine the latency for the peer + let latency = if empty_latency { None } else { Some(i as f64) }; + + // Create a new peer and metadata + let (peer_network_id, peer_metadata) = + create_peer_and_metadata(latency, distance, support_consensus_observer); + peers_and_metadata.insert(peer_network_id, peer_metadata); + } + peers_and_metadata + } + + /// Returns the distance from the validators for the specified network + fn get_distance_from_validators(network_id: &NetworkId) -> u64 { + match network_id { + NetworkId::Validator => 0, + NetworkId::Vfn => 1, + NetworkId::Public => 2, + } + } + + /// Fetches and handles the next subscription request from the peer manager + async fn handle_next_subscription_request( + network_id: NetworkId, + 
peer_manager_request_receivers: &mut HashMap< + NetworkId, + aptos_channel::Receiver<(PeerId, ProtocolId), PeerManagerRequest>, + >, + return_successfully: bool, + ) { + // Get the request receiver for the given network + let peer_manager_request_receiver = + peer_manager_request_receivers.get_mut(&network_id).unwrap(); + + // Wait for the next subscription request + match peer_manager_request_receiver.next().await { + Some(PeerManagerRequest::SendRpc(_, network_request)) => { + // Parse the network request + let data = network_request.data; + let response_sender = network_request.res_tx; + let message: ConsensusObserverMessage = bcs::from_bytes(data.as_ref()).unwrap(); + + // Process the network message + match message { + ConsensusObserverMessage::Request(request) => { + // Verify the request is for a new subscription + match request { + ConsensusObserverRequest::Subscribe => (), + _ => panic!( + "Unexpected consensus observer request received: {:?}!", + request + ), + } + + // Determine the response to send + let response = if return_successfully { + // Ack the subscription request + ConsensusObserverResponse::SubscribeAck + } else { + // Respond with the wrong message type + ConsensusObserverResponse::UnsubscribeAck + }; + let response_message = ConsensusObserverMessage::Response(response); + + // Send the response to the peer + let response_bytes = + bcs::to_bytes(&response_message).map(Bytes::from).unwrap(); + let _ = response_sender.send(Ok(response_bytes)); + }, + _ => panic!( + "Unexpected consensus observer message type received: {:?}!", + message + ), + } + }, + Some(PeerManagerRequest::SendDirectSend(_, _)) => { + panic!("Unexpected direct send message received!") + }, + None => panic!("No subscription request received!"), + } + } + + /// Removes the peer and connection metadata for the given peer + fn remove_peer_and_connection( + peers_and_metadata: Arc, + peer_network_id: PeerNetworkId, + ) { + let peer_metadata = peers_and_metadata + 
.get_metadata_for_peer(peer_network_id) + .unwrap(); + let connection_id = peer_metadata.get_connection_metadata().connection_id; + peers_and_metadata + .remove_peer_metadata(peer_network_id, connection_id) + .unwrap(); + } + + /// A simple helper method that sorts the given peers for a subscription + fn sort_subscription_peers( + consensus_publisher: Arc, + peers_and_metadata: Arc, + active_subscription_peers: Vec, + unhealthy_subscription_peers: Vec, + ) -> Vec { + // Get the connected peers and metadata + let connected_peers_and_metadata = peers_and_metadata + .get_connected_peers_and_metadata() + .unwrap(); + + // Sort the peers for subscription requests + sort_peers_for_subscriptions( + connected_peers_and_metadata, + unhealthy_subscription_peers, + active_subscription_peers, + Some(consensus_publisher), + ) + .unwrap() + } + + /// Verifies that the distance and latencies for the peers are in + /// increasing order (with the distance taking precedence over the latency). + fn verify_increasing_distance_latencies( + peers_and_metadata: &HashMap, + sorted_peers: &[PeerNetworkId], + ) { + let mut previous_latency = None; + let mut previous_distance = 0; + for sorted_peer in sorted_peers { + // Get the distance and latency for the peer + let peer_metadata = peers_and_metadata.get(sorted_peer).unwrap(); + let distance = get_distance_for_peer(sorted_peer, peer_metadata).unwrap(); + let latency = get_latency_for_peer(sorted_peer, peer_metadata); + + // Verify the order of the peers + if distance == previous_distance { + if let Some(latency) = latency { + if let Some(previous_latency) = previous_latency { + assert!(latency >= previous_latency); + } + } + } else { + assert!(distance > previous_distance); + } + + // Update the previous latency and distance + previous_latency = latency; + previous_distance = distance; + } + } +} diff --git a/consensus/src/consensus_observer/publisher.rs b/consensus/src/consensus_observer/publisher/consensus_publisher.rs similarity index 
79% rename from consensus/src/consensus_observer/publisher.rs rename to consensus/src/consensus_observer/publisher/consensus_publisher.rs index c56a817245fba..899901593f7ed 100644 --- a/consensus/src/consensus_observer/publisher.rs +++ b/consensus/src/consensus_observer/publisher/consensus_publisher.rs @@ -2,20 +2,25 @@ // SPDX-License-Identifier: Apache-2.0 use crate::consensus_observer::{ - logging::{LogEntry, LogEvent, LogSchema}, - metrics, - network_client::ConsensusObserverClient, - network_events::ResponseSender, - network_message::{ - ConsensusObserverDirectSend, ConsensusObserverMessage, ConsensusObserverRequest, - ConsensusObserverResponse, + common::{ + logging::{LogEntry, LogEvent, LogSchema}, + metrics, + }, + network::{ + network_handler::ConsensusPublisherNetworkMessage, + observer_client::ConsensusObserverClient, + observer_message::{ + ConsensusObserverDirectSend, ConsensusObserverMessage, ConsensusObserverRequest, + ConsensusObserverResponse, + }, }, }; +use aptos_channels::aptos_channel::Receiver; use aptos_config::{config::ConsensusObserverConfig, network_id::PeerNetworkId}; use aptos_infallible::RwLock; -use aptos_logger::{info, warn}; +use aptos_logger::{error, info, warn}; use aptos_network::application::interface::NetworkClient; -use futures::{SinkExt, StreamExt}; +use futures::StreamExt; use futures_channel::mpsc; use std::{collections::HashSet, sync::Arc, time::Duration}; use tokio::time::interval; @@ -40,8 +45,10 @@ pub struct ConsensusPublisher { impl ConsensusPublisher { pub fn new( - network_client: NetworkClient, consensus_observer_config: ConsensusObserverConfig, + consensus_observer_client: Arc< + ConsensusObserverClient>, + >, ) -> ( Self, mpsc::Receiver<(PeerNetworkId, ConsensusObserverDirectSend)>, @@ -53,7 +60,7 @@ impl ConsensusPublisher { // Create the consensus publisher let consensus_publisher = Self { - consensus_observer_client: Arc::new(ConsensusObserverClient::new(network_client)), + consensus_observer_client, 
consensus_observer_config, active_subscribers: Arc::new(RwLock::new(HashSet::new())), outbound_message_sender, @@ -63,10 +70,35 @@ impl ConsensusPublisher { (consensus_publisher, outbound_message_receiver) } + #[cfg(test)] + /// Creates a new consensus publisher with the given active subscribers + pub fn new_with_active_subscribers( + consensus_observer_config: ConsensusObserverConfig, + consensus_observer_client: Arc< + ConsensusObserverClient>, + >, + active_subscribers: HashSet, + ) -> Self { + // Create the consensus publisher + let (consensus_publisher, _) = + ConsensusPublisher::new(consensus_observer_config, consensus_observer_client); + + // Update the active subscribers + *consensus_publisher.active_subscribers.write() = active_subscribers; + + // Return the publisher + consensus_publisher + } + + /// Adds the given subscriber to the set of active subscribers + fn add_active_subscriber(&self, peer_network_id: PeerNetworkId) { + self.active_subscribers.write().insert(peer_network_id); + } + /// Garbage collect inactive subscriptions by removing peers that are no longer connected fn garbage_collect_subscriptions(&self) { // Get the set of active subscribers - let active_subscribers = self.active_subscribers.read().clone(); + let active_subscribers = self.get_active_subscribers(); // Get the connected peers and metadata let peers_and_metadata = self.consensus_observer_client.get_peers_and_metadata(); @@ -95,7 +127,7 @@ impl ConsensusPublisher { // Remove any subscriptions from peers that are no longer connected for peer_network_id in &disconnected_subscribers { - self.active_subscribers.write().remove(peer_network_id); + self.remove_active_subscriber(peer_network_id); info!(LogSchema::new(LogEntry::ConsensusPublisher) .event(LogEvent::Subscription) .message(&format!( @@ -105,7 +137,7 @@ impl ConsensusPublisher { } // Update the number of active subscribers for each network - let active_subscribers = self.active_subscribers.read().clone(); + let 
active_subscribers = self.get_active_subscribers(); for network_id in peers_and_metadata.get_registered_networks() { // Calculate the number of active subscribers for the network let num_active_subscribers = active_subscribers @@ -127,32 +159,28 @@ impl ConsensusPublisher { self.active_subscribers.read().clone() } - /// Returns a copy of the consensus observer client - pub fn get_consensus_observer_client( - &self, - ) -> Arc>> { - self.consensus_observer_client.clone() + /// Removes the given subscriber from the set of active subscribers + fn remove_active_subscriber(&self, peer_network_id: &PeerNetworkId) { + self.active_subscribers.write().remove(peer_network_id); } - /// Handles a subscription message from a peer - pub fn handle_subscription_request( - &self, - peer_network_id: &PeerNetworkId, - request: ConsensusObserverRequest, - response_sender: ResponseSender, - ) { + /// Processes a network message received by the consensus publisher + fn process_network_message(&self, network_message: ConsensusPublisherNetworkMessage) { + // Unpack the network message + let (peer_network_id, message, response_sender) = network_message.into_parts(); + // Update the RPC request counter - metrics::increment_request_counter( + metrics::increment_counter( &metrics::PUBLISHER_RECEIVED_REQUESTS, - request.get_label(), - peer_network_id, + message.get_label(), + &peer_network_id, ); - // Handle the request - match request { + // Handle the message + match message { ConsensusObserverRequest::Subscribe => { // Add the peer to the set of active subscribers - self.active_subscribers.write().insert(*peer_network_id); + self.add_active_subscriber(peer_network_id); info!(LogSchema::new(LogEntry::ConsensusPublisher) .event(LogEvent::Subscription) .message(&format!( @@ -165,7 +193,7 @@ impl ConsensusPublisher { }, ConsensusObserverRequest::Unsubscribe => { // Remove the peer from the set of active subscribers - self.active_subscribers.write().remove(peer_network_id); + 
self.remove_active_subscriber(&peer_network_id); info!(LogSchema::new(LogEntry::ConsensusPublisher) .event(LogEvent::Subscription) .message(&format!( @@ -179,25 +207,25 @@ impl ConsensusPublisher { } } - /// Publishes a direct send message to all active subscribers - pub async fn publish_message(&self, message: ConsensusObserverDirectSend) { - // Get the set of active subscribers - let active_subscribers = self.active_subscribers.read().clone(); + /// Publishes a direct send message to all active subscribers. Note: this method + /// is non-blocking (to avoid blocking callers during publishing, e.g., consensus). + pub fn publish_message(&self, message: ConsensusObserverDirectSend) { + // Get the active subscribers + let active_subscribers = self.get_active_subscribers(); // Send the message to all active subscribers for peer_network_id in &active_subscribers { // Send the message to the outbound receiver for publishing let mut outbound_message_sender = self.outbound_message_sender.clone(); - if let Err(error) = outbound_message_sender - .send((*peer_network_id, message.clone())) - .await + if let Err(error) = + outbound_message_sender.try_send((*peer_network_id, message.clone())) { // The message send failed warn!(LogSchema::new(LogEntry::ConsensusPublisher) - .event(LogEvent::SendDirectSendMessage) - .message(&format!( - "Failed to send outbound message to the receiver for peer {:?}! Error: {:?}", - peer_network_id, error + .event(LogEvent::SendDirectSendMessage) + .message(&format!( + "Failed to send outbound message to the receiver for peer {:?}! 
Error: {:?}", + peer_network_id, error ))); } } @@ -207,6 +235,7 @@ impl ConsensusPublisher { pub async fn start( self, outbound_message_receiver: mpsc::Receiver<(PeerNetworkId, ConsensusObserverDirectSend)>, + mut publisher_message_receiver: Receiver<(), ConsensusPublisherNetworkMessage>, ) { // Spawn the message serializer and sender spawn_message_serializer_and_sender( @@ -227,12 +256,21 @@ impl ConsensusPublisher { .message("Starting the consensus publisher garbage collection loop!")); loop { tokio::select! { + Some(network_message) = publisher_message_receiver.next() => { + self.process_network_message(network_message); + }, _ = garbage_collection_interval.select_next_some() => { - // Perform garbage collection self.garbage_collect_subscriptions(); }, + else => { + break; // Exit the consensus publisher loop + } } } + + // Log the exit of the consensus publisher loop + error!(LogSchema::new(LogEntry::ConsensusPublisher) + .message("The consensus publisher loop exited unexpectedly!")); } } @@ -314,8 +352,10 @@ fn spawn_message_serializer_and_sender( #[cfg(test)] mod test { use super::*; + use crate::consensus_observer::network::{ + network_events::ResponseSender, observer_message::BlockTransactionPayload, + }; use aptos_config::network_id::NetworkId; - use aptos_consensus_types::pipeline::commit_decision::CommitDecision; use aptos_crypto::HashValue; use aptos_network::{ application::{metadata::ConnectionState, storage::PeersAndMetadata}, @@ -338,10 +378,13 @@ mod test { let peers_and_metadata = PeersAndMetadata::new(&[network_id]); let network_client = NetworkClient::new(vec![], vec![], hashmap![], peers_and_metadata.clone()); + let consensus_observer_client = Arc::new(ConsensusObserverClient::new(network_client)); // Create a consensus publisher - let (consensus_publisher, _) = - ConsensusPublisher::new(network_client, ConsensusObserverConfig::default()); + let (consensus_publisher, _) = ConsensusPublisher::new( + ConsensusObserverConfig::default(), + 
consensus_observer_client, + ); // Add a peer to the peers and metadata let peer_network_id_1 = PeerNetworkId::new(network_id, PeerId::random()); @@ -406,10 +449,13 @@ mod test { let peers_and_metadata = PeersAndMetadata::new(&[network_id]); let network_client = NetworkClient::new(vec![], vec![], hashmap![], peers_and_metadata.clone()); + let consensus_observer_client = Arc::new(ConsensusObserverClient::new(network_client)); // Create a consensus publisher - let (consensus_publisher, _) = - ConsensusPublisher::new(network_client, ConsensusObserverConfig::default()); + let (consensus_publisher, _) = ConsensusPublisher::new( + ConsensusObserverConfig::default(), + consensus_observer_client, + ); // Subscribe a new peer to consensus updates and verify the subscription let peer_network_id_1 = PeerNetworkId::new(network_id, PeerId::random()); @@ -457,10 +503,13 @@ mod test { let peers_and_metadata = PeersAndMetadata::new(&[network_id]); let network_client = NetworkClient::new(vec![], vec![], hashmap![], peers_and_metadata.clone()); + let consensus_observer_client = Arc::new(ConsensusObserverClient::new(network_client)); // Create a consensus publisher - let (consensus_publisher, mut outbound_message_receiver) = - ConsensusPublisher::new(network_client, ConsensusObserverConfig::default()); + let (consensus_publisher, mut outbound_message_receiver) = ConsensusPublisher::new( + ConsensusObserverConfig::default(), + consensus_observer_client, + ); // Subscribe a new peer to consensus updates let peer_network_id_1 = PeerNetworkId::new(network_id, PeerId::random()); @@ -474,9 +523,7 @@ mod test { AggregateSignature::empty(), ), ); - consensus_publisher - .publish_message(ordered_block_message.clone()) - .await; + consensus_publisher.publish_message(ordered_block_message.clone()); // Verify that the message was sent to the outbound message receiver let (peer_network_id, message) = outbound_message_receiver.next().await.unwrap(); @@ -492,14 +539,17 @@ mod test { } // Publish a 
message to the active subscribers - let block_payload_message = ConsensusObserverMessage::new_block_payload_message( - BlockInfo::empty(), + let transaction_payload = BlockTransactionPayload::new_quorum_store_inline_hybrid( + vec![], vec![], Some(10), + vec![], ); - consensus_publisher - .publish_message(block_payload_message.clone()) - .await; + let block_payload_message = ConsensusObserverMessage::new_block_payload_message( + BlockInfo::empty(), + transaction_payload, + ); + consensus_publisher.publish_message(block_payload_message.clone()); // Verify that the message was sent to all active subscribers let num_expected_messages = additional_peer_network_ids.len() + 1; @@ -516,15 +566,12 @@ mod test { process_unsubscription_for_peer(&consensus_publisher, &peer_network_id_1); // Publish another message to the active subscribers - let commit_decision_message = ConsensusObserverMessage::new_commit_decision_message( - CommitDecision::new(LedgerInfoWithSignatures::new( + let commit_decision_message = + ConsensusObserverMessage::new_commit_decision_message(LedgerInfoWithSignatures::new( LedgerInfo::new(BlockInfo::empty(), HashValue::zero()), AggregateSignature::empty(), - )), - ); - consensus_publisher - .publish_message(commit_decision_message.clone()) - .await; + )); + consensus_publisher.publish_message(commit_decision_message.clone()); // Verify that the message was sent to all active subscribers except the first peer for _ in 0..additional_peer_network_ids.len() { @@ -539,11 +586,11 @@ mod test { } // Publish another message to the active subscribers - let block_payload_message = - ConsensusObserverMessage::new_block_payload_message(BlockInfo::empty(), vec![], None); - consensus_publisher - .publish_message(block_payload_message.clone()) - .await; + let block_payload_message = ConsensusObserverMessage::new_block_payload_message( + BlockInfo::empty(), + BlockTransactionPayload::empty(), + ); + consensus_publisher.publish_message(block_payload_message.clone()); // 
Verify that no messages were sent to the outbound message receiver assert!(outbound_message_receiver.next().now_or_never().is_none()); @@ -554,11 +601,15 @@ mod test { consensus_publisher: &ConsensusPublisher, peer_network_id: &PeerNetworkId, ) { - consensus_publisher.handle_subscription_request( - peer_network_id, + // Create the subscribe message + let network_message = ConsensusPublisherNetworkMessage::new( + *peer_network_id, ConsensusObserverRequest::Subscribe, ResponseSender::new_for_test(), ); + + // Process the subscription request + consensus_publisher.process_network_message(network_message); } /// Processes an unsubscription request for the given peer @@ -566,11 +617,15 @@ mod test { consensus_publisher: &ConsensusPublisher, peer_network_id: &PeerNetworkId, ) { - consensus_publisher.handle_subscription_request( - peer_network_id, + // Create the unsubscribe message + let network_message = ConsensusPublisherNetworkMessage::new( + *peer_network_id, ConsensusObserverRequest::Unsubscribe, ResponseSender::new_for_test(), ); + + // Process the unsubscription request + consensus_publisher.process_network_message(network_message); } /// Verifies the active subscribers has the expected size and contains the expected peers diff --git a/consensus/src/consensus_observer/publisher/mod.rs b/consensus/src/consensus_observer/publisher/mod.rs new file mode 100644 index 0000000000000..86beebea1270e --- /dev/null +++ b/consensus/src/consensus_observer/publisher/mod.rs @@ -0,0 +1,4 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +pub mod consensus_publisher; diff --git a/consensus/src/consensus_observer/subscription.rs b/consensus/src/consensus_observer/subscription.rs deleted file mode 100644 index 57aca170e59a8..0000000000000 --- a/consensus/src/consensus_observer/subscription.rs +++ /dev/null @@ -1,648 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use crate::consensus_observer::{ - error::Error, - 
logging::{LogEntry, LogSchema}, -}; -use aptos_config::{config::ConsensusObserverConfig, network_id::PeerNetworkId}; -use aptos_logger::warn; -use aptos_network::application::metadata::PeerMetadata; -use aptos_storage_interface::DbReader; -use aptos_time_service::{TimeService, TimeServiceTrait}; -use ordered_float::OrderedFloat; -use std::{ - collections::{BTreeMap, HashMap}, - sync::Arc, - time::{Duration, Instant}, -}; - -// A useful constant for representing the maximum ping latency -const MAX_PING_LATENCY_SECS: f64 = 10_000.0; - -/// A single consensus observer subscription -pub struct ConsensusObserverSubscription { - // The configuration of the consensus observer - consensus_observer_config: ConsensusObserverConfig, - - // A handle to storage (used to read the latest state and check progress) - db_reader: Arc, - - // The peer network id of the active subscription - peer_network_id: PeerNetworkId, - - // The timestamp of the last message received from the peer - last_message_receive_time: Instant, - - // The timestamp of the last peer optimality check - last_peer_optimality_check: Instant, - - // The highest synced version we've seen from storage, along with the time at which it was seen - highest_synced_version_and_time: (u64, Instant), - - // The time service (used to check the last message receive time) - time_service: TimeService, -} - -impl ConsensusObserverSubscription { - pub fn new( - consensus_observer_config: ConsensusObserverConfig, - db_reader: Arc, - peer_network_id: PeerNetworkId, - time_service: TimeService, - ) -> Self { - let time_now = time_service.now(); - - Self { - consensus_observer_config, - db_reader, - peer_network_id, - last_message_receive_time: time_now, - last_peer_optimality_check: time_now, - highest_synced_version_and_time: (0, time_now), - time_service, - } - } - - /// Verifies that the peer selected for the subscription is optimal - /// based on the set of currently available peers. 
This is done - /// periodically to avoid excessive subscription terminations. - pub fn check_subscription_peer_optimality( - &mut self, - peers_and_metadata: HashMap, - ) -> Result<(), Error> { - // Check if we need to perform the peer optimality check - let time_now = self.time_service.now(); - let duration_since_last_check = time_now.duration_since(self.last_peer_optimality_check); - if duration_since_last_check - < Duration::from_millis( - self.consensus_observer_config - .peer_optimality_check_interval_ms, - ) - { - return Ok(()); // We don't need to check the peer optimality yet - } - - // Update the last peer optimality check time - self.last_peer_optimality_check = time_now; - - // Verify that we're subscribed to the most optimal peer - if let Some(optimal_peer) = sort_peers_by_distance_and_latency(peers_and_metadata).first() { - if *optimal_peer != self.peer_network_id { - return Err(Error::SubscriptionSuboptimal(format!( - "Subscription to peer: {} is no longer optimal! New optimal peer: {}", - self.peer_network_id, optimal_peer - ))); - } - } - - Ok(()) - } - - /// Verifies that the subscription has not timed out based - /// on the last received message time. - pub fn check_subscription_timeout(&self) -> Result<(), Error> { - // Calculate the duration since the last message - let time_now = self.time_service.now(); - let duration_since_last_message = time_now.duration_since(self.last_message_receive_time); - - // Check if the subscription has timed out - if duration_since_last_message - > Duration::from_millis(self.consensus_observer_config.max_subscription_timeout_ms) - { - return Err(Error::SubscriptionTimeout(format!( - "Subscription to peer: {} has timed out! 
No message received for: {:?}", - self.peer_network_id, duration_since_last_message - ))); - } - - Ok(()) - } - - /// Verifies that the DB is continuing to sync and commit new data - pub fn check_syncing_progress(&mut self) -> Result<(), Error> { - // Get the current synced version from storage - let current_synced_version = - self.db_reader - .get_latest_ledger_info_version() - .map_err(|error| { - Error::UnexpectedError(format!( - "Failed to read highest synced version: {:?}", - error - )) - })?; - - // Verify that the synced version is increasing appropriately - let (highest_synced_version, highest_version_timestamp) = - self.highest_synced_version_and_time; - if current_synced_version <= highest_synced_version { - // The synced version hasn't increased. Check if we should terminate - // the subscription based on the last time the highest synced version was seen. - let time_now = self.time_service.now(); - let duration_since_highest_seen = time_now.duration_since(highest_version_timestamp); - if duration_since_highest_seen - > Duration::from_millis( - self.consensus_observer_config.max_synced_version_timeout_ms, - ) - { - return Err(Error::SubscriptionProgressStopped(format!( - "The DB is not making sync progress! Highest synced version: {}, elapsed: {:?}", - highest_synced_version, duration_since_highest_seen - ))); - } - } - - // Update the highest synced version and time - self.highest_synced_version_and_time = (current_synced_version, self.time_service.now()); - - Ok(()) - } - - /// Returns the peer network id of the subscription - pub fn get_peer_network_id(&self) -> PeerNetworkId { - self.peer_network_id - } - - /// Verifies the given message is from the expected peer - pub fn verify_message_sender(&mut self, peer_network_id: &PeerNetworkId) -> Result<(), Error> { - // Verify the message is from the expected peer - if self.peer_network_id != *peer_network_id { - return Err(Error::UnexpectedError(format!( - "Received message from unexpected peer: {}! 
Subscribed to: {}", - peer_network_id, self.peer_network_id - ))); - } - - // Update the last message receive time - self.last_message_receive_time = self.time_service.now(); - - Ok(()) - } -} - -/// Gets the distance from the validators for the specified peer from the peer metadata -fn get_distance_for_peer( - peer_network_id: &PeerNetworkId, - peer_metadata: &PeerMetadata, -) -> Option { - // Get the distance for the peer - let peer_monitoring_metadata = peer_metadata.get_peer_monitoring_metadata(); - let distance = peer_monitoring_metadata - .latest_network_info_response - .as_ref() - .map(|response| response.distance_from_validators); - - // If the distance is missing, log a warning - if distance.is_none() { - warn!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Unable to get distance for peer! Peer: {:?}", - peer_network_id - )) - ); - } - - distance -} - -/// Gets the latency for the specified peer from the peer metadata -fn get_latency_for_peer( - peer_network_id: &PeerNetworkId, - peer_metadata: &PeerMetadata, -) -> Option { - // Get the latency for the peer - let peer_monitoring_metadata = peer_metadata.get_peer_monitoring_metadata(); - let latency = peer_monitoring_metadata.average_ping_latency_secs; - - // If the latency is missing, log a warning - if latency.is_none() { - warn!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Unable to get latency for peer! Peer: {:?}", - peer_network_id - )) - ); - } - - latency -} - -/// Sorts the peers by distance from the validator set and latency. -/// We prioritize distance over latency as we want to avoid close -/// but not up-to-date peers. If peers don't have sufficient metadata -/// for sorting, they are given a lower priority. 
-pub fn sort_peers_by_distance_and_latency( - peers_and_metadata: HashMap, -) -> Vec { - // Group peers and latencies by validator distance, i.e., distance -> [(peer, latency)] - let mut peers_and_latencies_by_distance = BTreeMap::new(); - for (peer_network_id, peer_metadata) in peers_and_metadata { - // Get the distance and latency for the peer - let distance = get_distance_for_peer(&peer_network_id, &peer_metadata); - let latency = get_latency_for_peer(&peer_network_id, &peer_metadata); - - // If the distance is not found, use the maximum distance - let distance = - distance.unwrap_or(aptos_peer_monitoring_service_types::MAX_DISTANCE_FROM_VALIDATORS); - - // If the latency is not found, use a large latency - let latency = latency.unwrap_or(MAX_PING_LATENCY_SECS); - - // Add the peer and latency to the distance group - peers_and_latencies_by_distance - .entry(distance) - .or_insert_with(Vec::new) - .push((peer_network_id, OrderedFloat(latency))); - } - - // Sort the peers by distance and latency. Note: BTreeMaps are - // sorted by key, so the entries will be sorted by distance in ascending order. - let mut sorted_peers = Vec::new(); - for (_, mut peers_and_latencies) in peers_and_latencies_by_distance { - // Sort the peers by latency - peers_and_latencies.sort_by_key(|(_, latency)| *latency); - - // Add the peers to the sorted list (in sorted order) - sorted_peers.extend( - peers_and_latencies - .into_iter() - .map(|(peer_network_id, _)| peer_network_id), - ); - } - - sorted_peers -} - -#[cfg(test)] -mod test { - use super::*; - use aptos_network::transport::ConnectionMetadata; - use aptos_peer_monitoring_service_types::{ - response::NetworkInformationResponse, PeerMonitoringMetadata, - }; - use aptos_storage_interface::Result; - use aptos_types::transaction::Version; - use mockall::mock; - - // This is a simple mock of the DbReader (it generates a MockDatabaseReader) - mock! 
{ - pub DatabaseReader {} - impl DbReader for DatabaseReader { - fn get_latest_ledger_info_version(&self) -> Result; - } - } - - #[test] - fn check_subscription_peer_optimality() { - // Create a new observer subscription - let consensus_observer_config = ConsensusObserverConfig::default(); - let peer_network_id = PeerNetworkId::random(); - let time_service = TimeService::mock(); - let mut subscription = ConsensusObserverSubscription::new( - consensus_observer_config, - Arc::new(MockDatabaseReader::new()), - peer_network_id, - time_service.clone(), - ); - - // Verify the time of the last peer optimality check - let current_time = time_service.now(); - assert_eq!(subscription.last_peer_optimality_check, current_time); - - // Verify that the peer is optimal (not enough time has elapsed to check) - assert!(subscription - .check_subscription_peer_optimality(HashMap::new()) - .is_ok()); - - // Elapse some amount of time (but not enough to check optimality) - let mock_time_service = time_service.into_mock(); - mock_time_service.advance(Duration::from_millis( - consensus_observer_config.peer_optimality_check_interval_ms / 2, - )); - - // Verify that the original peer is still optimal even though it is missing metadata - let new_optimal_peer = PeerNetworkId::random(); - let mut peers_and_metadata = HashMap::new(); - peers_and_metadata.insert( - new_optimal_peer, - PeerMetadata::new_for_test( - ConnectionMetadata::mock(new_optimal_peer.peer_id()), - PeerMonitoringMetadata::new(None, None, None, None, None), - ), - ); - assert!(subscription - .check_subscription_peer_optimality(peers_and_metadata.clone()) - .is_ok()); - - // Elapse enough time to check optimality - mock_time_service.advance(Duration::from_millis( - consensus_observer_config.peer_optimality_check_interval_ms + 1, - )); - - // Verify that the original peer is no longer optimal - assert!(subscription - .check_subscription_peer_optimality(peers_and_metadata.clone()) - .is_err()); - - // Add the original peer to 
the list of peers (with optimal metadata) - peers_and_metadata.insert( - peer_network_id, - PeerMetadata::new_for_test( - ConnectionMetadata::mock(peer_network_id.peer_id()), - PeerMonitoringMetadata::new(Some(0.1), None, None, None, None), - ), - ); - - // Verify that the peer is still optimal - assert!(subscription - .check_subscription_peer_optimality(peers_and_metadata) - .is_ok()); - - // Verify the time of the last peer optimality check - let current_time = mock_time_service.now(); - assert_eq!(subscription.last_peer_optimality_check, current_time); - } - - #[test] - fn test_check_subscription_timeout() { - // Create a new observer subscription - let consensus_observer_config = ConsensusObserverConfig::default(); - let peer_network_id = PeerNetworkId::random(); - let time_service = TimeService::mock(); - let mut subscription = ConsensusObserverSubscription::new( - consensus_observer_config, - Arc::new(MockDatabaseReader::new()), - peer_network_id, - time_service.clone(), - ); - - // Verify that the subscription has not timed out and that the last message time is updated - let current_time = time_service.now(); - assert!(subscription.check_subscription_timeout().is_ok()); - assert_eq!(subscription.last_message_receive_time, current_time); - - // Elapse some amount of time (but not enough to timeout) - let mock_time_service = time_service.into_mock(); - mock_time_service.advance(Duration::from_millis( - consensus_observer_config.max_subscription_timeout_ms / 2, - )); - - // Verify that the subscription has not timed out - assert!(subscription.check_subscription_timeout().is_ok()); - - // Verify a new message is received successfully and that the last message time is updated - let current_time = mock_time_service.now(); - subscription - .verify_message_sender(&peer_network_id) - .unwrap(); - assert_eq!(subscription.last_message_receive_time, current_time); - - // Verify that the subscription has not timed out - 
assert!(subscription.check_subscription_timeout().is_ok()); - - // Elapse enough time to timeout the subscription - mock_time_service.advance(Duration::from_millis( - consensus_observer_config.max_subscription_timeout_ms + 1, - )); - - // Verify that the subscription has timed out - assert!(subscription.check_subscription_timeout().is_err()); - } - - #[test] - fn test_check_syncing_progress() { - // Create a mock DB reader with expectations - let first_synced_version = 10; - let second_synced_version = 20; - let mut mock_db_reader = MockDatabaseReader::new(); - mock_db_reader - .expect_get_latest_ledger_info_version() - .returning(move || Ok(first_synced_version)) - .times(2); // Only allow two calls for the first version - mock_db_reader - .expect_get_latest_ledger_info_version() - .returning(move || Ok(second_synced_version)); // Allow multiple calls for the second version - - // Create a new observer subscription - let consensus_observer_config = ConsensusObserverConfig::default(); - let peer_network_id = PeerNetworkId::random(); - let time_service = TimeService::mock(); - let mut subscription = ConsensusObserverSubscription::new( - consensus_observer_config, - Arc::new(mock_db_reader), - peer_network_id, - time_service.clone(), - ); - - // Verify that the DB is making sync progress and that the highest synced version is updated - let current_time = time_service.now(); - assert!(subscription.check_syncing_progress().is_ok()); - assert_eq!( - subscription.highest_synced_version_and_time, - (first_synced_version, current_time) - ); - - // Elapse some amount of time (not enough to timeout) - let mock_time_service = time_service.into_mock(); - mock_time_service.advance(Duration::from_millis( - consensus_observer_config.max_synced_version_timeout_ms / 2, - )); - - // Verify that the DB is still making sync progress - let current_time = mock_time_service.now(); - assert!(subscription.check_syncing_progress().is_ok()); - assert_eq!( - 
subscription.highest_synced_version_and_time, - (first_synced_version, current_time) - ); - - // Elapse enough time to timeout the subscription - mock_time_service.advance(Duration::from_millis( - consensus_observer_config.max_synced_version_timeout_ms + 1, - )); - - // Verify that the DB is still making sync progress (the next version is higher) - let current_time = mock_time_service.now(); - assert!(subscription.check_syncing_progress().is_ok()); - assert_eq!( - subscription.highest_synced_version_and_time, - (second_synced_version, current_time) - ); - - // Elapse enough time to timeout the subscription - mock_time_service.advance(Duration::from_millis( - consensus_observer_config.max_synced_version_timeout_ms + 1, - )); - - // Verify that the DB is not making sync progress and that the subscription has timed out - assert!(subscription.check_syncing_progress().is_err()); - } - - #[test] - fn test_verify_message_sender() { - // Create a new observer subscription - let consensus_observer_config = ConsensusObserverConfig::default(); - let peer_network_id = PeerNetworkId::random(); - let time_service = TimeService::mock(); - let mut subscription = ConsensusObserverSubscription::new( - consensus_observer_config, - Arc::new(MockDatabaseReader::new()), - peer_network_id, - time_service.clone(), - ); - - // Verify that the message sender is valid - let current_time = time_service.now(); - assert!(subscription.verify_message_sender(&peer_network_id).is_ok()); - assert_eq!(subscription.last_message_receive_time, current_time); - - // Elapse some amount of time - let mock_time_service = time_service.into_mock(); - mock_time_service.advance(Duration::from_secs(10)); - - // Verify that the message sender is not the expected peer - let other_peer_network_id = PeerNetworkId::random(); - assert!(subscription - .verify_message_sender(&other_peer_network_id) - .is_err()); - assert_eq!(subscription.last_message_receive_time, current_time); - - // Elapse more time - 
mock_time_service.advance(Duration::from_secs(10)); - - // Verify that the message sender is the expected peer and that the last message time is updated - let current_time = mock_time_service.now(); - assert!(subscription.verify_message_sender(&peer_network_id).is_ok()); - assert_eq!(subscription.last_message_receive_time, current_time); - } - - #[test] - fn test_sort_peers_by_distance_and_latency() { - // Sort an empty list of peers - let peers_and_metadata = HashMap::new(); - assert!(sort_peers_by_distance_and_latency(peers_and_metadata).is_empty()); - - // Create a list of peers with empty metadata - let peers_and_metadata = create_peers_and_metadata(true, true, 10); - - // Sort the peers and verify the results - let sorted_peers = sort_peers_by_distance_and_latency(peers_and_metadata); - assert_eq!(sorted_peers.len(), 10); - - // Create a list of peers with valid metadata - let peers_and_metadata = create_peers_and_metadata(false, false, 10); - - // Sort the peers - let sorted_peers = sort_peers_by_distance_and_latency(peers_and_metadata.clone()); - - // Verify the order of the peers - verify_increasing_distance_latencies(&peers_and_metadata, &sorted_peers); - assert_eq!(sorted_peers.len(), 10); - - // Create a list of peers with and without metadata - let mut peers_and_metadata = create_peers_and_metadata(false, false, 10); - peers_and_metadata.extend(create_peers_and_metadata(true, false, 10)); - peers_and_metadata.extend(create_peers_and_metadata(false, true, 10)); - peers_and_metadata.extend(create_peers_and_metadata(true, true, 10)); - - // Sort the peers - let sorted_peers = sort_peers_by_distance_and_latency(peers_and_metadata.clone()); - assert_eq!(sorted_peers.len(), 40); - - // Verify the order of the first 20 peers - let (first_20_peers, sorted_peers) = sorted_peers.split_at(20); - verify_increasing_distance_latencies(&peers_and_metadata, first_20_peers); - - // Verify that the next 10 peers only have latency metadata - let (next_10_peers, 
sorted_peers) = sorted_peers.split_at(10); - for sorted_peer in next_10_peers { - let peer_metadata = peers_and_metadata.get(sorted_peer).unwrap(); - assert!(get_distance_for_peer(sorted_peer, peer_metadata).is_none()); - assert!(get_latency_for_peer(sorted_peer, peer_metadata).is_some()); - } - - // Verify that the last 10 peers have no metadata - let (last_10_peers, remaining_peers) = sorted_peers.split_at(10); - for sorted_peer in last_10_peers { - let peer_metadata = peers_and_metadata.get(sorted_peer).unwrap(); - assert!(get_distance_for_peer(sorted_peer, peer_metadata).is_none()); - assert!(get_latency_for_peer(sorted_peer, peer_metadata).is_none()); - } - assert!(remaining_peers.is_empty()); - } - - /// Creates a new peer and metadata for testing - fn create_peer_and_metadata( - latency: Option, - distance_from_validators: Option, - ) -> (PeerNetworkId, PeerMetadata) { - // Create a random peer - let peer_network_id = PeerNetworkId::random(); - - // Create a new peer metadata with the given latency and distance - let connection_metadata = ConnectionMetadata::mock(peer_network_id.peer_id()); - let network_information_response = - distance_from_validators.map(|distance| NetworkInformationResponse { - connected_peers: BTreeMap::new(), - distance_from_validators: distance, - }); - let peer_monitoring_metadata = - PeerMonitoringMetadata::new(latency, None, network_information_response, None, None); - let peer_metadata = - PeerMetadata::new_for_test(connection_metadata, peer_monitoring_metadata); - - (peer_network_id, peer_metadata) - } - - /// Creates a list of peers and metadata for testing - fn create_peers_and_metadata( - empty_latency: bool, - empty_distance: bool, - num_peers: u64, - ) -> HashMap { - let mut peers_and_metadata = HashMap::new(); - for i in 1..num_peers + 1 { - // Determine the distance for the peer - let distance = if empty_distance { None } else { Some(i) }; - - // Determine the latency for the peer - let latency = if empty_latency { None } 
else { Some(i as f64) }; - - // Create a new peer and metadata - let (peer_network_id, peer_metadata) = create_peer_and_metadata(latency, distance); - peers_and_metadata.insert(peer_network_id, peer_metadata); - } - peers_and_metadata - } - - /// Verifies that the distance and latencies for the peers are in - /// increasing order (with the distance taking precedence over the latency). - fn verify_increasing_distance_latencies( - peers_and_metadata: &HashMap, - sorted_peers: &[PeerNetworkId], - ) { - let mut previous_latency = None; - let mut previous_distance = 0; - for sorted_peer in sorted_peers { - // Get the distance and latency for the peer - let peer_metadata = peers_and_metadata.get(sorted_peer).unwrap(); - let distance = get_distance_for_peer(sorted_peer, peer_metadata).unwrap(); - let latency = get_latency_for_peer(sorted_peer, peer_metadata); - - // Verify the order of the peers - if distance == previous_distance { - if let Some(latency) = latency { - if let Some(previous_latency) = previous_latency { - assert!(latency >= previous_latency); - } - } - } else { - assert!(distance > previous_distance); - } - - // Update the previous latency and distance - previous_latency = latency; - previous_distance = distance; - } - } -} diff --git a/consensus/src/consensus_provider.rs b/consensus/src/consensus_provider.rs index e0e12bd453085..37789380d543a 100644 --- a/consensus/src/consensus_provider.rs +++ b/consensus/src/consensus_provider.rs @@ -4,9 +4,12 @@ use crate::{ consensus_observer::{ - network_client::ConsensusObserverClient, network_events::ConsensusObserverNetworkEvents, - network_message::ConsensusObserverMessage, observer::ConsensusObserver, - publisher::ConsensusPublisher, + network::{ + network_handler::ConsensusObserverNetworkMessage, + observer_client::ConsensusObserverClient, observer_message::ConsensusObserverMessage, + }, + observer::consensus_observer::ConsensusObserver, + publisher::consensus_publisher::ConsensusPublisher, }, counters, 
epoch_manager::EpochManager, @@ -22,15 +25,14 @@ use crate::{ util::time_service::ClockTimeService, }; use aptos_bounded_executor::BoundedExecutor; +use aptos_channels::aptos_channel::Receiver; use aptos_config::config::NodeConfig; use aptos_consensus_notifications::ConsensusNotificationSender; use aptos_event_notifications::{DbBackedOnChainConfig, ReconfigNotificationListener}; use aptos_executor::block_executor::BlockExecutor; use aptos_logger::prelude::*; use aptos_mempool::QuorumStoreRequest; -use aptos_network::application::interface::{ - NetworkClient, NetworkClientInterface, NetworkServiceEvents, -}; +use aptos_network::application::interface::{NetworkClient, NetworkServiceEvents}; use aptos_storage_interface::DbReaderWriter; use aptos_time_service::TimeService; use aptos_validator_transaction_pool::VTxnPoolState; @@ -41,6 +43,7 @@ use std::{collections::HashMap, sync::Arc}; use tokio::runtime::Runtime; /// Helper function to start consensus based on configuration and return the runtime +#[allow(clippy::unwrap_used)] pub fn start_consensus( node_config: &NodeConfig, network_client: NetworkClient, @@ -67,6 +70,7 @@ pub fn start_consensus( state_sync_notifier, runtime.handle(), TransactionFilter::new(node_config.execution.transaction_filter.clone()), + node_config.consensus.enable_pre_commit, ); let time_service = Arc::new(ClockTimeService::new(runtime.handle().clone())); @@ -76,7 +80,10 @@ pub fn start_consensus( let (self_sender, self_receiver) = aptos_channels::new_unbounded(&counters::PENDING_SELF_MESSAGES); let consensus_network_client = ConsensusNetworkClient::new(network_client); - let bounded_executor = BoundedExecutor::new(8, runtime.handle().clone()); + let bounded_executor = BoundedExecutor::new( + node_config.consensus.num_bounded_executor_tasks as usize, + runtime.handle().clone(), + ); let rand_storage = Arc::new(RandDb::new(node_config.storage.dir())); let execution_client = Arc::new(ExecutionProxyClient::new( @@ -121,32 +128,17 @@ pub fn 
start_consensus( /// A helper function to start the consensus observer pub fn start_consensus_observer( node_config: &NodeConfig, - observer_network_client: NetworkClient, - observer_network_service_events: NetworkServiceEvents, + consensus_observer_runtime: &Runtime, + consensus_observer_client: Arc< + ConsensusObserverClient>, + >, + consensus_observer_message_receiver: Receiver<(), ConsensusObserverNetworkMessage>, consensus_publisher: Option>, state_sync_notifier: Arc, consensus_to_mempool_sender: mpsc::Sender, aptos_db: DbReaderWriter, reconfig_events: Option>, -) -> Runtime { - // Create a consensus observer runtime - let runtime = aptos_runtimes::spawn_named_runtime("observer".into(), None); - - // Create the consensus observer client - let consensus_observer_client = if let Some(consensus_publisher) = &consensus_publisher { - // Get the consensus observer client from the consensus publisher - consensus_publisher.get_consensus_observer_client() - } else { - // Otherwise, create a new client (the publisher is not enabled) - Arc::new(ConsensusObserverClient::new( - observer_network_client.clone(), - )) - }; - - // Create the consensus observer network events - let observer_network_events = - ConsensusObserverNetworkEvents::new(observer_network_service_events); - +) { // Create the (dummy) consensus network client let (self_sender, _self_receiver) = aptos_channels::new_unbounded(&counters::PENDING_SELF_MESSAGES); @@ -154,7 +146,7 @@ pub fn start_consensus_observer( vec![], vec![], HashMap::new(), - observer_network_client.get_peers_and_metadata(), + consensus_observer_client.get_peers_and_metadata(), )); // If the consensus observer is enabled, create the execution client. 
@@ -169,12 +161,14 @@ pub fn start_consensus_observer( Arc::new(BlockExecutor::::new(aptos_db.clone())), txn_notifier, state_sync_notifier, - runtime.handle(), + consensus_observer_runtime.handle(), TransactionFilter::new(node_config.execution.transaction_filter.clone()), + node_config.consensus.enable_pre_commit, ); // Create the execution proxy client - let bounded_executor = BoundedExecutor::new(32, runtime.handle().clone()); + let bounded_executor = + BoundedExecutor::new(32, consensus_observer_runtime.handle().clone()); let rand_storage = Arc::new(RandDb::new(node_config.storage.dir())); let execution_proxy_client = Arc::new(ExecutionProxyClient::new( node_config.consensus.clone(), @@ -193,20 +187,23 @@ pub fn start_consensus_observer( }; // Create the consensus observer - let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); + let (sync_notification_sender, sync_notification_listener) = + tokio::sync::mpsc::unbounded_channel(); let consensus_observer = ConsensusObserver::new( - node_config.consensus_observer, + node_config.clone(), consensus_observer_client, aptos_db.reader.clone(), execution_client, - tx, + sync_notification_sender, reconfig_events, consensus_publisher, TimeService::real(), ); // Start the consensus observer - runtime.spawn(consensus_observer.start(observer_network_events, rx)); - - runtime + consensus_observer_runtime.spawn(consensus_observer.start( + node_config.consensus_observer, + consensus_observer_message_receiver, + sync_notification_listener, + )); } diff --git a/consensus/src/counters.rs b/consensus/src/counters.rs index 33dab16c9e12c..1cb85ca31915b 100644 --- a/consensus/src/counters.rs +++ b/consensus/src/counters.rs @@ -2,11 +2,16 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. 
// SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::unwrap_used)] + use crate::{ block_storage::tracing::{observe_block, BlockStage}, quorum_store, }; use aptos_consensus_types::pipelined_block::PipelinedBlock; +use aptos_crypto::HashValue; +use aptos_executor_types::ExecutorError; +use aptos_logger::prelude::{error, warn}; use aptos_metrics_core::{ exponential_buckets, op_counters::DurationHistogram, register_avg_counter, register_counter, register_gauge, register_gauge_vec, register_histogram, register_histogram_vec, @@ -25,6 +30,8 @@ pub const TXN_COMMIT_SUCCESS_LABEL: &str = "success"; pub const TXN_COMMIT_FAILED_LABEL: &str = "failed"; /// Transaction commit failed (will not be retried) because of a duplicate pub const TXN_COMMIT_FAILED_DUPLICATE_LABEL: &str = "failed_duplicate"; +/// Transaction commit failed (will not be retried) because it expired +pub const TXN_COMMIT_FAILED_EXPIRED_LABEL: &str = "failed_expired"; /// Transaction commit was unsuccessful, but will be retried pub const TXN_COMMIT_RETRY_LABEL: &str = "retry"; @@ -176,6 +183,16 @@ pub static TXN_DEDUP_SECONDS: Lazy = Lazy::new(|| { .unwrap() }); +pub static BLOCK_PREPARER_LATENCY: Lazy = Lazy::new(|| { + DurationHistogram::new( + register_histogram!( + "aptos_execution_block_preparer_seconds", + "The time spent in block preparer", + ) + .unwrap(), + ) +}); + /// Transaction dedup number of filtered pub static TXN_DEDUP_FILTERED: Lazy = Lazy::new(|| { register_avg_counter( @@ -284,11 +301,19 @@ pub static WAIT_FOR_FULL_BLOCKS_TRIGGERED: Lazy = Lazy::new(|| { ) }); -/// Counts when chain_health backoff is triggered +/// Counts when pipeline backpressure is triggered pub static PIPELINE_BACKPRESSURE_ON_PROPOSAL_TRIGGERED: Lazy = Lazy::new(|| { register_avg_counter( "aptos_pipeline_backpressure_on_proposal_triggered", - "Counts when chain_health backoff is triggered", + "Counts when pipeline backpressure is triggered", + ) +}); + +/// Counts when execution backpressure is triggered +pub 
static EXECUTION_BACKPRESSURE_ON_PROPOSAL_TRIGGERED: Lazy = Lazy::new(|| { + register_avg_counter( + "aptos_execution_backpressure_on_proposal_triggered", + "Counts when execution backpressure is triggered", ) }); @@ -309,11 +334,30 @@ pub static CONSENSUS_PROPOSAL_PENDING_DURATION: Lazy = Lazy::new(|| { }); /// Amount of time (in seconds) proposal is delayed due to backpressure/backoff -pub static PROPOSER_DELAY_PROPOSAL: Lazy = Lazy::new(|| { - register_gauge!( +pub static PROPOSER_DELAY_PROPOSAL: Lazy = Lazy::new(|| { + register_avg_counter( "aptos_proposer_delay_proposal", "Amount of time (in seconds) proposal is delayed due to backpressure/backoff", ) +}); + +/// Histogram for max number of transactions (after filtering for dedup, expirations, etc) proposer uses when creating block. +pub static PROPOSER_MAX_BLOCK_TXNS_AFTER_FILTERING: Lazy = Lazy::new(|| { + register_histogram!( + "aptos_proposer_max_block_txns_after_filtering", + "Histogram for max number of transactions (after filtering) proposer uses when creating block.", + NUM_CONSENSUS_TRANSACTIONS_BUCKETS.to_vec() + ) + .unwrap() +}); + +/// Histogram for max number of transactions to execute proposer uses when creating block. 
+pub static PROPOSER_MAX_BLOCK_TXNS_TO_EXECUTE: Lazy = Lazy::new(|| { + register_histogram!( + "aptos_proposer_max_block_txns_to_execute", + "Histogram for max number of transactions to execute proposer uses when creating block.", + NUM_CONSENSUS_TRANSACTIONS_BUCKETS.to_vec() + ) .unwrap() }); @@ -335,6 +379,16 @@ pub static PROPOSER_PENDING_BLOCKS_FILL_FRACTION: Lazy = Lazy::new(|| { .unwrap() }); +/// Histogram for max number of transactions calibrated block should have, based on the proposer +pub static PROPOSER_ESTIMATED_CALIBRATED_BLOCK_TXNS: Lazy = Lazy::new(|| { + register_histogram!( + "aptos_proposer_estimated_calibrated_block_txns", + "Histogram for max number of transactions calibrated block should have, based on the proposer", + NUM_CONSENSUS_TRANSACTIONS_BUCKETS.to_vec() + ) + .unwrap() +}); + /// Next set of counters are computed at leader election time, with some delay. /// Current voting power fraction that participated in consensus @@ -605,9 +659,9 @@ pub static ORDER_VOTE_ADDED: Lazy = Lazy::new(|| { .unwrap() }); -pub static ORDER_VOTE_VERY_OLD: Lazy = Lazy::new(|| { +pub static ORDER_VOTE_NOT_IN_RANGE: Lazy = Lazy::new(|| { register_int_counter!( - "aptos_consensus_order_vote_very_old", + "aptos_consensus_order_vote_not_in_range", "Count of the number of order votes that are very old" ) .unwrap() @@ -766,6 +820,60 @@ pub static BLOCK_TRACING: Lazy = Lazy::new(|| { .unwrap() }); +pub static PIPELINE_INSERTION_TO_EXECUTED_TIME: Lazy = Lazy::new(|| { + DurationHistogram::new( + register_histogram!( + "aptos_consensus_pipeline_insertion_to_executed_time", + "Histogram for the time it takes for a block to be executed after being inserted into the pipeline" + ).unwrap() + ) +}); + +pub static PIPELINE_ENTRY_TO_INSERTED_TIME: Lazy = Lazy::new(|| { + DurationHistogram::new( + register_histogram!( + "aptos_consensus_pipeline_entry_to_inserted_time", + "Histogram for the time it takes for a block to be inserted into the pipeline after being received" + 
).unwrap() + ) +}); + +pub static PREPARE_BLOCK_SIG_VERIFICATION_TIME: Lazy = Lazy::new(|| { + DurationHistogram::new( + register_histogram!( + "aptos_consensus_prepare_block_sig_verification_time", + "Histogram for the time it takes to verify the signatures of a block after it is prepared" + ).unwrap() + ) +}); + +pub static PREPARE_BLOCK_WAIT_TIME: Lazy = Lazy::new(|| { + DurationHistogram::new( + register_histogram!( + "aptos_consensus_prepare_block_wait_time", + "Histogram for the time the block waits after it enters the pipeline before the block prepration starts" + ).unwrap() + ) +}); + +pub static EXECUTE_BLOCK_WAIT_TIME: Lazy = Lazy::new(|| { + DurationHistogram::new( + register_histogram!( + "aptos_consensus_execute_block_wait_time", + "Histogram for the time the block waits after the block is prepared before the block execution starts" + ).unwrap() + ) +}); + +pub static APPLY_LEDGER_WAIT_TIME: Lazy = Lazy::new(|| { + DurationHistogram::new( + register_histogram!( + "aptos_consensus_apply_ledger_wait_time", + "Histogram for the time the block waits after the block is executed before the ledger is applied" + ).unwrap() + ) +}); + const CONSENSUS_WAIT_DURATION_BUCKETS: [f64; 19] = [ 0.005, 0.01, 0.015, 0.02, 0.04, 0.06, 0.08, 0.10, 0.125, 0.15, 0.175, 0.2, 0.225, 0.25, 0.3, 0.4, 0.6, 0.8, 2.0, @@ -950,6 +1058,56 @@ pub static BUFFER_MANAGER_RETRY_COUNT: Lazy = Lazy::new(|| { .unwrap() }); +/// Count of the buffer manager receiving executor error +pub static BUFFER_MANAGER_RECEIVED_EXECUTOR_ERROR_COUNT: Lazy = Lazy::new(|| { + register_int_counter_vec!( + "aptos_consensus_buffer_manager_received_executor_error_count", + "Count of the buffer manager receiving executor error", + &["error_type"], + ) + .unwrap() +}); + +/// Count of the executor errors pipeline discarded +pub static PIPELINE_DISCARDED_EXECUTOR_ERROR_COUNT: Lazy = Lazy::new(|| { + register_int_counter_vec!( + "aptos_consensus_pipeline_discarded_executor_error_count", + "Count of the executor 
errors pipeline discarded", + &["error_type"], + ) + .unwrap() +}); + +pub fn log_executor_error_occurred( + e: ExecutorError, + counter: &Lazy, + block_id: HashValue, +) { + match e { + ExecutorError::CouldNotGetData => { + counter.with_label_values(&["CouldNotGetData"]).inc(); + warn!( + block_id = block_id, + "Execution error - CouldNotGetData {}", block_id + ); + }, + ExecutorError::BlockNotFound(block_id) => { + counter.with_label_values(&["BlockNotFound"]).inc(); + warn!( + block_id = block_id, + "Execution error BlockNotFound {}", block_id + ); + }, + e => { + counter.with_label_values(&["UnexpectedError"]).inc(); + error!( + block_id = block_id, + "Execution error {:?} for {}", e, block_id + ); + }, + } +} + const PROPSER_ELECTION_DURATION_BUCKETS: [f64; 17] = [ 0.001, 0.002, 0.003, 0.004, 0.006, 0.008, 0.01, 0.012, 0.014, 0.0175, 0.02, 0.025, 0.05, 0.25, 0.5, 1.0, 2.0, @@ -1071,6 +1229,8 @@ pub fn update_counters_for_committed_blocks(blocks_to_commit: &[Arc = Lazy::new(|| { ) .unwrap() }); + +pub static CONSENSUS_PROPOSAL_PAYLOAD_AVAILABILITY: Lazy = Lazy::new(|| { + register_int_counter_vec!( + "aptos_consensus_proposal_payload_availability_count", + "The availability of proposal payload locally", + &["status"] + ) + .unwrap() +}); + +pub static CONSENSUS_PROPOSAL_PAYLOAD_FETCH_DURATION: Lazy = Lazy::new(|| { + register_histogram_vec!( + "aptos_consensus_proposal_payload_fetch_duration", + "Time to fetch payload behind proposal with status", + &["status"] + ) + .unwrap() +}); diff --git a/consensus/src/dag/adapter.rs b/consensus/src/dag/adapter.rs index 692cef3a5283d..36bd615345b04 100644 --- a/consensus/src/dag/adapter.rs +++ b/consensus/src/dag/adapter.rs @@ -141,7 +141,9 @@ impl OrderedNotifier for OrderedNotifierAdapter { ordered_nodes: Vec>, failed_author: Vec<(Round, Author)>, ) { - let anchor = ordered_nodes.last().unwrap(); + let anchor = ordered_nodes + .last() + .expect("ordered_nodes shuld not be empty"); let epoch = anchor.epoch(); let round = 
anchor.round(); let timestamp = anchor.metadata().timestamp(); @@ -385,15 +387,15 @@ impl DAGStorage for StorageAdapter { for i in 1..=std::cmp::min(k, resource.length()) { let idx = (resource.next_idx() + resource.max_capacity() - i as u32) % resource.max_capacity(); - let new_block_event = bcs::from_bytes::( - self.aptos_db - .get_state_value_by_version( - &StateKey::table_item(handle, &bcs::to_bytes(&idx).unwrap()), - version, - )? - .ok_or_else(|| format_err!("Table item doesn't exist"))? - .bytes(), - )?; + // idx is an u32, so it's not possible to fail to convert it to bytes + let idx_bytes = bcs::to_bytes(&idx) + .map_err(|e| anyhow::anyhow!("Failed to serialize index: {:?}", e))?; + let state_value = self + .aptos_db + .get_state_value_by_version(&StateKey::table_item(handle, &idx_bytes), version)? + .ok_or_else(|| anyhow::anyhow!("Table item doesn't exist"))?; + let new_block_event = bcs::from_bytes::(state_value.bytes()) + .map_err(|e| anyhow::anyhow!("Failed to deserialize NewBlockEvent: {:?}", e))?; if self .epoch_to_validators .contains_key(&new_block_event.epoch()) diff --git a/consensus/src/dag/anchor_election/leader_reputation_adapter.rs b/consensus/src/dag/anchor_election/leader_reputation_adapter.rs index 3d58d5cd31b47..5e58c803a8074 100644 --- a/consensus/src/dag/anchor_election/leader_reputation_adapter.rs +++ b/consensus/src/dag/anchor_election/leader_reputation_adapter.rs @@ -49,14 +49,25 @@ impl MetadataBackendAdapter { // TODO: we should change NewBlockEvent on LeaderReputation to take a trait fn convert(&self, event: CommitEvent) -> NewBlockEvent { - let validators = self.epoch_to_validators.get(&event.epoch()).unwrap(); + let validators = self + .epoch_to_validators + .get(&event.epoch()) + .expect("Event epoch should map back to validators!"); let mut bitvec = BitVec::with_num_bits(validators.len() as u16); for author in event.parents() { - bitvec.set(*validators.get(author).unwrap() as u16); + bitvec.set( + *validators + .get(author) + 
.expect("Author should be in validators set!") as u16, + ); } let mut failed_authors = vec![]; for author in event.failed_authors() { - failed_authors.push(*validators.get(author).unwrap() as u64); + failed_authors.push( + *validators + .get(author) + .expect("Author should be in validators set!") as u64, + ); } NewBlockEvent::new( AccountAddress::ZERO, diff --git a/consensus/src/dag/bootstrap.rs b/consensus/src/dag/bootstrap.rs index dca8669eb6e07..1cb4a98f21f53 100644 --- a/consensus/src/dag/bootstrap.rs +++ b/consensus/src/dag/bootstrap.rs @@ -32,7 +32,7 @@ use crate::{ monitor, network::IncomingDAGRequest, payload_client::PayloadClient, - payload_manager::PayloadManager, + payload_manager::TPayloadManager, pipeline::{buffer_manager::OrderedBlocks, execution_client::TExecutionClient}, }; use aptos_bounded_executor::BoundedExecutor; @@ -330,7 +330,7 @@ pub struct DagBootstrapper { dag_network_sender: Arc, proof_notifier: Arc, time_service: aptos_time_service::TimeService, - payload_manager: Arc, + payload_manager: Arc, payload_client: Arc, ordered_nodes_tx: UnboundedSender, execution_client: Arc, @@ -355,7 +355,7 @@ impl DagBootstrapper { dag_network_sender: Arc, proof_notifier: Arc, time_service: aptos_time_service::TimeService, - payload_manager: Arc, + payload_manager: Arc, payload_client: Arc, ordered_nodes_tx: UnboundedSender, execution_client: Arc, @@ -432,7 +432,12 @@ impl DagBootstrapper { .epoch_state .verifier .get_ordered_account_addresses_iter() - .map(|p| self.epoch_state.verifier.get_voting_power(&p).unwrap()) + .map(|p| { + self.epoch_state + .verifier + .get_voting_power(&p) + .expect("No voting power associated with AccountAddress!") + }) .collect(); Arc::new(LeaderReputationAdapter::new( @@ -621,6 +626,8 @@ impl DagBootstrapper { .health_config .pipeline_backpressure_config .clone(), + // TODO: add pipeline backpressure based on execution speed to DAG config + None, ), ordered_notifier.clone(), ); @@ -731,7 +738,7 @@ pub(super) fn 
bootstrap_dag_for_test( dag_network_sender: Arc, proof_notifier: Arc, time_service: aptos_time_service::TimeService, - payload_manager: Arc, + payload_manager: Arc, payload_client: Arc, execution_client: Arc, ) -> ( diff --git a/consensus/src/dag/dag_driver.rs b/consensus/src/dag/dag_driver.rs index 2c787f5b1664b..2395ba30ef264 100644 --- a/consensus/src/dag/dag_driver.rs +++ b/consensus/src/dag/dag_driver.rs @@ -21,12 +21,15 @@ use crate::{ }, DAGRpcResult, RpcHandler, }, - payload_client::PayloadClient, + payload_client::{PayloadClient, PayloadPullParameters}, }; use anyhow::{bail, ensure}; use aptos_collections::BoundedVecDeque; use aptos_config::config::DagPayloadConfig; -use aptos_consensus_types::common::{Author, Payload, PayloadFilter}; +use aptos_consensus_types::{ + common::{Author, Payload, PayloadFilter}, + utils::PayloadTxnsSize, +}; use aptos_crypto::hash::CryptoHash; use aptos_infallible::Mutex; use aptos_logger::{debug, error}; @@ -255,18 +258,23 @@ impl DagDriver { let (validator_txns, payload) = match self .payload_client .pull_payload( - Duration::from_millis(self.payload_config.payload_pull_max_poll_time_ms), - max_txns, - max_size_bytes, - // TODO: Set max_inline_items and max_inline_bytes correctly - 100, - 100 * 1024, + PayloadPullParameters { + max_poll_time: Duration::from_millis( + self.payload_config.payload_pull_max_poll_time_ms, + ), + max_txns: PayloadTxnsSize::new(max_txns, max_size_bytes), + max_txns_after_filtering: max_txns, + soft_max_txns_after_filtering: max_txns, + max_inline_txns: PayloadTxnsSize::new(100, 100 * 1024), + opt_batch_txns_pct: 0, + user_txn_filter: payload_filter, + pending_ordering: false, + pending_uncommitted_blocks: 0, + recent_max_fill_fraction: 0.0, + block_timestamp: self.time_service.now_unix_time(), + }, sys_payload_filter, - payload_filter, Box::pin(async {}), - false, - 0, - 0.0, ) .await { diff --git a/consensus/src/dag/dag_network.rs b/consensus/src/dag/dag_network.rs index 
e63d490cb5f01..04654385613cb 100644 --- a/consensus/src/dag/dag_network.rs +++ b/consensus/src/dag/dag_network.rs @@ -63,6 +63,9 @@ impl Responders { } fn next_to_request(&mut self) -> Option> { + // We want to immediately stop if the number generator is not returning any value. + // expect will panic if the generator is not returning any value. + #[allow(clippy::unwrap_in_result)] let count = self.generator.next().expect("should return a number"); if self.peers.is_empty() { diff --git a/consensus/src/dag/dag_state_sync.rs b/consensus/src/dag/dag_state_sync.rs index dd035e5094283..6cf55c4c5dd50 100644 --- a/consensus/src/dag/dag_state_sync.rs +++ b/consensus/src/dag/dag_state_sync.rs @@ -256,7 +256,9 @@ impl DagStateSynchronizer { self.execution_client.sync_to(commit_li).await?; - Ok(Arc::into_inner(sync_dag_store).unwrap()) + let inner = + Arc::into_inner(sync_dag_store).expect("Only one strong reference should exists"); + Ok(inner) } } diff --git a/consensus/src/dag/dag_store.rs b/consensus/src/dag/dag_store.rs index e22b23f762ae8..3fe94b0dcd128 100644 --- a/consensus/src/dag/dag_store.rs +++ b/consensus/src/dag/dag_store.rs @@ -110,6 +110,8 @@ impl InMemDag { ); let node = Arc::new(node); + // Invariant violation, we must get the node ref (COMMENT ME) + #[allow(clippy::unwrap_in_result)] let round_ref = self .get_node_ref_mut(node.round(), node.author()) .expect("must be present"); @@ -323,7 +325,11 @@ impl InMemDag { filter: impl Fn(&NodeStatus) -> bool, ) -> impl Iterator { let until = until.unwrap_or(self.lowest_round()); - let initial_round = targets.clone().map(|t| t.round()).max().unwrap(); + let initial_round = targets + .clone() + .map(|t| t.round()) + .max() + .expect("Round should be not empty!"); let initial = targets.map(|t| *t.digest()).collect(); let mut reachable_filter = Self::reachable_filter(initial); @@ -397,6 +403,8 @@ impl InMemDag { DagSnapshotBitmask::new(from_round, bitmask) } + /// unwrap is only used in debug mode + 
#[allow(clippy::unwrap_used)] pub(super) fn prune(&mut self) -> BTreeMap>> { let to_keep = self.nodes_by_round.split_off(&self.start_round); let to_prune = std::mem::replace(&mut self.nodes_by_round, to_keep); diff --git a/consensus/src/dag/health/chain_health.rs b/consensus/src/dag/health/chain_health.rs index 26e5d9af256be..6b9845407ae7b 100644 --- a/consensus/src/dag/health/chain_health.rs +++ b/consensus/src/dag/health/chain_health.rs @@ -84,7 +84,7 @@ impl TChainHealth for ChainHealthBackoff { chain_health_backoff.map(|value| { ( - value.max_sending_block_txns_override, + value.max_sending_block_txns_after_filtering_override, value.max_sending_block_bytes_override, ) }) diff --git a/consensus/src/dag/health/pipeline_health.rs b/consensus/src/dag/health/pipeline_health.rs index 3668fd68eec37..f3f5cf5b00fcd 100644 --- a/consensus/src/dag/health/pipeline_health.rs +++ b/consensus/src/dag/health/pipeline_health.rs @@ -68,7 +68,7 @@ impl TPipelineHealth for PipelineLatencyBasedBackpressure { let latency = self.adapter.pipeline_pending_latency(); self.pipeline_config.get_backoff(latency).map(|config| { ( - config.max_sending_block_txns_override, + config.max_sending_block_txns_after_filtering_override, config.max_sending_block_bytes_override, ) }) diff --git a/consensus/src/dag/observability/counters.rs b/consensus/src/dag/observability/counters.rs index 2abdc9ec41a15..47a40e56250a2 100644 --- a/consensus/src/dag/observability/counters.rs +++ b/consensus/src/dag/observability/counters.rs @@ -1,6 +1,8 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::unwrap_used)] + use aptos_metrics_core::{ register_histogram, register_histogram_vec, register_int_gauge, Histogram, HistogramVec, IntGauge, diff --git a/consensus/src/dag/tests/helpers.rs b/consensus/src/dag/tests/helpers.rs index 9cd77cc915274..ff19b6876e2db 100644 --- a/consensus/src/dag/tests/helpers.rs +++ b/consensus/src/dag/tests/helpers.rs @@ -8,15 +8,34 @@ use crate::{ 
}, payload_manager::TPayloadManager, }; -use aptos_consensus_types::common::{Author, Payload, Round}; -use aptos_types::aggregate_signature::AggregateSignature; +use aptos_consensus_types::{ + block::Block, + common::{Author, Payload, Round}, +}; +use aptos_executor_types::ExecutorResult; +use aptos_types::{aggregate_signature::AggregateSignature, transaction::SignedTransaction}; +use async_trait::async_trait; pub(super) const TEST_DAG_WINDOW: u64 = 5; pub(super) struct MockPayloadManager {} +#[async_trait] impl TPayloadManager for MockPayloadManager { fn prefetch_payload_data(&self, _payload: &Payload, _timestamp: u64) {} + + fn notify_commit(&self, _block_timestamp: u64, _payloads: Vec) {} + + fn check_payload_availability(&self, _block: &Block) -> bool { + unimplemented!() + } + + async fn get_transactions( + &self, + _block: &Block, + ) -> ExecutorResult<(Vec, Option)> { + Ok((Vec::new(), None)) + } } pub(super) struct MockOrderRule {} diff --git a/consensus/src/dag/tests/integration_tests.rs b/consensus/src/dag/tests/integration_tests.rs index a0ae240cbdb75..2a19bcc3e88b7 100644 --- a/consensus/src/dag/tests/integration_tests.rs +++ b/consensus/src/dag/tests/integration_tests.rs @@ -7,7 +7,7 @@ use crate::{ network::{IncomingDAGRequest, NetworkSender, RpcResponder}, network_interface::{ConsensusMsg, ConsensusNetworkClient, DIRECT_SEND, RPC}, network_tests::{NetworkPlayground, TwinId}, - payload_manager::PayloadManager, + payload_manager::DirectMempoolPayloadManager, pipeline::{buffer_manager::OrderedBlocks, execution_client::DummyExecutionClient}, test_utils::{consensus_runtime, MockPayloadManager, MockStorage}, }; @@ -78,7 +78,7 @@ impl DagBootstrapUnit { let network = Arc::new(network); let payload_client = Arc::new(MockPayloadManager::new(None)); - let payload_manager = Arc::new(PayloadManager::DirectMempool); + let payload_manager = Arc::new(DirectMempoolPayloadManager::new()); let execution_client = Arc::new(DummyExecutionClient); diff --git 
a/consensus/src/dag/types.rs b/consensus/src/dag/types.rs index 0ad855ee6d45d..703ee9778d3d2 100644 --- a/consensus/src/dag/types.rs +++ b/consensus/src/dag/types.rs @@ -579,15 +579,22 @@ impl BroadcastStatus for Arc { .check_voting_power(partial_signatures.signatures().keys(), true) .is_ok() { - let aggregated_signature = self + let aggregated_signature = match self .epoch_state .verifier .aggregate_signatures(partial_signatures) - .expect("Signature aggregation should succeed"); + { + Ok(signature) => signature, + Err(_) => return Err(anyhow::anyhow!("Signature aggregation failed")), + }; observe_node(self.metadata.timestamp(), NodeStage::CertAggregated); let certificate = NodeCertificate::new(self.metadata.clone(), aggregated_signature); - _ = tx.take().expect("must exist").send(certificate); + // Invariant Violation: The one-shot channel sender must exist to send the NodeCertificate + _ = tx + .take() + .expect("The one-shot channel sender must exist to send the NodeCertificate") + .send(certificate); } if partial_signatures.signatures().len() == self.epoch_state.verifier.len() { @@ -885,7 +892,7 @@ impl TConsensusMsg for DAGMessage { fn into_network_message(self) -> ConsensusMsg { ConsensusMsg::DAGMessage(DAGNetworkMessage { epoch: self.epoch(), - data: bcs::to_bytes(&self).unwrap(), + data: bcs::to_bytes(&self).expect("ConsensusMsg should serialize to bytes"), }) } } @@ -927,7 +934,7 @@ impl TConsensusMsg for DAGRpcResult { fn into_network_message(self) -> ConsensusMsg { ConsensusMsg::DAGMessage(DAGNetworkMessage { epoch: self.epoch(), - data: bcs::to_bytes(&self).unwrap(), + data: bcs::to_bytes(&self).expect("ConsensusMsg should serialize to bytes!"), }) } } diff --git a/consensus/src/epoch_manager.rs b/consensus/src/epoch_manager.rs index f7f4344203803..26fb2a855554a 100644 --- a/consensus/src/epoch_manager.rs +++ b/consensus/src/epoch_manager.rs @@ -8,7 +8,7 @@ use crate::{ tracing::{observe_block, BlockStage}, BlockStore, }, - 
consensus_observer::publisher::ConsensusPublisher, + consensus_observer::publisher::consensus_publisher::ConsensusPublisher, counters, dag::{DagBootstrapper, DagCommitSigner, StorageAdapter}, error::{error_kind, DbError}, @@ -37,7 +37,7 @@ use crate::{ payload_client::{ mixed::MixedPayloadClient, user::quorum_store_client::QuorumStoreClient, PayloadClient, }, - payload_manager::PayloadManager, + payload_manager::{DirectMempoolPayloadManager, TPayloadManager}, persistent_liveness_storage::{LedgerRecoveryData, PersistentLivenessStorage, RecoveryData}, pipeline::execution_client::TExecutionClient, quorum_store::{ @@ -58,27 +58,25 @@ use aptos_bounded_executor::BoundedExecutor; use aptos_channels::{aptos_channel, message_queues::QueueStyle}; use aptos_config::config::{ ConsensusConfig, DagConsensusConfig, ExecutionConfig, NodeConfig, QcAggregatorType, - SafetyRulesConfig, SecureBackend, }; use aptos_consensus_types::{ common::{Author, Round}, delayed_qc_msg::DelayedQcMsg, epoch_retrieval::EpochRetrievalRequest, proof_of_store::ProofCache, + utils::PayloadTxnsSize, }; -use aptos_crypto::ed25519; +use aptos_crypto::ed25519::{Ed25519PrivateKey, PrivateKey}; use aptos_dkg::{ pvss::{traits::Transcript, Player}, weighted_vuf::traits::WeightedVUF, }; use aptos_event_notifications::ReconfigNotificationListener; -use aptos_global_constants::CONSENSUS_KEY; use aptos_infallible::{duration_since_epoch, Mutex}; use aptos_logger::prelude::*; use aptos_mempool::QuorumStoreRequest; use aptos_network::{application::interface::NetworkClient, protocols::network::Event}; -use aptos_safety_rules::SafetyRulesManager; -use aptos_secure_storage::{KVStorage, Storage}; +use aptos_safety_rules::{safety_rules_manager, PersistentSafetyStorage, SafetyRulesManager}; use aptos_types::{ account_address::AccountAddress, dkg::{real_dkg::maybe_dk_from_bls_sk, DKGState, DKGTrait, DefaultDKG}, @@ -93,6 +91,7 @@ use aptos_types::{ }, randomness::{RandKeys, WvufPP, WVUF}, validator_signer::ValidatorSigner, + 
validator_verifier::ValidatorVerifier, }; use aptos_validator_transaction_pool::VTxnPoolState; use fail::fail_point; @@ -172,15 +171,16 @@ pub struct EpochManager { dag_rpc_tx: Option>, dag_shutdown_tx: Option>>, dag_config: DagConsensusConfig, - payload_manager: Arc, + payload_manager: Arc, rand_storage: Arc>, proof_cache: ProofCache, consensus_publisher: Option>, pending_blocks: Arc>, + key_storage: PersistentSafetyStorage, } impl EpochManager

{ - #[allow(clippy::too_many_arguments)] + #[allow(clippy::too_many_arguments, clippy::unwrap_used)] pub(crate) fn new( node_config: &NodeConfig, time_service: Arc, @@ -204,6 +204,7 @@ impl EpochManager

{ let dag_config = node_config.dag_consensus.clone(); let sr_config = &node_config.consensus.safety_rules; let safety_rules_manager = SafetyRulesManager::new(sr_config); + let key_storage = safety_rules_manager::storage(sr_config); Self { author, config, @@ -237,7 +238,7 @@ impl EpochManager

{ dag_shutdown_tx: None, aptos_time_service, dag_config, - payload_manager: Arc::new(PayloadManager::DirectMempool), + payload_manager: Arc::new(DirectMempoolPayloadManager::new()), rand_storage, proof_cache: Cache::builder() .max_capacity(node_config.consensus.proof_cache_capacity) @@ -246,6 +247,7 @@ impl EpochManager

{ .build(), consensus_publisher, pending_blocks: Arc::new(Mutex::new(PendingBlocks::new())), + key_storage, } } @@ -344,7 +346,12 @@ impl EpochManager

{ let voting_powers: Vec<_> = if weight_by_voting_power { proposers .iter() - .map(|p| epoch_state.verifier.get_voting_power(p).unwrap()) + .map(|p| { + epoch_state + .verifier + .get_voting_power(p) + .expect("INVARIANT VIOLATION: proposer not in verifier set") + }) .collect() } else { vec![1; proposers.len()] @@ -387,7 +394,9 @@ impl EpochManager

{ }, ProposerElectionType::RoundProposer(round_proposers) => { // Hardcoded to the first proposer - let default_proposer = proposers.first().unwrap(); + let default_proposer = proposers + .first() + .expect("INVARIANT VIOLATION: proposers is empty"); Arc::new(RoundProposer::new( round_proposers.clone(), *default_proposer, @@ -672,7 +681,11 @@ impl EpochManager

{ epoch_state: &EpochState, network_sender: NetworkSender, consensus_config: &OnChainConsensusConfig, - ) -> (Arc, QuorumStoreClient, QuorumStoreBuilder) { + ) -> ( + Arc, + QuorumStoreClient, + QuorumStoreBuilder, + ) { // Start QuorumStore let (consensus_to_quorum_store_tx, consensus_to_quorum_store_rx) = mpsc::channel(self.config.intra_consensus_channel_buffer_size); @@ -747,6 +760,7 @@ impl EpochManager

{ async fn start_round_manager( &mut self, + consensus_key: Option>, recovery_data: RecoveryData, epoch_state: Arc, onchain_consensus_config: OnChainConsensusConfig, @@ -755,7 +769,7 @@ impl EpochManager

{ onchain_jwk_consensus_config: OnChainJWKConsensusConfig, network_sender: Arc, payload_client: Arc, - payload_manager: Arc, + payload_manager: Arc, rand_config: Option, fast_rand_config: Option, rand_msg_rx: aptos_channel::Receiver, @@ -794,13 +808,16 @@ impl EpochManager

{ self.create_proposer_election(&epoch_state, &onchain_consensus_config); let chain_health_backoff_config = ChainHealthBackoffConfig::new(self.config.chain_health_backoff.clone()); - let pipeline_backpressure_config = - PipelineBackpressureConfig::new(self.config.pipeline_backpressure.clone()); + let pipeline_backpressure_config = PipelineBackpressureConfig::new( + self.config.pipeline_backpressure.clone(), + self.config.execution_backpressure.clone(), + ); let safety_rules_container = Arc::new(Mutex::new(safety_rules)); self.execution_client .start_epoch( + consensus_key, epoch_state.clone(), safety_rules_container.clone(), payload_manager.clone(), @@ -838,11 +855,18 @@ impl EpochManager

{ payload_client, self.time_service.clone(), Duration::from_millis(self.config.quorum_store_poll_time_ms), - self.config.max_sending_block_txns, - self.config.max_sending_block_bytes, - self.config.max_sending_inline_txns, - self.config.max_sending_inline_bytes, + PayloadTxnsSize::new( + self.config.max_sending_block_txns, + self.config.max_sending_block_bytes, + ), + self.config.max_sending_block_txns_after_filtering, + PayloadTxnsSize::new( + self.config.max_sending_inline_txns, + self.config.max_sending_inline_bytes, + ), onchain_consensus_config.max_failed_authors_to_store(), + self.config + .min_max_txns_in_block_after_filtering_from_backpressure, pipeline_backpressure_config, chain_health_backoff_config, self.quorum_store_enabled, @@ -919,6 +943,7 @@ impl EpochManager

{ fn try_get_rand_config_for_new_epoch( &self, + maybe_consensus_key: Option>, new_epoch_state: &EpochState, onchain_randomness_config: &OnChainRandomnessConfig, maybe_dkg_state: anyhow::Result, @@ -947,8 +972,10 @@ impl EpochManager

{ .copied() .ok_or_else(|| NoRandomnessReason::NotInValidatorSet)?; - let dkg_decrypt_key = load_dkg_decrypt_key(&self.config.safety_rules) - .ok_or_else(|| NoRandomnessReason::DKGDecryptKeyUnavailable)?; + let consensus_key = + maybe_consensus_key.ok_or(NoRandomnessReason::ConsensusKeyUnavailable)?; + let dkg_decrypt_key = maybe_dk_from_bls_sk(consensus_key.as_ref()) + .map_err(NoRandomnessReason::ErrConvertingConsensusKeyToDecryptionKey)?; let transcript = bcs::from_bytes::<::Transcript>( dkg_session.transcript.as_slice(), ) @@ -988,8 +1015,13 @@ impl EpochManager

{ .map_err(NoRandomnessReason::RandDbNotAvailable)? .filter(|(epoch, _)| *epoch == new_epoch) { + info!(epoch = new_epoch, "Recovering existing augmented key"); bcs::from_bytes(&key_pair).map_err(NoRandomnessReason::KeyPairDeserializationError)? } else { + info!( + epoch = new_epoch_state.epoch, + "Generating a new augmented key" + ); let mut rng = StdRng::from_rng(thread_rng()).map_err(NoRandomnessReason::RngCreationError)?; let augmented_key_pair = WVUF::augment_key_pair(&vuf_pp, sk.main, pk.main, &mut rng); @@ -1112,7 +1144,17 @@ impl EpochManager

{ // `jwk_consensus_config` not yet initialized, falling back to the old configs. Self::equivalent_jwk_consensus_config_from_deprecated_resources(&payload) }); + + let loaded_consensus_key = match self.load_consensus_key(&epoch_state.verifier) { + Ok(k) => Some(Arc::new(k)), + Err(e) => { + warn!("load_consensus_key failed: {e}"); + None + }, + }; + let rand_configs = self.try_get_rand_config_for_new_epoch( + loaded_consensus_key.clone(), &epoch_state, &onchain_randomness_config, dkg_state, @@ -1159,6 +1201,7 @@ impl EpochManager

{ if consensus_config.is_dag_enabled() { self.start_new_epoch_with_dag( epoch_state, + loaded_consensus_key.clone(), consensus_config, execution_config, onchain_randomness_config, @@ -1173,6 +1216,7 @@ impl EpochManager

{ .await } else { self.start_new_epoch_with_joltean( + loaded_consensus_key.clone(), epoch_state, consensus_config, execution_config, @@ -1193,7 +1237,11 @@ impl EpochManager

{ &mut self, epoch_state: &EpochState, consensus_config: &OnChainConsensusConfig, - ) -> (NetworkSender, Arc, Arc) { + ) -> ( + NetworkSender, + Arc, + Arc, + ) { self.set_epoch_start_metrics(epoch_state); self.quorum_store_enabled = self.enable_quorum_store(consensus_config); let network_sender = self.create_network_sender(epoch_state); @@ -1217,6 +1265,7 @@ impl EpochManager

{ async fn start_new_epoch_with_joltean( &mut self, + consensus_key: Option>, epoch_state: Arc, consensus_config: OnChainConsensusConfig, execution_config: OnChainExecutionConfig, @@ -1224,7 +1273,7 @@ impl EpochManager

{ jwk_consensus_config: OnChainJWKConsensusConfig, network_sender: NetworkSender, payload_client: Arc, - payload_manager: Arc, + payload_manager: Arc, rand_config: Option, fast_rand_config: Option, rand_msg_rx: aptos_channel::Receiver, @@ -1233,6 +1282,7 @@ impl EpochManager

{ LivenessStorageData::FullRecoveryData(initial_data) => { self.recovery_mode = false; self.start_round_manager( + consensus_key, initial_data, epoch_state, consensus_config, @@ -1264,21 +1314,25 @@ impl EpochManager

{ async fn start_new_epoch_with_dag( &mut self, epoch_state: Arc, + loaded_consensus_key: Option>, onchain_consensus_config: OnChainConsensusConfig, on_chain_execution_config: OnChainExecutionConfig, onchain_randomness_config: OnChainRandomnessConfig, onchain_jwk_consensus_config: OnChainJWKConsensusConfig, network_sender: NetworkSender, payload_client: Arc, - payload_manager: Arc, + payload_manager: Arc, rand_config: Option, fast_rand_config: Option, rand_msg_rx: aptos_channel::Receiver, ) { let epoch = epoch_state.epoch; - let consensus_key = new_consensus_key_from_storage(&self.config.safety_rules.backend) - .expect("unable to get private key"); - let signer = Arc::new(ValidatorSigner::new(self.author, consensus_key)); + let signer = Arc::new(ValidatorSigner::new( + self.author, + loaded_consensus_key + .clone() + .expect("unable to get private key"), + )); let commit_signer = Arc::new(DagCommitSigner::new(signer.clone())); assert!( @@ -1289,12 +1343,13 @@ impl EpochManager

{ .storage .aptos_db() .get_latest_ledger_info() - .unwrap() + .expect("unable to get latest ledger info") .commit_info() .round(); self.execution_client .start_epoch( + loaded_consensus_key, epoch_state.clone(), commit_signer, payload_manager.clone(), @@ -1337,7 +1392,9 @@ impl EpochManager

{ self.aptos_time_service.clone(), payload_manager, payload_client, - self.execution_client.get_execution_channel().unwrap(), + self.execution_client + .get_execution_channel() + .expect("unable to get execution channel"), self.execution_client.clone(), onchain_consensus_config.quorum_store_enabled(), onchain_consensus_config.effective_validator_txn_config(), @@ -1388,7 +1445,10 @@ impl EpochManager

{ Err(err) => return Err(err), } // same epoch -> run well-formedness + signature check - let epoch_state = self.epoch_state.clone().unwrap(); + let epoch_state = self + .epoch_state + .clone() + .ok_or_else(|| anyhow::anyhow!("Epoch state is not available"))?; let proof_cache = self.proof_cache.clone(); let quorum_store_enabled = self.quorum_store_enabled; let quorum_store_msg_tx = self.quorum_store_msg_tx.clone(); @@ -1549,7 +1609,7 @@ impl EpochManager

{ buffered_proposal_tx: Option>, peer_id: AccountAddress, event: VerifiedEvent, - payload_manager: Arc, + payload_manager: Arc, pending_blocks: Arc>, ) { if let VerifiedEvent::ProposalMsg(proposal) = &event { @@ -1721,66 +1781,27 @@ impl EpochManager

{ let oidc_providers = payload.get::().ok(); OnChainJWKConsensusConfig::from((features, oidc_providers)) } -} - -fn new_consensus_key_from_storage(backend: &SecureBackend) -> anyhow::Result { - let storage: Storage = backend.into(); - storage - .available() - .map_err(|e| anyhow!("Storage is not available: {e}"))?; - storage - .get(CONSENSUS_KEY) - .map(|v| v.value) - .map_err(|e| anyhow!("storage get and map err: {e}")) -} - -fn load_dkg_decrypt_key_from_identity_blob( - config: &SafetyRulesConfig, -) -> anyhow::Result<::NewValidatorDecryptKey> { - let identity_blob = config.initial_safety_rules_config.identity_blob()?; - identity_blob.try_into_dkg_new_validator_decrypt_key() -} - -fn load_dkg_decrypt_key_from_secure_storage( - config: &SafetyRulesConfig, -) -> anyhow::Result<::NewValidatorDecryptKey> { - let consensus_key = new_consensus_key_from_storage(&config.backend)?; - maybe_dk_from_bls_sk(&consensus_key) -} - -fn load_dkg_decrypt_key( - config: &SafetyRulesConfig, -) -> Option<::NewValidatorDecryptKey> { - match load_dkg_decrypt_key_from_secure_storage(config) { - Ok(dk) => { - return Some(dk); - }, - Err(e) => { - warn!("{e}"); - }, - } - match load_dkg_decrypt_key_from_identity_blob(config) { - Ok(dk) => { - return Some(dk); - }, - Err(e) => { - warn!("{e}"); - }, + fn load_consensus_key(&self, vv: &ValidatorVerifier) -> anyhow::Result { + let pk = vv + .get_public_key(&self.author) + .ok_or_else(|| anyhow!("i am not in the validator set!"))?; + self.key_storage + .consensus_sk_by_pk(pk) + .map_err(|e| anyhow!("could not find sk by pk: {:?}", e)) } - - None } #[derive(Debug)] -enum NoRandomnessReason { +pub enum NoRandomnessReason { VTxnDisabled, FeatureDisabled, DKGStateResourceMissing(anyhow::Error), DKGCompletedSessionResourceMissing, CompletedSessionTooOld, NotInValidatorSet, - DKGDecryptKeyUnavailable, + ConsensusKeyUnavailable, + ErrConvertingConsensusKeyToDecryptionKey(anyhow::Error), TranscriptDeserializationError(bcs::Error), 
SecretShareDecryptionFailed(anyhow::Error), RngCreationError(rand::Error), diff --git a/consensus/src/execution_pipeline.rs b/consensus/src/execution_pipeline.rs index 53a50f01a4659..2c5c4ae94e14c 100644 --- a/consensus/src/execution_pipeline.rs +++ b/consensus/src/execution_pipeline.rs @@ -5,17 +5,19 @@ use crate::{ block_preparer::BlockPreparer, + counters::{self, log_executor_error_occurred}, monitor, - state_computer::{PipelineExecutionResult, StateComputeResultFut}, + pipeline::pipeline_phase::CountedRequest, + state_computer::StateComputeResultFut, }; -use aptos_consensus_types::block::Block; +use aptos_consensus_types::{block::Block, pipeline_execution_result::PipelineExecutionResult}; use aptos_crypto::HashValue; use aptos_executor_types::{ state_checkpoint_output::StateCheckpointOutput, BlockExecutorTrait, ExecutorError, ExecutorResult, }; use aptos_experimental_runtimes::thread_manager::optimal_min_len; -use aptos_logger::{debug, error}; +use aptos_logger::{debug, warn}; use aptos_types::{ block_executor::{config::BlockExecutorConfigFromOnchain, partitioner::ExecutableBlock}, block_metadata_ext::BlockMetadataExt, @@ -24,11 +26,16 @@ use aptos_types::{ }, }; use fail::fail_point; +use futures::future::BoxFuture; use once_cell::sync::Lazy; use rayon::iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator}; -use std::sync::Arc; +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; use tokio::sync::{mpsc, oneshot}; +#[allow(clippy::unwrap_used)] pub static SIG_VERIFY_POOL: Lazy> = Lazy::new(|| { Arc::new( rayon::ThreadPoolBuilder::new() @@ -44,10 +51,16 @@ pub struct ExecutionPipeline { } impl ExecutionPipeline { - pub fn spawn(executor: Arc, runtime: &tokio::runtime::Handle) -> Self { + pub fn spawn( + executor: Arc, + runtime: &tokio::runtime::Handle, + enable_pre_commit: bool, + ) -> Self { let (prepare_block_tx, prepare_block_rx) = mpsc::unbounded_channel(); let (execute_block_tx, execute_block_rx) = mpsc::unbounded_channel(); let 
(ledger_apply_tx, ledger_apply_rx) = mpsc::unbounded_channel(); + let (pre_commit_tx, pre_commit_rx) = mpsc::unbounded_channel(); + runtime.spawn(Self::prepare_block_stage( prepare_block_rx, execute_block_tx, @@ -57,7 +70,14 @@ impl ExecutionPipeline { ledger_apply_tx, executor.clone(), )); - runtime.spawn(Self::ledger_apply_stage(ledger_apply_rx, executor)); + runtime.spawn(Self::ledger_apply_stage( + ledger_apply_rx, + pre_commit_tx, + executor.clone(), + enable_pre_commit, + )); + runtime.spawn(Self::pre_commit_stage(pre_commit_rx, executor)); + Self { prepare_block_tx } } @@ -68,6 +88,7 @@ impl ExecutionPipeline { parent_block_id: HashValue, txn_generator: BlockPreparer, block_executor_onchain_config: BlockExecutorConfigFromOnchain, + lifetime_guard: CountedRequest<()>, ) -> StateComputeResultFut { let (result_tx, result_rx) = oneshot::channel(); let block_id = block.id(); @@ -79,6 +100,8 @@ impl ExecutionPipeline { parent_block_id, block_preparer: txn_generator, result_tx, + command_creation_time: Instant::now(), + lifetime_guard, }) .expect("Failed to send block to execution pipeline."); @@ -105,26 +128,24 @@ impl ExecutionPipeline { parent_block_id, block_preparer, result_tx, + command_creation_time, + lifetime_guard, } = command; - + counters::PREPARE_BLOCK_WAIT_TIME.observe_duration(command_creation_time.elapsed()); debug!("prepare_block received block {}.", block.id()); let input_txns = block_preparer.prepare_block(&block).await; if let Err(e) = input_txns { - result_tx.send(Err(e)).unwrap_or_else(|err| { - error!( - block_id = block.id(), - "Failed to send back execution result for block {}: {:?}.", - block.id(), - err, - ); - }); + result_tx + .send(Err(e)) + .unwrap_or_else(log_failed_to_send_result("prepare_block", block.id())); return; } let validator_txns = block.validator_txns().cloned().unwrap_or_default(); - let input_txns = input_txns.unwrap(); + let input_txns = input_txns.expect("input_txns must be Some."); tokio::task::spawn_blocking(move || 
{ let txns_to_execute = Block::combine_to_input_transactions(validator_txns, input_txns.clone(), metadata); + let sig_verification_start = Instant::now(); let sig_verified_txns: Vec = SIG_VERIFY_POOL.install(|| { let num_txns = txns_to_execute.len(); @@ -134,6 +155,8 @@ impl ExecutionPipeline { .map(|t| t.into()) .collect::>() }); + counters::PREPARE_BLOCK_SIG_VERIFICATION_TIME + .observe_duration(sig_verification_start.elapsed()); execute_block_tx .send(ExecuteBlockCommand { input_txns, @@ -141,6 +164,8 @@ impl ExecutionPipeline { parent_block_id, block_executor_onchain_config, result_tx, + command_creation_time: Instant::now(), + lifetime_guard, }) .expect("Failed to send block to execution pipeline."); }) @@ -172,8 +197,11 @@ impl ExecutionPipeline { parent_block_id, block_executor_onchain_config, result_tx, + command_creation_time, + lifetime_guard, }) = block_rx.recv().await { + counters::EXECUTE_BLOCK_WAIT_TIME.observe_duration(command_creation_time.elapsed()); let block_id = block.block_id; debug!("execute_stage received block {}.", block_id); let executor = executor.clone(); @@ -185,11 +213,14 @@ impl ExecutionPipeline { error: "Injected error in compute".into(), }) }); - executor.execute_and_state_checkpoint( - block, - parent_block_id, - block_executor_onchain_config, - ) + let start = Instant::now(); + executor + .execute_and_state_checkpoint( + block, + parent_block_id, + block_executor_onchain_config, + ) + .map(|output| (output, start.elapsed())) }) .await ) @@ -202,6 +233,8 @@ impl ExecutionPipeline { parent_block_id, state_checkpoint_output, result_tx, + command_creation_time: Instant::now(), + lifetime_guard, }) .expect("Failed to send block to ledger_apply stage."); } @@ -210,39 +243,109 @@ impl ExecutionPipeline { async fn ledger_apply_stage( mut block_rx: mpsc::UnboundedReceiver, + pre_commit_tx: mpsc::UnboundedSender, executor: Arc, + enable_pre_commit: bool, ) { while let Some(LedgerApplyCommand { input_txns, block_id, parent_block_id, - 
state_checkpoint_output, + state_checkpoint_output: execution_result, result_tx, + command_creation_time, + lifetime_guard, }) = block_rx.recv().await { + counters::APPLY_LEDGER_WAIT_TIME.observe_duration(command_creation_time.elapsed()); debug!("ledger_apply stage received block {}.", block_id); let res = async { + let (state_checkpoint_output, execution_duration) = execution_result?; let executor = executor.clone(); monitor!( "ledger_apply", tokio::task::spawn_blocking(move || { - executor.ledger_update(block_id, parent_block_id, state_checkpoint_output?) + executor.ledger_update(block_id, parent_block_id, state_checkpoint_output) }) + .await ) - .await .expect("Failed to spawn_blocking().") + .map(|output| (output, execution_duration)) } .await; - let pipe_line_res = res.map(|output| PipelineExecutionResult::new(input_txns, output)); - result_tx.send(pipe_line_res).unwrap_or_else(|err| { - error!( - block_id = block_id, - "Failed to send back execution result for block {}: {:?}", block_id, err, - ); + let pipeline_res = res.map(|(output, execution_duration)| { + let pre_commit_fut: BoxFuture<'static, ExecutorResult<()>> = + if output.epoch_state().is_some() || !enable_pre_commit { + // hack: it causes issue if pre-commit is finished at an epoch ending, and + // we switch to state sync, so we do the pre-commit only after we actually + // decide to commit (in the commit phase) + let executor = executor.clone(); + Box::pin(async move { + tokio::task::spawn_blocking(move || { + executor.pre_commit_block(block_id, parent_block_id) + }) + .await + .expect("failed to spawn_blocking") + }) + } else { + // kick off pre-commit right away + let (pre_commit_result_tx, pre_commit_result_rx) = oneshot::channel(); + // schedule pre-commit + pre_commit_tx + .send(PreCommitCommand { + block_id, + parent_block_id, + result_tx: pre_commit_result_tx, + lifetime_guard, + }) + .expect("Failed to send block to pre_commit stage."); + Box::pin(async { + pre_commit_result_rx + .await + 
.map_err(ExecutorError::internal_err)? + }) + }; + + PipelineExecutionResult::new(input_txns, output, execution_duration, pre_commit_fut) }); + result_tx + .send(pipeline_res) + .unwrap_or_else(log_failed_to_send_result("ledger_apply", block_id)); } debug!("ledger_apply stage quitting."); } + + async fn pre_commit_stage( + mut block_rx: mpsc::UnboundedReceiver, + executor: Arc, + ) { + while let Some(PreCommitCommand { + block_id, + parent_block_id, + result_tx, + lifetime_guard, + }) = block_rx.recv().await + { + debug!("pre_commit stage received block {}.", block_id); + let res = async { + let executor = executor.clone(); + monitor!( + "pre_commit", + tokio::task::spawn_blocking(move || { + executor.pre_commit_block(block_id, parent_block_id) + }) + ) + .await + .expect("Failed to spawn_blocking().") + } + .await; + result_tx + .send(res) + .unwrap_or_else(log_failed_to_send_result("pre_commit", block_id)); + drop(lifetime_guard); + } + debug!("pre_commit stage quitting."); + } } struct PrepareBlockCommand { @@ -253,6 +356,8 @@ struct PrepareBlockCommand { parent_block_id: HashValue, block_preparer: BlockPreparer, result_tx: oneshot::Sender>, + command_creation_time: Instant, + lifetime_guard: CountedRequest<()>, } struct ExecuteBlockCommand { @@ -261,12 +366,45 @@ struct ExecuteBlockCommand { parent_block_id: HashValue, block_executor_onchain_config: BlockExecutorConfigFromOnchain, result_tx: oneshot::Sender>, + command_creation_time: Instant, + lifetime_guard: CountedRequest<()>, } struct LedgerApplyCommand { input_txns: Vec, block_id: HashValue, parent_block_id: HashValue, - state_checkpoint_output: ExecutorResult, + state_checkpoint_output: ExecutorResult<(StateCheckpointOutput, Duration)>, result_tx: oneshot::Sender>, + command_creation_time: Instant, + lifetime_guard: CountedRequest<()>, +} + +struct PreCommitCommand { + block_id: HashValue, + parent_block_id: HashValue, + result_tx: oneshot::Sender>, + lifetime_guard: CountedRequest<()>, +} + +fn 
log_failed_to_send_result( + from_stage: &'static str, + block_id: HashValue, +) -> impl FnOnce(ExecutorResult) { + move |value| { + warn!( + from_stage = from_stage, + block_id = block_id, + is_err = value.is_err(), + "Failed to send back execution/pre_commit result. (rx dropped)", + ); + if let Err(e) = value { + // receive channel discarding error, log for debugging. + log_executor_error_occurred( + e, + &counters::PIPELINE_DISCARDED_EXECUTOR_ERROR_COUNT, + block_id, + ); + } + } } diff --git a/consensus/src/liveness/leader_reputation.rs b/consensus/src/liveness/leader_reputation.rs index 099da670faefd..dfeabed8181bd 100644 --- a/consensus/src/liveness/leader_reputation.rs +++ b/consensus/src/liveness/leader_reputation.rs @@ -55,7 +55,7 @@ pub struct AptosDBBackend { window_size: usize, seek_len: usize, aptos_db: Arc, - db_result: Mutex<(Vec, u64, bool)>, + db_result: Mutex, u64, bool)>>, } impl AptosDBBackend { @@ -64,13 +64,13 @@ impl AptosDBBackend { window_size, seek_len, aptos_db, - db_result: Mutex::new((vec![], 0u64, true)), + db_result: Mutex::new(None), } } fn refresh_db_result( &self, - mut locked: MutexGuard<'_, (Vec, u64, bool)>, + locked: &mut MutexGuard<'_, Option<(Vec, u64, bool)>>, latest_db_version: u64, ) -> Result<(Vec, u64, bool)> { // assumes target round is not too far from latest commit @@ -97,7 +97,7 @@ impl AptosDBBackend { std::cmp::max(latest_db_version, max_returned_version), hit_end, ); - *locked = result.clone(); + **locked = Some(result.clone()); Ok(result) } @@ -173,18 +173,30 @@ impl MetadataBackend for AptosDBBackend { target_epoch: u64, target_round: Round, ) -> (Vec, HashValue) { - let locked = self.db_result.lock(); - let events = &locked.0; - let version = locked.1; - let hit_end = locked.2; + let mut locked = self.db_result.lock(); + let latest_db_version = self.aptos_db.get_latest_ledger_info_version().unwrap_or(0); + // lazy init db_result + if locked.is_none() { + if let Err(e) = self.refresh_db_result(&mut locked, 
latest_db_version) { + warn!( + error = ?e, "[leader reputation] Fail to initialize db result", + ); + return (vec![], HashValue::zero()); + } + } + let (events, version, hit_end) = { + // locked is somenthing + #[allow(clippy::unwrap_used)] + let result = locked.as_ref().unwrap(); + (&result.0, result.1, result.2) + }; let has_larger = events.first().map_or(false, |e| { (e.event.epoch(), e.event.round()) >= (target_epoch, target_round) }); - let latest_db_version = self.aptos_db.get_latest_ledger_info_version().unwrap_or(0); // check if fresher data has potential to give us different result if !has_larger && version < latest_db_version { - let fresh_db_result = self.refresh_db_result(locked, latest_db_version); + let fresh_db_result = self.refresh_db_result(&mut locked, latest_db_version); match fresh_db_result { Ok((events, _version, hit_end)) => { self.get_from_db_result(target_epoch, target_round, &events, hit_end) @@ -296,16 +308,8 @@ impl NewBlockEventAggregation { &history[start..] 
} else { - if !history.is_empty() { - assert!( - ( - history.first().unwrap().epoch(), - history.first().unwrap().round() - ) >= ( - history.last().unwrap().epoch(), - history.last().unwrap().round() - ) - ); + if let (Some(first), Some(last)) = (history.first(), history.last()) { + assert!((first.epoch(), first.round()) >= (last.epoch(), last.round())); } let end = if history.len() > window_size { window_size @@ -594,7 +598,10 @@ impl LeaderReputation { history: &[NewBlockEvent], round: Round, ) -> VotingPowerRatio { - let candidates = self.epoch_to_proposers.get(&self.epoch).unwrap(); + let candidates = self + .epoch_to_proposers + .get(&self.epoch) + .expect("Epoch should always map to proposers"); // use f64 counter, as total voting power is u128 let total_voting_power = self.voting_powers.iter().map(|v| *v as f64).sum(); CHAIN_HEALTH_TOTAL_VOTING_POWER.set(total_voting_power); diff --git a/consensus/src/liveness/proposal_generator.rs b/consensus/src/liveness/proposal_generator.rs index ea2145a62d93a..334b0a76fbf4e 100644 --- a/consensus/src/liveness/proposal_generator.rs +++ b/consensus/src/liveness/proposal_generator.rs @@ -2,32 +2,38 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use super::{ - proposer_election::ProposerElection, unequivocal_proposer_election::UnequivocalProposerElection, -}; +use super::proposer_election::ProposerElection; use crate::{ block_storage::BlockReader, counters::{ - CHAIN_HEALTH_BACKOFF_TRIGGERED, PIPELINE_BACKPRESSURE_ON_PROPOSAL_TRIGGERED, - PROPOSER_DELAY_PROPOSAL, PROPOSER_PENDING_BLOCKS_COUNT, + CHAIN_HEALTH_BACKOFF_TRIGGERED, EXECUTION_BACKPRESSURE_ON_PROPOSAL_TRIGGERED, + PIPELINE_BACKPRESSURE_ON_PROPOSAL_TRIGGERED, PROPOSER_DELAY_PROPOSAL, + PROPOSER_ESTIMATED_CALIBRATED_BLOCK_TXNS, PROPOSER_MAX_BLOCK_TXNS_AFTER_FILTERING, + PROPOSER_MAX_BLOCK_TXNS_TO_EXECUTE, PROPOSER_PENDING_BLOCKS_COUNT, PROPOSER_PENDING_BLOCKS_FILL_FRACTION, }, - payload_client::PayloadClient, + payload_client::{PayloadClient, PayloadPullParameters}, util::time_service::TimeService, }; use anyhow::{bail, ensure, format_err, Context}; -use aptos_config::config::{ChainHealthBackoffValues, PipelineBackpressureValues}; +use aptos_config::config::{ + ChainHealthBackoffValues, ExecutionBackpressureConfig, PipelineBackpressureValues, +}; use aptos_consensus_types::{ block::Block, block_data::BlockData, common::{Author, Payload, PayloadFilter, Round}, + pipelined_block::ExecutionSummary, quorum_cert::QuorumCert, + utils::PayloadTxnsSize, }; use aptos_crypto::{hash::CryptoHash, HashValue}; +use aptos_infallible::Mutex; use aptos_logger::{error, sample, sample::SampleRate, warn}; use aptos_types::{on_chain_config::ValidatorTxnConfig, validator_txn::ValidatorTransaction}; use aptos_validator_transaction_pool as vtxn_pool; use futures::future::BoxFuture; +use itertools::Itertools; use std::{ collections::{BTreeMap, HashSet}, sync::Arc, @@ -95,23 +101,31 @@ impl ChainHealthBackoffConfig { #[derive(Clone)] pub struct PipelineBackpressureConfig { backoffs: BTreeMap, + execution: Option, } impl PipelineBackpressureConfig { - pub fn new(backoffs: Vec) -> Self { + pub fn new( + backoffs: Vec, + execution: 
Option, + ) -> Self { let original_len = backoffs.len(); let backoffs = backoffs .into_iter() .map(|v| (v.back_pressure_pipeline_latency_limit_ms, v)) .collect::>(); assert_eq!(original_len, backoffs.len()); - Self { backoffs } + Self { + backoffs, + execution, + } } #[allow(dead_code)] pub fn new_no_backoff() -> Self { Self { backoffs: BTreeMap::new(), + execution: None, } } @@ -138,6 +152,74 @@ impl PipelineBackpressureConfig { v }) } + + pub fn get_execution_block_size_backoff( + &self, + block_execution_times: &[ExecutionSummary], + max_block_txns: u64, + ) -> Option { + self.execution.as_ref().and_then(|config| { + let sizes = block_execution_times + .iter() + .flat_map(|summary| { + // for each block, compute target (re-calibrated) block size + + let execution_time_ms = summary.execution_time.as_millis(); + // Only blocks above the time threshold are considered to give enough signal to support calibration, + // so we filter out shorter blocks + if execution_time_ms > config.min_block_time_ms_to_activate as u128 + && summary.payload_len > 0 + { + // TODO: After cost of "retries" is reduced with execution pool, we + // should be computing block gas limit here, simply as: + // `config.target_block_time_ms / execution_time_ms * gas_consumed_by_block` + // + // Until then, we need to compute wanted block size to create. + // Unfortunately, there are multiple layers where transactions are filtered. + // After deduping/reordering logic is applied, max_txns_to_execute limits the transactions + // passed to executor (`summary.payload_len` here), and then some are discarded for various + // reasons, which we approximate are cheaply ignored. + // For the rest, only `summary.to_commit` fraction of `summary.to_commit + summary.to_retry` + // was executed. And so assuming same discard rate, we scale `summary.payload_len` with it.
+ Some( + ((config.target_block_time_ms as f64 / execution_time_ms as f64 + * (summary.to_commit as f64 + / (summary.to_commit + summary.to_retry) as f64) + * summary.payload_len as f64) + .floor() as u64) + .max(1), + ) + } else { + None + } + }) + .sorted() + .collect::>(); + if sizes.len() >= config.min_blocks_to_activate { + let calibrated_block_size = (*sizes + .get(((config.percentile * sizes.len() as f64) as usize).min(sizes.len() - 1)) + .expect("guaranteed to be within vector size")) + .max(config.min_calibrated_txns_per_block); + PROPOSER_ESTIMATED_CALIBRATED_BLOCK_TXNS.observe(calibrated_block_size as f64); + // Check if calibrated block size is reduction in size, to turn on backpressure. + if max_block_txns > calibrated_block_size { + warn!( + block_execution_times = format!("{:?}", block_execution_times), + estimated_calibrated_block_sizes = format!("{:?}", sizes), + calibrated_block_size = calibrated_block_size, + "Execution backpressure recalibration: proposing reducing from {} to {}", + max_block_txns, + calibrated_block_size, + ); + Some(calibrated_block_size) + } else { + None + } + } else { + None + } + }) + } } /// ProposalGenerator is responsible for generating the proposed block on demand: it's typically @@ -162,22 +244,25 @@ pub struct ProposalGenerator { time_service: Arc, // Max time for preparation of the proposal quorum_store_poll_time: Duration, - // Max number of transactions to be added to a proposed block. - max_block_txns: u64, - // Max number of bytes to be added to a proposed block. - max_block_bytes: u64, - // Max number of inline transactions to be added to a proposed block. - max_inline_txns: u64, - // Max number of inline bytes to be added to a proposed block. - max_inline_bytes: u64, + // Max number of transactions (count, bytes) to be added to a proposed block. + max_block_txns: PayloadTxnsSize, + // Max number of unique transactions to be added to a proposed block. 
+ max_block_txns_after_filtering: u64, + // Max number of inline transactions (count, bytes) to be added to a proposed block. + max_inline_txns: PayloadTxnsSize, // Max number of failed authors to be added to a proposed block. max_failed_authors_to_store: usize, + /// If backpressure target block size is below it, update `max_txns_to_execute` instead. + /// Applied to execution, pipeline and chain health backpressure. + /// Needed as we cannot subsplit QS batches. + min_max_txns_in_block_after_filtering_from_backpressure: u64, + pipeline_backpressure_config: PipelineBackpressureConfig, chain_health_backoff_config: ChainHealthBackoffConfig, // Last round that a proposal was generated - last_round_generated: Round, + last_round_generated: Mutex, quorum_store_enabled: bool, vtxn_config: ValidatorTxnConfig, @@ -192,11 +277,11 @@ impl ProposalGenerator { payload_client: Arc, time_service: Arc, quorum_store_poll_time: Duration, - max_block_txns: u64, - max_block_bytes: u64, - max_inline_txns: u64, - max_inline_bytes: u64, + max_block_txns: PayloadTxnsSize, + max_block_txns_after_filtering: u64, + max_inline_txns: PayloadTxnsSize, max_failed_authors_to_store: usize, + min_max_txns_in_block_after_filtering_from_backpressure: u64, pipeline_backpressure_config: PipelineBackpressureConfig, chain_health_backoff_config: ChainHealthBackoffConfig, quorum_store_enabled: bool, @@ -210,13 +295,13 @@ impl ProposalGenerator { time_service, quorum_store_poll_time, max_block_txns, - max_block_bytes, + max_block_txns_after_filtering, + min_max_txns_in_block_after_filtering_from_backpressure, max_inline_txns, - max_inline_bytes, max_failed_authors_to_store, pipeline_backpressure_config, chain_health_backoff_config, - last_round_generated: 0, + last_round_generated: Mutex::new(0), quorum_store_enabled, vtxn_config, allow_batches_without_pos_in_proposal, @@ -231,7 +316,7 @@ impl ProposalGenerator { pub fn generate_nil_block( &self, round: Round, - proposer_election: &mut 
UnequivocalProposerElection, + proposer_election: Arc, ) -> anyhow::Result { let hqc = self.ensure_highest_quorum_cert(round)?; let quorum_cert = hqc.as_ref().clone(); @@ -255,15 +340,18 @@ impl ProposalGenerator { /// 3. In case a given round is not greater than the calculated parent, return an OldRound /// error. pub async fn generate_proposal( - &mut self, + &self, round: Round, - proposer_election: &mut UnequivocalProposerElection, + proposer_election: Arc, wait_callback: BoxFuture<'static, ()>, ) -> anyhow::Result { - if self.last_round_generated < round { - self.last_round_generated = round; - } else { - bail!("Already proposed in the round {}", round); + { + let mut last_round_generated = self.last_round_generated.lock(); + if *last_round_generated < round { + *last_round_generated = round; + } else { + bail!("Already proposed in the round {}", round); + } } let hqc = self.ensure_highest_quorum_cert(round)?; @@ -312,28 +400,40 @@ impl ProposalGenerator { let voting_power_ratio = proposer_election.get_voting_power_participation_ratio(round); - let (max_block_txns, max_block_bytes, max_txns_from_block_to_execute, proposal_delay) = - self.calculate_max_block_sizes(voting_power_ratio, timestamp, round) - .await; + let ( + max_block_txns, + max_block_txns_after_filtering, + max_txns_from_block_to_execute, + proposal_delay, + ) = self + .calculate_max_block_sizes(voting_power_ratio, timestamp, round) + .await; + + PROPOSER_MAX_BLOCK_TXNS_AFTER_FILTERING.observe(max_block_txns_after_filtering as f64); + if let Some(max_to_execute) = max_txns_from_block_to_execute { + PROPOSER_MAX_BLOCK_TXNS_TO_EXECUTE.observe(max_to_execute as f64); + } - PROPOSER_DELAY_PROPOSAL.set(proposal_delay.as_secs_f64()); + PROPOSER_DELAY_PROPOSAL.observe(proposal_delay.as_secs_f64()); if !proposal_delay.is_zero() { tokio::time::sleep(proposal_delay).await; } - let max_pending_block_len = pending_blocks - .iter() - .map(|block| block.payload().map_or(0, |p| p.len())) - .max() - 
.unwrap_or(0); - let max_pending_block_bytes = pending_blocks + let max_pending_block_size = pending_blocks .iter() - .map(|block| block.payload().map_or(0, |p| p.size())) - .max() - .unwrap_or(0); + .map(|block| { + block.payload().map_or(PayloadTxnsSize::zero(), |p| { + PayloadTxnsSize::new(p.len() as u64, p.size() as u64) + }) + }) + .reduce(PayloadTxnsSize::maximum) + .unwrap_or_default(); // Use non-backpressure reduced values for computing fill_fraction - let max_fill_fraction = (max_pending_block_len as f32 / self.max_block_txns as f32) - .max(max_pending_block_bytes as f32 / self.max_block_bytes as f32); + let max_fill_fraction = + (max_pending_block_size.count() as f32 / self.max_block_txns.count() as f32).max( + max_pending_block_size.size_in_bytes() as f32 + / self.max_block_txns.size_in_bytes() as f32, + ); PROPOSER_PENDING_BLOCKS_COUNT.set(pending_blocks.len() as i64); PROPOSER_PENDING_BLOCKS_FILL_FRACTION.set(max_fill_fraction as f64); @@ -345,28 +445,33 @@ impl ProposalGenerator { .collect(); let validator_txn_filter = vtxn_pool::TransactionFilter::PendingTxnHashSet(pending_validator_txn_hashes); + let (validator_txns, mut payload) = self .payload_client .pull_payload( - self.quorum_store_poll_time.saturating_sub(proposal_delay), - max_block_txns, - max_block_bytes, - // TODO: Set max_inline_txns and max_inline_bytes correctly - self.max_inline_txns, - self.max_inline_bytes, + PayloadPullParameters { + max_poll_time: self.quorum_store_poll_time.saturating_sub(proposal_delay), + max_txns: max_block_txns, + max_txns_after_filtering: max_block_txns_after_filtering, + soft_max_txns_after_filtering: max_txns_from_block_to_execute + .unwrap_or(max_block_txns_after_filtering), + max_inline_txns: self.max_inline_txns, + opt_batch_txns_pct: 0, + user_txn_filter: payload_filter, + pending_ordering, + pending_uncommitted_blocks: pending_blocks.len(), + recent_max_fill_fraction: max_fill_fraction, + block_timestamp: timestamp, + }, validator_txn_filter, - 
payload_filter, wait_callback, - pending_ordering, - pending_blocks.len(), - max_fill_fraction, ) .await .context("Fail to retrieve payload")?; if !payload.is_direct() && max_txns_from_block_to_execute.is_some() - && payload.len() > max_txns_from_block_to_execute.unwrap() + && max_txns_from_block_to_execute.map_or(false, |v| payload.len() as u64 > v) { payload = payload.transform_to_quorum_store_v2(max_txns_from_block_to_execute); } @@ -406,66 +511,114 @@ impl ProposalGenerator { } async fn calculate_max_block_sizes( - &mut self, + &self, voting_power_ratio: f64, timestamp: Duration, round: Round, - ) -> (u64, u64, Option, Duration) { - let mut values_max_block_txns = vec![self.max_block_txns]; - let mut values_max_block_bytes = vec![self.max_block_bytes]; + ) -> (PayloadTxnsSize, u64, Option, Duration) { + let mut values_max_block_txns_after_filtering = vec![self.max_block_txns_after_filtering]; + let mut values_max_block = vec![self.max_block_txns]; let mut values_proposal_delay = vec![Duration::ZERO]; - let mut values_max_txns_from_block_to_execute = vec![]; let chain_health_backoff = self .chain_health_backoff_config .get_backoff(voting_power_ratio); if let Some(value) = chain_health_backoff { - values_max_block_txns.push(value.max_sending_block_txns_override); - values_max_block_bytes.push(value.max_sending_block_bytes_override); - if let Some(val) = value.max_txns_from_block_to_execute { - values_max_txns_from_block_to_execute.push(val); - } + values_max_block_txns_after_filtering + .push(value.max_sending_block_txns_after_filtering_override); + values_max_block.push( + self.max_block_txns + .compute_with_bytes(value.max_sending_block_bytes_override), + ); values_proposal_delay.push(Duration::from_millis(value.backoff_proposal_delay_ms)); CHAIN_HEALTH_BACKOFF_TRIGGERED.observe(1.0); } else { CHAIN_HEALTH_BACKOFF_TRIGGERED.observe(0.0); } + let pipeline_pending_latency = self.block_store.pipeline_pending_latency(timestamp); let pipeline_backpressure = self 
.pipeline_backpressure_config - .get_backoff(self.block_store.pipeline_pending_latency(timestamp)); + .get_backoff(pipeline_pending_latency); if let Some(value) = pipeline_backpressure { - values_max_block_txns.push(value.max_sending_block_txns_override); - values_max_block_bytes.push(value.max_sending_block_bytes_override); - if let Some(val) = value.max_txns_from_block_to_execute { - values_max_txns_from_block_to_execute.push(val); - } + values_max_block_txns_after_filtering + .push(value.max_sending_block_txns_after_filtering_override); + values_max_block.push( + self.max_block_txns + .compute_with_bytes(value.max_sending_block_bytes_override), + ); values_proposal_delay.push(Duration::from_millis(value.backpressure_proposal_delay_ms)); PIPELINE_BACKPRESSURE_ON_PROPOSAL_TRIGGERED.observe(1.0); } else { PIPELINE_BACKPRESSURE_ON_PROPOSAL_TRIGGERED.observe(0.0); }; - let max_block_txns = values_max_block_txns.into_iter().min().unwrap(); - let max_block_bytes = values_max_block_bytes.into_iter().min().unwrap(); - let proposal_delay = values_proposal_delay.into_iter().max().unwrap(); - let max_txns_from_block_to_execute = - values_max_txns_from_block_to_execute.into_iter().min(); - if pipeline_backpressure.is_some() || chain_health_backoff.is_some() { - warn!( - "Generating proposal: reducing limits to {} txns (filtered to {:?}) and {} bytes, due to pipeline_backpressure: {}, chain health backoff: {}. Delaying sending proposal by {}ms. 
Round: {}", - max_block_txns, - max_txns_from_block_to_execute, - max_block_bytes, - pipeline_backpressure.is_some(), - chain_health_backoff.is_some(), - proposal_delay.as_millis(), - round, - ); + let mut execution_backpressure_applied = false; + if let Some(config) = &self.pipeline_backpressure_config.execution { + let execution_backpressure = self + .pipeline_backpressure_config + .get_execution_block_size_backoff( + &self + .block_store + .get_recent_block_execution_times(config.num_blocks_to_look_at), + self.max_block_txns_after_filtering, + ); + if let Some(execution_backpressure_block_size) = execution_backpressure { + values_max_block_txns_after_filtering.push(execution_backpressure_block_size); + execution_backpressure_applied = true; + } } + EXECUTION_BACKPRESSURE_ON_PROPOSAL_TRIGGERED.observe( + if execution_backpressure_applied { + 1.0 + } else { + 0.0 + }, + ); + + let max_block_txns_after_filtering = values_max_block_txns_after_filtering + .into_iter() + .min() + .expect("always initialized to at least one value"); + + let max_block_size = values_max_block + .into_iter() + .reduce(PayloadTxnsSize::minimum) + .expect("always initialized to at least one value"); + let proposal_delay = values_proposal_delay + .into_iter() + .max() + .expect("always initialized to at least one value"); + + let (max_block_txns_after_filtering, max_txns_from_block_to_execute) = if self + .min_max_txns_in_block_after_filtering_from_backpressure + > max_block_txns_after_filtering + { + ( + self.min_max_txns_in_block_after_filtering_from_backpressure, + Some(max_block_txns_after_filtering), + ) + } else { + (max_block_txns_after_filtering, None) + }; + + warn!( + pipeline_pending_latency = pipeline_pending_latency.as_millis(), + proposal_delay_ms = proposal_delay.as_millis(), + max_block_txns_after_filtering = max_block_txns_after_filtering, + max_txns_from_block_to_execute = + max_txns_from_block_to_execute.unwrap_or(max_block_txns_after_filtering), + max_block_size = 
max_block_size, + is_pipeline_backpressure = pipeline_backpressure.is_some(), + is_execution_backpressure = execution_backpressure_applied, + is_chain_health_backoff = chain_health_backoff.is_some(), + round = round, + "Proposal generation backpressure details", + ); + ( - max_block_txns, - max_block_bytes, + max_block_size, + max_block_txns_after_filtering, max_txns_from_block_to_execute, proposal_delay, ) @@ -494,7 +647,7 @@ impl ProposalGenerator { round: Round, previous_round: Round, include_cur_round: bool, - proposer_election: &mut UnequivocalProposerElection, + proposer_election: Arc, ) -> Vec<(Round, Author)> { let end_round = round + u64::from(include_cur_round); let mut failed_authors = Vec::new(); diff --git a/consensus/src/liveness/proposal_generator_test.rs b/consensus/src/liveness/proposal_generator_test.rs index ab4102b7a39f3..aae56dc864644 100644 --- a/consensus/src/liveness/proposal_generator_test.rs +++ b/consensus/src/liveness/proposal_generator_test.rs @@ -17,6 +17,7 @@ use crate::{ use aptos_consensus_types::{ block::{block_test_utils::certificate_for_genesis, Block}, common::Author, + utils::PayloadTxnsSize, }; use aptos_types::{on_chain_config::ValidatorTxnConfig, validator_signer::ValidatorSigner}; use futures::{future::BoxFuture, FutureExt}; @@ -30,30 +31,31 @@ fn empty_callback() -> BoxFuture<'static, ()> { async fn test_proposal_generation_empty_tree() { let signer = ValidatorSigner::random(None); let block_store = build_empty_tree(); - let mut proposal_generator = ProposalGenerator::new( + let proposal_generator = ProposalGenerator::new( signer.author(), block_store.clone(), Arc::new(MockPayloadManager::new(None)), Arc::new(SimulatedTimeService::new()), Duration::ZERO, + PayloadTxnsSize::new(1, 10), 1, + PayloadTxnsSize::new(1, 10), 10, 1, - 10, - 10, PipelineBackpressureConfig::new_no_backoff(), ChainHealthBackoffConfig::new_no_backoff(), false, ValidatorTxnConfig::default_disabled(), true, ); - let mut proposer_election = - 
UnequivocalProposerElection::new(Arc::new(RotatingProposer::new(vec![signer.author()], 1))); + let proposer_election = Arc::new(UnequivocalProposerElection::new(Arc::new( + RotatingProposer::new(vec![signer.author()], 1), + ))); let genesis = block_store.ordered_root(); // Generate proposals for an empty tree. let proposal_data = proposal_generator - .generate_proposal(1, &mut proposer_election, empty_callback()) + .generate_proposal(1, proposer_election.clone(), empty_callback()) .await .unwrap(); let proposal = Block::new_proposal_from_block_data(proposal_data, &signer).unwrap(); @@ -64,7 +66,7 @@ async fn test_proposal_generation_empty_tree() { // Duplicate proposals on the same round are not allowed let proposal_err = proposal_generator - .generate_proposal(1, &mut proposer_election, empty_callback()) + .generate_proposal(1, proposer_election.clone(), empty_callback()) .await .err(); assert!(proposal_err.is_some()); @@ -74,26 +76,25 @@ async fn test_proposal_generation_empty_tree() { async fn test_proposal_generation_parent() { let mut inserter = TreeInserter::default(); let block_store = inserter.block_store(); - let mut proposal_generator = ProposalGenerator::new( + let proposal_generator = ProposalGenerator::new( inserter.signer().author(), block_store.clone(), Arc::new(MockPayloadManager::new(None)), Arc::new(SimulatedTimeService::new()), Duration::ZERO, + PayloadTxnsSize::new(1, 1000), 1, - 1000, - 1, - 500, + PayloadTxnsSize::new(1, 500), 10, + 1, PipelineBackpressureConfig::new_no_backoff(), ChainHealthBackoffConfig::new_no_backoff(), false, ValidatorTxnConfig::default_disabled(), true, ); - let mut proposer_election = UnequivocalProposerElection::new(Arc::new(RotatingProposer::new( - vec![inserter.signer().author()], - 1, + let proposer_election = Arc::new(UnequivocalProposerElection::new(Arc::new( + RotatingProposer::new(vec![inserter.signer().author()], 1), ))); let genesis = block_store.ordered_root(); let a1 = inserter @@ -104,7 +105,7 @@ async fn 
test_proposal_generation_parent() { .await; let original_res = proposal_generator - .generate_proposal(10, &mut proposer_election, empty_callback()) + .generate_proposal(10, proposer_election.clone(), empty_callback()) .await .unwrap(); // With no certifications the parent is genesis @@ -118,7 +119,7 @@ async fn test_proposal_generation_parent() { // Once a1 is certified, it should be the one to choose from inserter.insert_qc_for_block(a1.as_ref(), None); let a1_child_res = proposal_generator - .generate_proposal(11, &mut proposer_election, empty_callback()) + .generate_proposal(11, proposer_election.clone(), empty_callback()) .await .unwrap(); assert_eq!(a1_child_res.parent_id(), a1.id()); @@ -133,7 +134,7 @@ async fn test_proposal_generation_parent() { // Once b1 is certified, it should be the one to choose from inserter.insert_qc_for_block(b1.as_ref(), None); let b1_child_res = proposal_generator - .generate_proposal(15, &mut proposer_election, empty_callback()) + .generate_proposal(15, proposer_election.clone(), empty_callback()) .await .unwrap(); assert_eq!(b1_child_res.parent_id(), b1.id()); @@ -150,26 +151,25 @@ async fn test_proposal_generation_parent() { async fn test_old_proposal_generation() { let mut inserter = TreeInserter::default(); let block_store = inserter.block_store(); - let mut proposal_generator = ProposalGenerator::new( + let proposal_generator = ProposalGenerator::new( inserter.signer().author(), block_store.clone(), Arc::new(MockPayloadManager::new(None)), Arc::new(SimulatedTimeService::new()), Duration::ZERO, + PayloadTxnsSize::new(1, 1000), 1, - 1000, - 1, - 500, + PayloadTxnsSize::new(1, 500), 10, + 1, PipelineBackpressureConfig::new_no_backoff(), ChainHealthBackoffConfig::new_no_backoff(), false, ValidatorTxnConfig::default_disabled(), true, ); - let mut proposer_election = UnequivocalProposerElection::new(Arc::new(RotatingProposer::new( - vec![inserter.signer().author()], - 1, + let proposer_election = 
Arc::new(UnequivocalProposerElection::new(Arc::new( + RotatingProposer::new(vec![inserter.signer().author()], 1), ))); let genesis = block_store.ordered_root(); let a1 = inserter @@ -178,7 +178,7 @@ async fn test_old_proposal_generation() { inserter.insert_qc_for_block(a1.as_ref(), None); let proposal_err = proposal_generator - .generate_proposal(1, &mut proposer_election, empty_callback()) + .generate_proposal(1, proposer_election.clone(), empty_callback()) .await .err(); assert!(proposal_err.is_some()); @@ -191,31 +191,30 @@ async fn test_correct_failed_authors() { let peer1 = Author::random(); let peer2 = Author::random(); let block_store = inserter.block_store(); - let mut proposal_generator = ProposalGenerator::new( + let proposal_generator = ProposalGenerator::new( author, block_store.clone(), Arc::new(MockPayloadManager::new(None)), Arc::new(SimulatedTimeService::new()), Duration::ZERO, + PayloadTxnsSize::new(1, 1000), 1, - 1000, - 1, - 500, + PayloadTxnsSize::new(1, 500), 10, + 1, PipelineBackpressureConfig::new_no_backoff(), ChainHealthBackoffConfig::new_no_backoff(), false, ValidatorTxnConfig::default_disabled(), true, ); - let mut proposer_election = UnequivocalProposerElection::new(Arc::new(RotatingProposer::new( - vec![author, peer1, peer2], - 1, + let proposer_election = Arc::new(UnequivocalProposerElection::new(Arc::new( + RotatingProposer::new(vec![author, peer1, peer2], 1), ))); let genesis = block_store.ordered_root(); let result = proposal_generator - .generate_proposal(6, &mut proposer_election, empty_callback()) + .generate_proposal(6, proposer_election.clone(), empty_callback()) .await .unwrap(); // With no certifications the parent is genesis diff --git a/consensus/src/liveness/proposer_election.rs b/consensus/src/liveness/proposer_election.rs index d9935bd731bac..0a898a5c6ce97 100644 --- a/consensus/src/liveness/proposer_election.rs +++ b/consensus/src/liveness/proposer_election.rs @@ -66,7 +66,7 @@ pub(crate) fn choose_index(mut weights: 
Vec, state: Vec) -> usize { Ordering::Greater } }) - .unwrap_err() + .expect_err("Comparison never returns equals, so it's always guaranteed to be error") } #[test] diff --git a/consensus/src/liveness/round_state.rs b/consensus/src/liveness/round_state.rs index 5384e09f93037..ea7e6e7f5b362 100644 --- a/consensus/src/liveness/round_state.rs +++ b/consensus/src/liveness/round_state.rs @@ -310,7 +310,7 @@ impl RoundState { } } - pub async fn process_delayed_qc_msg( + pub fn process_delayed_qc_msg( &mut self, validator_verifier: &ValidatorVerifier, msg: DelayedQcMsg, @@ -324,11 +324,6 @@ impl RoundState { self.vote_sent.clone() } - /// Setup a longer timeout task for leader because it enters the round earlier. - pub fn setup_leader_timeout(&mut self) { - self.setup_timeout(2); - } - /// Setup the timeout task and return the duration of the current timeout fn setup_timeout(&mut self, multiplier: u32) -> Duration { let timeout_sender = self.timeout_sender.clone(); diff --git a/consensus/src/network.rs b/consensus/src/network.rs index 13dcf905784a7..517c01fce472c 100644 --- a/consensus/src/network.rs +++ b/consensus/src/network.rs @@ -170,8 +170,6 @@ pub struct NetworkReceivers { #[async_trait::async_trait] pub trait QuorumStoreSender: Send + Clone { - async fn send_batch_request(&self, request: BatchRequest, recipients: Vec); - async fn request_batch( &self, request: BatchRequest, @@ -179,8 +177,6 @@ pub trait QuorumStoreSender: Send + Clone { timeout: Duration, ) -> anyhow::Result; - async fn send_batch(&self, batch: Batch, recipients: Vec); - async fn send_signed_batch_info_msg( &self, signed_batch_infos: Vec, @@ -350,7 +346,7 @@ impl NetworkSender { if self.author == peer { let self_msg = Event::Message(self.author, msg.clone()); if let Err(err) = self_sender.send(self_msg).await { - error!(error = ?err, "Error delivering a self msg"); + warn!(error = ?err, "Error delivering a self msg"); } continue; } @@ -474,12 +470,6 @@ impl NetworkSender { 
#[async_trait::async_trait] impl QuorumStoreSender for NetworkSender { - async fn send_batch_request(&self, request: BatchRequest, recipients: Vec) { - fail_point!("consensus::send::batch_request", |_| ()); - let msg = ConsensusMsg::BatchRequestMsg(Box::new(request)); - self.send(msg, recipients).await - } - async fn request_batch( &self, request: BatchRequest, @@ -506,12 +496,6 @@ impl QuorumStoreSender for NetworkSender { } } - async fn send_batch(&self, batch: Batch, recipients: Vec) { - fail_point!("consensus::send::batch", |_| ()); - let msg = ConsensusMsg::BatchResponse(Box::new(batch)); - self.send(msg, recipients).await - } - async fn send_signed_batch_info_msg( &self, signed_batch_infos: Vec, diff --git a/consensus/src/network_tests.rs b/consensus/src/network_tests.rs index ad26baccdf6f3..f1d18d90d6e7c 100644 --- a/consensus/src/network_tests.rs +++ b/consensus/src/network_tests.rs @@ -8,7 +8,7 @@ use crate::{ test_utils::{self, consensus_runtime, placeholder_ledger_info, timed_block_on}, }; use aptos_channels::{self, aptos_channel, message_queues::QueueStyle}; -use aptos_config::network_id::NetworkId; +use aptos_config::network_id::{NetworkId, PeerNetworkId}; use aptos_consensus_types::{ block::{block_test_utils::certificate_for_genesis, Block}, common::Author, @@ -21,14 +21,13 @@ use aptos_consensus_types::{ use aptos_infallible::{Mutex, RwLock}; use aptos_network::{ application::storage::PeersAndMetadata, - peer_manager::{ - ConnectionRequestSender, PeerManagerNotification, PeerManagerRequest, - PeerManagerRequestSender, - }, + peer_manager::{ConnectionRequestSender, PeerManagerRequest, PeerManagerRequestSender}, protocols::{ - network::{NewNetworkEvents, RpcError, SerializedRequest}, - rpc::InboundRpcRequest, - wire::handshake::v1::ProtocolIdSet, + network::{NewNetworkEvents, ReceivedMessage, RpcError, SerializedRequest}, + wire::{ + handshake::v1::ProtocolIdSet, + messaging::v1::{DirectSendMsg, NetworkMessage, RpcRequest}, + }, }, ProtocolId, }; @@ 
-65,11 +64,8 @@ pub struct NetworkPlayground { /// These events will usually be handled by the event loop spawned in /// `ConsensusNetworkImpl`. /// - node_consensus_txs: Arc< - Mutex< - HashMap>, - >, - >, + node_consensus_txs: + Arc>>>, /// Nodes' outbound handlers forward their outbound non-rpc messages to this /// queue. outbound_msgs_tx: mpsc::Sender<(TwinId, PeerManagerRequest)>, @@ -131,12 +127,7 @@ impl NetworkPlayground { mut network_reqs_rx: aptos_channel::Receiver<(PeerId, ProtocolId), PeerManagerRequest>, mut outbound_msgs_tx: mpsc::Sender<(TwinId, PeerManagerRequest)>, node_consensus_txs: Arc< - Mutex< - HashMap< - TwinId, - aptos_channel::Sender<(PeerId, ProtocolId), PeerManagerNotification>, - >, - >, + Mutex>>, >, author_to_twin_ids: Arc>, ) { @@ -175,16 +166,23 @@ impl NetworkPlayground { let node_consensus_tx = node_consensus_txs.lock().get(dst_twin_id).unwrap().clone(); - let inbound_req = InboundRpcRequest { - protocol_id: outbound_req.protocol_id, - data: outbound_req.data, - res_tx: outbound_req.res_tx, - }; - node_consensus_tx .push( (src_twin_id.author, ProtocolId::ConsensusRpcBcs), - PeerManagerNotification::RecvRpc(src_twin_id.author, inbound_req), + ReceivedMessage { + message: NetworkMessage::RpcRequest(RpcRequest { + protocol_id: outbound_req.protocol_id, + request_id: 123, + priority: 0, + raw_request: outbound_req.data.into(), + }), + sender: PeerNetworkId::new( + NetworkId::Validator, + src_twin_id.author, + ), + receive_timestamp_micros: 0, + rpc_replier: Some(Arc::new(outbound_req.res_tx)), + }, ) .unwrap(); }, @@ -201,7 +199,7 @@ impl NetworkPlayground { pub fn add_node( &mut self, twin_id: TwinId, - consensus_tx: aptos_channel::Sender<(PeerId, ProtocolId), PeerManagerNotification>, + consensus_tx: aptos_channel::Sender<(PeerId, ProtocolId), ReceivedMessage>, network_reqs_rx: aptos_channel::Receiver<(PeerId, ProtocolId), PeerManagerRequest>, conn_mgr_reqs_rx: aptos_channels::Receiver, ) { @@ -231,7 +229,7 @@ impl NetworkPlayground 
{ &mut self, src_twin_id: TwinId, dst_twin_id: TwinId, - msg_notif: PeerManagerNotification, + rmsg: ReceivedMessage, ) -> (Author, ConsensusMsg) { let node_consensus_tx = self .node_consensus_txs @@ -241,21 +239,24 @@ impl NetworkPlayground { .clone(); // copy message data - let msg_copy = match &msg_notif { - PeerManagerNotification::RecvMessage(src, msg) => { - let msg: ConsensusMsg = msg.to_message().unwrap(); - (*src, msg) + let source_address = rmsg.sender.peer_id(); + let consensus_msg = match &rmsg.message { + NetworkMessage::DirectSendMsg(dmsg) => dmsg + .protocol_id + .from_bytes(dmsg.raw_msg.as_slice()) + .unwrap(), + wrong_message => { + panic!( + "[network playground] Unexpected ReceivedMessage: {:?}", + wrong_message + ); }, - msg_notif => panic!( - "[network playground] Unexpected PeerManagerNotification: {:?}", - msg_notif - ), }; let _ = node_consensus_tx.push( (src_twin_id.author, ProtocolId::ConsensusDirectSendBcs), - msg_notif, + rmsg, ); - msg_copy + (source_address, consensus_msg) } /// Wait for exactly `num_messages` to be enqueued and delivered. 
Return a @@ -276,7 +277,7 @@ impl NetworkPlayground { let (src_twin_id, net_req) = self.outbound_msgs_rx.next().await .expect("[network playground] waiting for messages, but message queue has shutdown unexpectedly"); - // Convert PeerManagerRequest to corresponding PeerManagerNotification, + // Convert PeerManagerRequest to corresponding ReceivedMessage, // and extract destination peer let (dst, msg) = match &net_req { PeerManagerRequest::SendDirectSend(dst_inner, msg_inner) => { @@ -294,11 +295,17 @@ impl NetworkPlayground { // Deliver and copy message if it's not dropped if !self.is_message_dropped(&src_twin_id, dst_twin_id, consensus_msg) { - let msg_notif = - PeerManagerNotification::RecvMessage(src_twin_id.author, msg.clone()); - let msg_copy = self - .deliver_message(src_twin_id, *dst_twin_id, msg_notif) - .await; + let rmsg = ReceivedMessage { + message: NetworkMessage::DirectSendMsg(DirectSendMsg { + protocol_id: msg.protocol_id, + priority: 0, + raw_msg: msg.mdata.clone().into(), + }), + sender: PeerNetworkId::new(NetworkId::Validator, src_twin_id.author), + receive_timestamp_micros: 0, + rpc_replier: None, + }; + let msg_copy = self.deliver_message(src_twin_id, *dst_twin_id, rmsg).await; // Only insert msg_copy once for twins (if delivered) if idx == 0 && msg_inspector(&msg_copy) { @@ -395,7 +402,7 @@ impl NetworkPlayground { pub async fn start(mut self) { // Take the next queued message while let Some((src_twin_id, net_req)) = self.outbound_msgs_rx.next().await { - // Convert PeerManagerRequest to corresponding PeerManagerNotification, + // Convert PeerManagerRequest to corresponding ReceivedMessage, // and extract destination peer let (dst, msg) = match &net_req { PeerManagerRequest::SendDirectSend(dst_inner, msg_inner) => { @@ -410,14 +417,21 @@ impl NetworkPlayground { let dst_twin_ids = self.get_twin_ids(dst); for dst_twin_id in dst_twin_ids.iter() { - let msg_notif = - PeerManagerNotification::RecvMessage(src_twin_id.author, msg.clone()); + let rmsg 
= ReceivedMessage { + message: NetworkMessage::DirectSendMsg(DirectSendMsg { + protocol_id: msg.protocol_id, + priority: 0, + raw_msg: msg.mdata.clone().into(), + }), + sender: PeerNetworkId::new(NetworkId::Validator, src_twin_id.author), + receive_timestamp_micros: 0, + rpc_replier: None, + }; let consensus_msg = msg.to_message().unwrap(); // Deliver and copy message it if it's not dropped if !self.is_message_dropped(&src_twin_id, dst_twin_id, consensus_msg) { - self.deliver_message(src_twin_id, *dst_twin_id, msg_notif) - .await; + self.deliver_message(src_twin_id, *dst_twin_id, rmsg).await; } } } @@ -531,7 +545,6 @@ mod tests { storage::PeersAndMetadata, }, protocols::{ - direct_send::Message, network, network::{NetworkEvents, NewNetworkSender}, }, @@ -849,10 +862,16 @@ mod tests { let peer_id = PeerId::random(); let protocol_id = ProtocolId::ConsensusDirectSendBcs; - let bad_msg = PeerManagerNotification::RecvMessage(peer_id, Message { - protocol_id, - mdata: Bytes::from_static(b"\xde\xad\xbe\xef"), - }); + let bad_msg = ReceivedMessage { + message: NetworkMessage::DirectSendMsg(DirectSendMsg { + protocol_id, + priority: 0, + raw_msg: Bytes::from_static(b"\xde\xad\xbe\xef").into(), + }), + sender: PeerNetworkId::new(NetworkId::Validator, peer_id), + receive_timestamp_micros: 0, + rpc_replier: None, + }; peer_mgr_notifs_tx .push((peer_id, protocol_id), bad_msg) @@ -864,11 +883,17 @@ mod tests { let protocol_id = ProtocolId::ConsensusRpcJson; let (res_tx, _res_rx) = oneshot::channel(); - let liveness_check_msg = PeerManagerNotification::RecvRpc(peer_id, InboundRpcRequest { - protocol_id, - data: Bytes::from(serde_json::to_vec(&liveness_check_msg).unwrap()), - res_tx, - }); + let liveness_check_msg = ReceivedMessage { + message: NetworkMessage::RpcRequest(RpcRequest { + protocol_id, + request_id: 0, // TODO: seq? 
+ priority: 0, + raw_request: Bytes::from(serde_json::to_vec(&liveness_check_msg).unwrap()).into(), + }), + sender: PeerNetworkId::new(NetworkId::Validator, peer_id), + receive_timestamp_micros: 0, + rpc_replier: Some(Arc::new(res_tx)), + }; peer_mgr_notifs_tx .push((peer_id, protocol_id), liveness_check_msg) diff --git a/consensus/src/payload_client/mixed.rs b/consensus/src/payload_client/mixed.rs index 63cc7f00a62c3..5e35b5aff6bae 100644 --- a/consensus/src/payload_client/mixed.rs +++ b/consensus/src/payload_client/mixed.rs @@ -1,27 +1,18 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -#[cfg(test)] -use crate::payload_client::user; -#[cfg(test)] -use crate::payload_client::validator::DummyValidatorTxnClient; +use super::PayloadPullParameters; use crate::{ error::QuorumStoreError, payload_client::{user::UserPayloadClient, PayloadClient}, }; -use aptos_consensus_types::common::{Payload, PayloadFilter}; +use aptos_consensus_types::{common::Payload, utils::PayloadTxnsSize}; use aptos_logger::debug; use aptos_types::{on_chain_config::ValidatorTxnConfig, validator_txn::ValidatorTransaction}; -use aptos_validator_transaction_pool as vtxn_pool; +use aptos_validator_transaction_pool::TransactionFilter; use fail::fail_point; use futures::future::BoxFuture; -#[cfg(test)] -use std::collections::HashSet; -use std::{ - cmp::min, - sync::Arc, - time::{Duration, Instant}, -}; +use std::{cmp::min, sync::Arc, time::Instant}; pub struct MixedPayloadClient { validator_txn_config: ValidatorTxnConfig, @@ -66,218 +57,244 @@ impl MixedPayloadClient { impl PayloadClient for MixedPayloadClient { async fn pull_payload( &self, - mut max_poll_time: Duration, - mut max_items: u64, - mut max_bytes: u64, - max_inline_items: u64, - max_inline_bytes: u64, - validator_txn_filter: vtxn_pool::TransactionFilter, - user_txn_filter: PayloadFilter, + params: PayloadPullParameters, + validator_txn_filter: TransactionFilter, wait_callback: BoxFuture<'static, ()>, - 
pending_ordering: bool, - pending_uncommitted_blocks: usize, - recent_max_fill_fraction: f32, ) -> anyhow::Result<(Vec, Payload), QuorumStoreError> { // Pull validator txns first. let validator_txn_pull_timer = Instant::now(); let mut validator_txns = self .validator_txn_pool_client .pull( - max_poll_time, + params.max_poll_time, min( - max_items, + params.max_txns.count(), self.validator_txn_config.per_block_limit_txn_count(), ), min( - max_bytes, + params.max_txns.size_in_bytes(), self.validator_txn_config.per_block_limit_total_bytes(), ), validator_txn_filter, ) .await; + let vtxn_size = PayloadTxnsSize::new( + validator_txns.len() as u64, + validator_txns + .iter() + .map(|txn| txn.size_in_bytes()) + .sum::() as u64, + ); validator_txns.extend(self.extra_test_only_vtxns()); debug!("num_validator_txns={}", validator_txns.len()); // Update constraints with validator txn pull results. - max_items -= validator_txns.len() as u64; - max_bytes -= validator_txns - .iter() - .map(|txn| txn.size_in_bytes()) - .sum::() as u64; - max_poll_time = max_poll_time.saturating_sub(validator_txn_pull_timer.elapsed()); + let mut user_txn_pull_params = params; + user_txn_pull_params.max_txns -= vtxn_size; + user_txn_pull_params.max_txns_after_filtering -= validator_txns.len() as u64; + user_txn_pull_params.soft_max_txns_after_filtering -= validator_txns.len() as u64; + user_txn_pull_params.max_poll_time = user_txn_pull_params + .max_poll_time + .saturating_sub(validator_txn_pull_timer.elapsed()); // Pull user payload. 
let user_payload = self .user_payload_client - .pull( - max_poll_time, - max_items, - max_bytes, - max_inline_items, - max_inline_bytes, - user_txn_filter, - wait_callback, - pending_ordering, - pending_uncommitted_blocks, - recent_max_fill_fraction, - ) + .pull(user_txn_pull_params, wait_callback) .await?; Ok((validator_txns, user_payload)) } } -#[tokio::test] -async fn mixed_payload_client_should_prioritize_validator_txns() { - let all_validator_txns = vec![ - ValidatorTransaction::dummy(b"1".to_vec()), - ValidatorTransaction::dummy(b"22".to_vec()), - ValidatorTransaction::dummy(b"333".to_vec()), - ]; - - let all_user_txns = crate::test_utils::create_vec_signed_transactions(10); - let client = MixedPayloadClient { - validator_txn_config: ValidatorTxnConfig::V1 { - per_block_limit_txn_count: 99, - per_block_limit_total_bytes: 1048576, - }, - validator_txn_pool_client: Arc::new(DummyValidatorTxnClient::new( - all_validator_txns.clone(), - )), - user_payload_client: Arc::new(user::DummyClient::new(all_user_txns.clone())), +#[cfg(test)] +mod tests { + use crate::payload_client::{ + mixed::MixedPayloadClient, user, validator::DummyValidatorTxnClient, PayloadClient, + PayloadPullParameters, }; + use aptos_consensus_types::common::{Payload, PayloadFilter}; + use aptos_types::{on_chain_config::ValidatorTxnConfig, validator_txn::ValidatorTransaction}; + use aptos_validator_transaction_pool as vtxn_pool; + use std::{collections::HashSet, sync::Arc, time::Duration}; - let (pulled_validator_txns, Payload::DirectMempool(pulled_user_txns)) = client - .pull_payload( - Duration::from_secs(1), // max_poll_time - 99, // max_items - 1048576, // size limit: 1MB - 50, - 500000, // inline limit: 500KB - vtxn_pool::TransactionFilter::PendingTxnHashSet(HashSet::new()), - PayloadFilter::Empty, - Box::pin(async {}), - false, - 0, - 0., - ) - .await - .unwrap() - else { - unreachable!() - }; + #[tokio::test] + async fn mixed_payload_client_should_prioritize_validator_txns() { + let 
all_validator_txns = vec![ + ValidatorTransaction::dummy(b"1".to_vec()), + ValidatorTransaction::dummy(b"22".to_vec()), + ValidatorTransaction::dummy(b"333".to_vec()), + ]; - assert_eq!(3, pulled_validator_txns.len()); - assert_eq!(10, pulled_user_txns.len()); + let all_user_txns = crate::test_utils::create_vec_signed_transactions(10); + let client = MixedPayloadClient { + validator_txn_config: ValidatorTxnConfig::V1 { + per_block_limit_txn_count: 99, + per_block_limit_total_bytes: 1048576, + }, + validator_txn_pool_client: Arc::new(DummyValidatorTxnClient::new( + all_validator_txns.clone(), + )), + user_payload_client: Arc::new(user::DummyClient::new(all_user_txns.clone())), + }; - let (pulled_validator_txns, Payload::DirectMempool(pulled_user_txns)) = client - .pull_payload( - Duration::from_micros(500), // max_poll_time - 99, // max_items - 1048576, // size limit: 1MB - 50, - 500000, // inline limit: 500KB - vtxn_pool::TransactionFilter::PendingTxnHashSet(HashSet::new()), - PayloadFilter::Empty, - Box::pin(async {}), - false, - 0, - 0., - ) - .await - .unwrap() - else { - unreachable!() - }; + let (pulled_validator_txns, Payload::DirectMempool(pulled_user_txns)) = client + .pull_payload( + PayloadPullParameters::new_for_test( + Duration::from_secs(1), // max_poll_time + 120, // max_items + 1048576, // size limit: 1MB + 99, // max_unique_items + 99, + 50, + 500000, // inline limit: 500KB + PayloadFilter::Empty, + false, + 0, + 0., + aptos_infallible::duration_since_epoch(), + ), + vtxn_pool::TransactionFilter::PendingTxnHashSet(HashSet::new()), + Box::pin(async {}), + ) + .await + .unwrap() + else { + unreachable!() + }; - assert_eq!(1, pulled_validator_txns.len()); - assert_eq!(0, pulled_user_txns.len()); + assert_eq!(3, pulled_validator_txns.len()); + assert_eq!(10, pulled_user_txns.len()); - let (pulled_validator_txns, Payload::DirectMempool(pulled_user_txns)) = client - .pull_payload( - Duration::from_secs(1), // max_poll_time - 1, // max_items - 1048576, // 
size limit: 1MB - 0, - 0, // inline limit: 0 - vtxn_pool::TransactionFilter::PendingTxnHashSet(HashSet::new()), - PayloadFilter::Empty, - Box::pin(async {}), - false, - 0, - 0., - ) - .await - .unwrap() - else { - unreachable!() - }; + let (pulled_validator_txns, Payload::DirectMempool(pulled_user_txns)) = client + .pull_payload( + PayloadPullParameters::new_for_test( + Duration::from_micros(500), // max_poll_time + 120, // max_items + 1048576, // size limit: 1MB + 99, // max_unique_items + 99, + 50, + 500000, // inline limit: 500KB + PayloadFilter::Empty, + false, + 0, + 0., + aptos_infallible::duration_since_epoch(), + ), + vtxn_pool::TransactionFilter::PendingTxnHashSet(HashSet::new()), + Box::pin(async {}), + ) + .await + .unwrap() + else { + unreachable!() + }; - assert_eq!(1, pulled_validator_txns.len()); - assert_eq!(0, pulled_user_txns.len()); + assert_eq!(1, pulled_validator_txns.len()); + assert_eq!(0, pulled_user_txns.len()); - let (pulled_validator_txns, Payload::DirectMempool(pulled_user_txns)) = client - .pull_payload( - Duration::from_secs(1), // max_poll_time - 99, // max_items - all_validator_txns[0].size_in_bytes() as u64, - 50, - all_validator_txns[0].size_in_bytes() as u64, - vtxn_pool::TransactionFilter::PendingTxnHashSet(HashSet::new()), - PayloadFilter::Empty, - Box::pin(async {}), - false, - 0, - 0., - ) - .await - .unwrap() - else { - unreachable!() - }; + let (pulled_validator_txns, Payload::DirectMempool(pulled_user_txns)) = client + .pull_payload( + PayloadPullParameters::new_for_test( + Duration::from_secs(1), // max_poll_time + 2, // max_items + 1048576, // size limit: 1MB + 2, // max_unique_items + 2, + 0, + 0, // inline limit: 0 + PayloadFilter::Empty, + false, + 0, + 0., + aptos_infallible::duration_since_epoch(), + ), + vtxn_pool::TransactionFilter::PendingTxnHashSet(HashSet::new()), + Box::pin(async {}), + ) + .await + .unwrap() + else { + unreachable!() + }; - assert_eq!(1, pulled_validator_txns.len()); - assert_eq!(0, 
pulled_user_txns.len()); -} + assert_eq!(2, pulled_validator_txns.len()); + assert_eq!(0, pulled_user_txns.len()); -#[tokio::test] -async fn mixed_payload_client_should_respect_validator_txn_feature_flag() { - let all_validator_txns = vec![ - ValidatorTransaction::dummy(b"1".to_vec()), - ValidatorTransaction::dummy(b"22".to_vec()), - ValidatorTransaction::dummy(b"333".to_vec()), - ]; + let (pulled_validator_txns, Payload::DirectMempool(pulled_user_txns)) = client + .pull_payload( + PayloadPullParameters::new_for_test( + Duration::from_secs(1), // max_poll_time + 40, // max_items + all_validator_txns[0].size_in_bytes() as u64, + 30, // max_unique_items + 30, + 10, + all_validator_txns[0].size_in_bytes() as u64, + PayloadFilter::Empty, + false, + 0, + 0., + aptos_infallible::duration_since_epoch(), + ), + vtxn_pool::TransactionFilter::PendingTxnHashSet(HashSet::new()), + Box::pin(async {}), + ) + .await + .unwrap() + else { + unreachable!() + }; - let all_user_txns = crate::test_utils::create_vec_signed_transactions(10); - let client = MixedPayloadClient { - validator_txn_config: ValidatorTxnConfig::default_disabled(), - validator_txn_pool_client: Arc::new(DummyValidatorTxnClient::new( - all_validator_txns.clone(), - )), - user_payload_client: Arc::new(user::DummyClient::new(all_user_txns.clone())), - }; + assert_eq!(1, pulled_validator_txns.len()); + assert_eq!(0, pulled_user_txns.len()); + } - let (pulled_validator_txns, Payload::DirectMempool(pulled_user_txns)) = client - .pull_payload( - Duration::from_millis(50), // max_poll_time - 99, // max_items - 1048576, // size limit: 1MB - 50, - 500000, // inline limit: 500KB - vtxn_pool::TransactionFilter::PendingTxnHashSet(HashSet::new()), - PayloadFilter::Empty, - Box::pin(async {}), - false, - 0, - 0., - ) - .await - .unwrap() - else { - unreachable!() - }; + #[tokio::test] + async fn mixed_payload_client_should_respect_validator_txn_feature_flag() { + let all_validator_txns = vec![ + 
ValidatorTransaction::dummy(b"1".to_vec()), + ValidatorTransaction::dummy(b"22".to_vec()), + ValidatorTransaction::dummy(b"333".to_vec()), + ]; - assert_eq!(0, pulled_validator_txns.len()); - assert_eq!(10, pulled_user_txns.len()); + let all_user_txns = crate::test_utils::create_vec_signed_transactions(10); + let client = MixedPayloadClient { + validator_txn_config: ValidatorTxnConfig::default_disabled(), + validator_txn_pool_client: Arc::new(DummyValidatorTxnClient::new( + all_validator_txns.clone(), + )), + user_payload_client: Arc::new(user::DummyClient::new(all_user_txns.clone())), + }; + + let (pulled_validator_txns, Payload::DirectMempool(pulled_user_txns)) = client + .pull_payload( + PayloadPullParameters::new_for_test( + Duration::from_millis(50), // max_poll_time + 120, // max_items + 1048576, // size limit: 1MB + 99, // max_unique_items + 99, + 50, + 500000, // inline limit: 500KB + PayloadFilter::Empty, + false, + 0, + 0., + aptos_infallible::duration_since_epoch(), + ), + vtxn_pool::TransactionFilter::PendingTxnHashSet(HashSet::new()), + Box::pin(async {}), + ) + .await + .unwrap() + else { + unreachable!() + }; + + assert_eq!(0, pulled_validator_txns.len()); + assert_eq!(10, pulled_user_txns.len()); + } } diff --git a/consensus/src/payload_client/mod.rs b/consensus/src/payload_client/mod.rs index d37cbfbbdb5ac..1b769faa9c36a 100644 --- a/consensus/src/payload_client/mod.rs +++ b/consensus/src/payload_client/mod.rs @@ -2,9 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 use crate::error::QuorumStoreError; -use aptos_consensus_types::common::{Payload, PayloadFilter}; +use aptos_consensus_types::{ + common::{Payload, PayloadFilter}, + utils::PayloadTxnsSize, +}; use aptos_types::validator_txn::ValidatorTransaction; use aptos_validator_transaction_pool::TransactionFilter; +use core::fmt; use futures::future::BoxFuture; use std::time::Duration; @@ -12,22 +16,80 @@ pub mod mixed; pub mod user; pub mod validator; +pub struct PayloadPullParameters { + pub 
max_poll_time: Duration, + pub max_txns: PayloadTxnsSize, + pub max_txns_after_filtering: u64, + pub soft_max_txns_after_filtering: u64, + pub max_inline_txns: PayloadTxnsSize, + pub opt_batch_txns_pct: u8, + pub user_txn_filter: PayloadFilter, + pub pending_ordering: bool, + pub pending_uncommitted_blocks: usize, + pub recent_max_fill_fraction: f32, + pub block_timestamp: Duration, +} + +impl PayloadPullParameters { + #[cfg(test)] + fn new_for_test( + max_poll_time: Duration, + max_txns: u64, + max_txns_bytes: u64, + max_txns_after_filtering: u64, + soft_max_txns_after_filtering: u64, + max_inline_txns: u64, + max_inline_txns_bytes: u64, + user_txn_filter: PayloadFilter, + pending_ordering: bool, + pending_uncommitted_blocks: usize, + recent_max_fill_fraction: f32, + block_timestamp: Duration, + ) -> Self { + Self { + max_poll_time, + max_txns: PayloadTxnsSize::new(max_txns, max_txns_bytes), + max_txns_after_filtering, + soft_max_txns_after_filtering, + max_inline_txns: PayloadTxnsSize::new(max_inline_txns, max_inline_txns_bytes), + opt_batch_txns_pct: 0, + user_txn_filter, + pending_ordering, + pending_uncommitted_blocks, + recent_max_fill_fraction, + block_timestamp, + } + } +} + +impl fmt::Debug for PayloadPullParameters { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("PayloadPullParameters") + .field("max_poll_time", &self.max_poll_time) + .field("max_items", &self.max_txns) + .field("max_unique_items", &self.max_txns_after_filtering) + .field( + "soft_max_txns_after_filtering", + &self.soft_max_txns_after_filtering, + ) + .field("max_inline_items", &self.max_inline_txns) + .field("pending_ordering", &self.pending_ordering) + .field( + "pending_uncommitted_blocks", + &self.pending_uncommitted_blocks, + ) + .field("recent_max_fill_fraction", &self.recent_max_fill_fraction) + .field("block_timestamp", &self.block_timestamp) + .finish() + } +} + #[async_trait::async_trait] pub trait PayloadClient: Send + Sync { async fn 
pull_payload( &self, - max_poll_time: Duration, - max_items: u64, - max_bytes: u64, - max_inline_items: u64, - max_inline_bytes: u64, + config: PayloadPullParameters, validator_txn_filter: TransactionFilter, - user_txn_filter: PayloadFilter, wait_callback: BoxFuture<'static, ()>, - pending_ordering: bool, - pending_uncommitted_blocks: usize, - recent_max_fill_fraction: f32, ) -> anyhow::Result<(Vec, Payload), QuorumStoreError>; - - fn trace_payloads(&self) {} } diff --git a/consensus/src/payload_client/user/mod.rs b/consensus/src/payload_client/user/mod.rs index 6a5cc6db745ff..9d6cafbed2322 100644 --- a/consensus/src/payload_client/user/mod.rs +++ b/consensus/src/payload_client/user/mod.rs @@ -1,11 +1,13 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 +use super::PayloadPullParameters; use crate::error::QuorumStoreError; -use aptos_consensus_types::common::{Payload, PayloadFilter}; +use aptos_consensus_types::common::Payload; #[cfg(test)] use aptos_types::transaction::SignedTransaction; use futures::future::BoxFuture; +#[cfg(test)] use std::time::Duration; #[cfg(test)] use std::time::Instant; @@ -16,16 +18,8 @@ use std::time::Instant; pub trait UserPayloadClient: Send + Sync { async fn pull( &self, - max_poll_time: Duration, - max_items: u64, - max_bytes: u64, - max_inline_items: u64, - max_inline_bytes: u64, - exclude: PayloadFilter, + params: PayloadPullParameters, wait_callback: BoxFuture<'static, ()>, - pending_ordering: bool, - pending_uncommitted_blocks: usize, - recent_max_fill_fraction: f32, ) -> anyhow::Result; } @@ -47,33 +41,33 @@ impl DummyClient { impl UserPayloadClient for DummyClient { async fn pull( &self, - max_poll_time: Duration, - mut max_items: u64, - mut max_bytes: u64, - _max_inline_items: u64, - _max_inline_bytes: u64, - _exclude: PayloadFilter, + mut params: PayloadPullParameters, _wait_callback: BoxFuture<'static, ()>, - _pending_ordering: bool, - _pending_uncommitted_blocks: usize, - _recent_max_fill_fraction: 
f32, ) -> anyhow::Result { + use aptos_consensus_types::utils::PayloadTxnsSize; + let timer = Instant::now(); let mut nxt_txn_idx = 0; let mut txns = vec![]; - while timer.elapsed() < max_poll_time - && max_items >= 1 - && max_bytes >= 1 + while timer.elapsed() < params.max_poll_time + && params.max_txns.count() >= 1 + && params.max_txns_after_filtering >= 1 + && params.soft_max_txns_after_filtering >= 1 + && params.max_txns.size_in_bytes() >= 1 && nxt_txn_idx < self.txns.len() { tokio::time::sleep(Duration::from_millis(1)).await; let txn = self.txns[nxt_txn_idx].clone(); let txn_size = txn.raw_txn_bytes_len() as u64; - if txn_size > max_bytes { + if txn_size > params.max_txns.size_in_bytes() { break; } - max_items -= 1; - max_bytes -= txn_size; + params.max_txns = PayloadTxnsSize::new( + params.max_txns.count() - 1, + params.max_txns.size_in_bytes() - txn_size, + ); + params.max_txns_after_filtering -= 1; + params.soft_max_txns_after_filtering -= 1; nxt_txn_idx += 1; txns.push(txn); } diff --git a/consensus/src/payload_client/user/quorum_store_client.rs b/consensus/src/payload_client/user/quorum_store_client.rs index c6f49a2f1e6c9..b145ba1f76f61 100644 --- a/consensus/src/payload_client/user/quorum_store_client.rs +++ b/consensus/src/payload_client/user/quorum_store_client.rs @@ -2,12 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - counters::WAIT_FOR_FULL_BLOCKS_TRIGGERED, error::QuorumStoreError, monitor, - payload_client::user::UserPayloadClient, + counters::WAIT_FOR_FULL_BLOCKS_TRIGGERED, + error::QuorumStoreError, + monitor, + payload_client::{user::UserPayloadClient, PayloadPullParameters}, }; use aptos_consensus_types::{ common::{Payload, PayloadFilter}, - request_response::{GetPayloadCommand, GetPayloadResponse}, + request_response::{GetPayloadCommand, GetPayloadRequest, GetPayloadResponse}, + utils::PayloadTxnsSize, }; use aptos_logger::info; use fail::fail_point; @@ -45,23 +48,27 @@ impl QuorumStoreClient { async fn pull_internal( &self, - 
max_items: u64, - max_bytes: u64, - max_inline_items: u64, - max_inline_bytes: u64, + max_txns: PayloadTxnsSize, + max_txns_after_filtering: u64, + soft_max_txns_after_filtering: u64, + max_inline_txns: PayloadTxnsSize, + txns_with_proofs_pct: u8, return_non_full: bool, exclude_payloads: PayloadFilter, + block_timestamp: Duration, ) -> anyhow::Result { let (callback, callback_rcv) = oneshot::channel(); - let req = GetPayloadCommand::GetPayloadRequest( - max_items, - max_bytes, - max_inline_items, - max_inline_bytes, + let req = GetPayloadCommand::GetPayloadRequest(GetPayloadRequest { + max_txns, + max_txns_after_filtering, + soft_max_txns_after_filtering, + opt_batch_txns_pct: txns_with_proofs_pct, + max_inline_txns, + filter: exclude_payloads, return_non_full, - exclude_payloads.clone(), callback, - ); + block_timestamp, + }); // send to shared mempool self.consensus_to_quorum_store_sender .clone() @@ -86,21 +93,13 @@ impl QuorumStoreClient { impl UserPayloadClient for QuorumStoreClient { async fn pull( &self, - max_poll_time: Duration, - max_items: u64, - max_bytes: u64, - max_inline_items: u64, - max_inline_bytes: u64, - exclude: PayloadFilter, + params: PayloadPullParameters, wait_callback: BoxFuture<'static, ()>, - pending_ordering: bool, - pending_uncommitted_blocks: usize, - recent_max_fill_fraction: f32, ) -> anyhow::Result { - let return_non_full = recent_max_fill_fraction + let return_non_full = params.recent_max_fill_fraction < self.wait_for_full_blocks_above_recent_fill_threshold - && pending_uncommitted_blocks < self.wait_for_full_blocks_above_pending_blocks; - let return_empty = pending_ordering && return_non_full; + && params.pending_uncommitted_blocks < self.wait_for_full_blocks_above_pending_blocks; + let return_empty = params.pending_ordering && return_non_full; WAIT_FOR_FULL_BLOCKS_TRIGGERED.observe(if !return_non_full { 1.0 } else { 0.0 }); @@ -113,15 +112,17 @@ impl UserPayloadClient for QuorumStoreClient { let payload = loop { // Make sure we 
don't wait more than expected, due to thread scheduling delays/processing time consumed - let done = start_time.elapsed() >= max_poll_time; + let done = start_time.elapsed() >= params.max_poll_time; let payload = self .pull_internal( - max_items, - max_bytes, - max_inline_items, - max_inline_bytes, + params.max_txns, + params.max_txns_after_filtering, + params.soft_max_txns_after_filtering, + params.max_inline_txns, + params.opt_batch_txns_pct, return_non_full || return_empty || done, - exclude.clone(), + params.user_txn_filter.clone(), + params.block_timestamp, ) .await?; if payload.is_empty() && !return_empty && !done { @@ -134,14 +135,9 @@ impl UserPayloadClient for QuorumStoreClient { break payload; }; info!( + pull_params = ?params, elapsed_time_ms = start_time.elapsed().as_millis() as u64, - max_poll_time_ms = max_poll_time.as_millis() as u64, payload_len = payload.len(), - max_items = max_items, - max_bytes = max_bytes, - max_inline_items = max_inline_items, - max_inline_bytes = max_inline_bytes, - pending_ordering = pending_ordering, return_empty = return_empty, return_non_full = return_non_full, duration_ms = start_time.elapsed().as_millis() as u64, diff --git a/consensus/src/payload_manager.rs b/consensus/src/payload_manager.rs index 56b96e20742cc..c2e7c580fb9b3 100644 --- a/consensus/src/payload_manager.rs +++ b/consensus/src/payload_manager.rs @@ -3,59 +3,125 @@ use crate::{ consensus_observer::{ - network_message::ConsensusObserverMessage, observer::ObserverDataStatus, - publisher::ConsensusPublisher, + network::observer_message::{BlockTransactionPayload, ConsensusObserverMessage}, + observer::payload_store::BlockPayloadStatus, + publisher::consensus_publisher::ConsensusPublisher, }, counters, quorum_store::{batch_store::BatchReader, quorum_store_coordinator::CoordinatorCommand}, }; use aptos_consensus_types::{ block::Block, - common::{DataStatus, Payload, ProofWithData}, - proof_of_store::ProofOfStore, + common::{DataStatus, Payload, ProofWithData, 
Round}, + payload::{BatchPointer, DataFetchFut, TDataInfo}, + proof_of_store::BatchInfo, }; use aptos_crypto::HashValue; -use aptos_executor_types::{ExecutorError::DataNotFound, *}; +use aptos_executor_types::{ + ExecutorError::{DataNotFound, InternalError}, + *, +}; use aptos_infallible::Mutex; use aptos_logger::prelude::*; -use aptos_types::transaction::SignedTransaction; -use futures::channel::mpsc::Sender; -use itertools::Either; +use aptos_types::{transaction::SignedTransaction, PeerId}; +use async_trait::async_trait; +use futures::{channel::mpsc::Sender, FutureExt}; use std::{ - collections::{hash_map::Entry, HashMap}, + collections::{btree_map::Entry, BTreeMap}, + ops::Deref, sync::Arc, - time::Duration, }; -use tokio::{sync::oneshot, time::timeout}; +use tokio::sync::oneshot; +/// A trait that defines the interface for a payload manager. The payload manager is responsible for +/// resolving the transactions in a block's payload. +#[async_trait] pub trait TPayloadManager: Send + Sync { + /// Notify the payload manager that a block has been committed. This indicates that the + /// transactions in the block's payload are no longer required for consensus. + fn notify_commit(&self, block_timestamp: u64, payloads: Vec); + + /// Prefetch the data for a payload. This is used to ensure that the data for a payload is + /// available when block is executed. fn prefetch_payload_data(&self, payload: &Payload, timestamp: u64); + + /// Check if the transactions corresponding are available. This is specific to payload + /// manager implementations. For optimistic quorum store, we only check if optimistic + /// batches are available locally. + fn check_payload_availability(&self, block: &Block) -> bool; + + /// Get the transactions in a block's payload. This function returns a vector of transactions. 
+ async fn get_transactions( + &self, + block: &Block, + ) -> ExecutorResult<(Vec, Option)>; } -/// Responsible to extract the transactions out of the payload and notify QuorumStore about commits. -/// If QuorumStore is enabled, has to ask BatchReader for the transaction behind the proofs of availability in the payload. -pub enum PayloadManager { - DirectMempool, - InQuorumStore( - Arc, - Sender, - Option>, - ), - ConsensusObserver( - Arc>>, - Option>, - ), +/// A payload manager that directly returns the transactions in a block's payload. +pub struct DirectMempoolPayloadManager {} + +impl DirectMempoolPayloadManager { + pub fn new() -> Self { + Self {} + } } -impl TPayloadManager for PayloadManager { - fn prefetch_payload_data(&self, payload: &Payload, timestamp: u64) { - self.prefetch_payload_data(payload, timestamp); +#[async_trait] +impl TPayloadManager for DirectMempoolPayloadManager { + fn notify_commit(&self, _block_timestamp: u64, _payloads: Vec) {} + + fn prefetch_payload_data(&self, _payload: &Payload, _timestamp: u64) {} + + fn check_payload_availability(&self, _block: &Block) -> bool { + true } + + async fn get_transactions( + &self, + block: &Block, + ) -> ExecutorResult<(Vec, Option)> { + let Some(payload) = block.payload() else { + return Ok((Vec::new(), None)); + }; + + match payload { + Payload::DirectMempool(txns) => Ok((txns.clone(), None)), + _ => unreachable!( + "DirectMempoolPayloadManager: Unacceptable payload type {}. Epoch: {}, Round: {}, Block: {}", + payload, + block.block_data().epoch(), + block.block_data().round(), + block.id() + ), + } + } +} + +/// A payload manager that resolves the transactions in a block's payload from the quorum store. 
+pub struct QuorumStorePayloadManager { + batch_reader: Arc, + coordinator_tx: Sender, + maybe_consensus_publisher: Option>, + ordered_authors: Vec, } -impl PayloadManager { +impl QuorumStorePayloadManager { + pub fn new( + batch_reader: Arc, + coordinator_tx: Sender, + maybe_consensus_publisher: Option>, + ordered_authors: Vec, + ) -> Self { + Self { + batch_reader, + coordinator_tx, + maybe_consensus_publisher, + ordered_authors, + } + } + fn request_transactions( - proofs: Vec, + batches: Vec<(BatchInfo, Vec)>, block_timestamp: u64, batch_reader: Arc, ) -> Vec<( @@ -63,87 +129,104 @@ impl PayloadManager { oneshot::Receiver>>, )> { let mut receivers = Vec::new(); - for pos in proofs { + for (batch_info, responders) in batches { trace!( - "QSE: requesting pos {:?}, digest {}, time = {}", - pos, - pos.digest(), + "QSE: requesting batch {:?}, time = {}", + batch_info, block_timestamp ); - if block_timestamp <= pos.expiration() { - receivers.push((*pos.digest(), batch_reader.get_batch(pos))); + if block_timestamp <= batch_info.expiration() { + receivers.push(( + *batch_info.digest(), + batch_reader.get_batch( + *batch_info.digest(), + batch_info.expiration(), + responders, + ), + )); } else { - debug!("QSE: skipped expired pos {}", pos.digest()); + debug!("QSE: skipped expired batch {}", batch_info.digest()); } } receivers } +} - ///Pass commit information to BatchReader and QuorumStore wrapper for their internal cleanups. 
- pub fn notify_commit(&self, block_timestamp: u64, payloads: Vec) { - match self { - PayloadManager::DirectMempool | PayloadManager::ConsensusObserver(_, _) => {}, - PayloadManager::InQuorumStore(batch_reader, coordinator_tx, _) => { - batch_reader.update_certified_timestamp(block_timestamp); - - let batches: Vec<_> = payloads - .into_iter() - .flat_map(|payload| match payload { - Payload::DirectMempool(_) => { - unreachable!("InQuorumStore should be used"); - }, - Payload::InQuorumStore(proof_with_status) => proof_with_status - .proofs - .iter() - .map(|proof| proof.info().clone()) - .collect::>(), - Payload::InQuorumStoreWithLimit(proof_with_status) => proof_with_status - .proof_with_data - .proofs - .iter() - .map(|proof| proof.info().clone()) - .collect::>(), - Payload::QuorumStoreInlineHybrid(inline_batches, proof_with_data, _) => { - inline_batches +#[async_trait] +impl TPayloadManager for QuorumStorePayloadManager { + fn notify_commit(&self, block_timestamp: u64, payloads: Vec) { + self.batch_reader + .update_certified_timestamp(block_timestamp); + + let batches: Vec<_> = payloads + .into_iter() + .flat_map(|payload| match payload { + Payload::DirectMempool(_) => { + unreachable!("InQuorumStore should be used"); + }, + Payload::InQuorumStore(proof_with_status) => proof_with_status + .proofs + .iter() + .map(|proof| proof.info().clone()) + .collect::>(), + Payload::InQuorumStoreWithLimit(proof_with_status) => proof_with_status + .proof_with_data + .proofs + .iter() + .map(|proof| proof.info().clone()) + .collect::>(), + Payload::QuorumStoreInlineHybrid(inline_batches, proof_with_data, _) => { + inline_batches + .iter() + .map(|(batch_info, _)| batch_info.clone()) + .chain( + proof_with_data + .proofs .iter() - .map(|(batch_info, _)| batch_info.clone()) - .chain( - proof_with_data - .proofs - .iter() - .map(|proof| proof.info().clone()), - ) - .collect::>() - }, - }) - .collect(); + .map(|proof| proof.info().clone()), + ) + .collect::>() + }, + 
Payload::OptQuorumStore(opt_quorum_store_payload) => { + opt_quorum_store_payload.into_inner().get_all_batch_infos() + }, + }) + .collect(); - let mut tx = coordinator_tx.clone(); + let mut tx = self.coordinator_tx.clone(); - if let Err(e) = tx.try_send(CoordinatorCommand::CommitNotification( - block_timestamp, - batches, - )) { - warn!( - "CommitNotification failed. Is the epoch shutting down? error: {}", - e - ); - } - }, + if let Err(e) = tx.try_send(CoordinatorCommand::CommitNotification( + block_timestamp, + batches, + )) { + warn!( + "CommitNotification failed. Is the epoch shutting down? error: {}", + e + ); } } - /// Called from consensus to pre-fetch the transaction behind the batches in the block. - pub fn prefetch_payload_data(&self, payload: &Payload, timestamp: u64) { + fn prefetch_payload_data(&self, payload: &Payload, timestamp: u64) { + // This is deprecated. + // TODO(ibalajiarun): Remove this after migrating to OptQuorumStore type let request_txns_and_update_status = move |proof_with_status: &ProofWithData, batch_reader: Arc| { if proof_with_status.status.lock().is_some() { return; } - let receivers = PayloadManager::request_transactions( - proof_with_status.proofs.clone(), + let receivers = Self::request_transactions( + proof_with_status + .proofs + .iter() + .map(|proof| { + ( + proof.info().clone(), + proof.shuffled_signers(&self.ordered_authors), + ) + }) + .collect(), timestamp, - batch_reader.clone(), + batch_reader, ); proof_with_status .status @@ -151,184 +234,138 @@ impl PayloadManager { .replace(DataStatus::Requested(receivers)); }; - match self { - PayloadManager::DirectMempool | PayloadManager::ConsensusObserver(_, _) => {}, - PayloadManager::InQuorumStore(batch_reader, _, _) => match payload { - Payload::InQuorumStore(proof_with_status) => { - request_txns_and_update_status(proof_with_status, batch_reader.clone()); - }, - Payload::InQuorumStoreWithLimit(proof_with_data) => { - request_txns_and_update_status( - 
&proof_with_data.proof_with_data, - batch_reader.clone(), - ); - }, - Payload::QuorumStoreInlineHybrid(_, proof_with_data, _) => { - request_txns_and_update_status(proof_with_data, batch_reader.clone()); - }, - Payload::DirectMempool(_) => { - unreachable!() - }, - }, + fn prefetch_helper( + data_pointer: &BatchPointer, + batch_reader: Arc, + timestamp: u64, + ordered_authors: &[PeerId], + ) { + let mut data_fut = data_pointer.data_fut.lock(); + if data_fut.is_some() { + return; + } + + let batches_and_responders = data_pointer + .batch_summary + .iter() + .map(|proof| { + let signers = proof.signers(ordered_authors); + // TODO(ibalajiarun): Add block author to signers + (proof.info().clone(), signers) + }) + .collect(); + let fut = + request_txns_from_quorum_store(batches_and_responders, timestamp, batch_reader) + .boxed() + .shared(); + *data_fut = Some(DataFetchFut { fut, iteration: 0 }) } - } - /// Extract transaction from a given block - /// Assumes it is never called for the same block concurrently. Otherwise status can be None. 
- pub async fn get_transactions( - &self, - block: &Block, - ) -> ExecutorResult<(Vec, Option)> { - let payload = match block.payload() { - Some(p) => p, - None => return Ok((Vec::new(), None)), + match payload { + Payload::InQuorumStore(proof_with_status) => { + request_txns_and_update_status(proof_with_status, self.batch_reader.clone()); + }, + Payload::InQuorumStoreWithLimit(proof_with_data) => { + request_txns_and_update_status( + &proof_with_data.proof_with_data, + self.batch_reader.clone(), + ); + }, + Payload::QuorumStoreInlineHybrid(_, proof_with_data, _) => { + request_txns_and_update_status(proof_with_data, self.batch_reader.clone()); + }, + Payload::DirectMempool(_) => { + unreachable!() + }, + Payload::OptQuorumStore(opt_qs_payload) => { + prefetch_helper( + opt_qs_payload.opt_batches(), + self.batch_reader.clone(), + timestamp, + &self.ordered_authors, + ); + prefetch_helper( + opt_qs_payload.proof_with_data(), + self.batch_reader.clone(), + timestamp, + &self.ordered_authors, + ) + }, }; + } - if let PayloadManager::ConsensusObserver(txns_pool, consensus_publisher) = self { - // If the data is already available, return it, otherwise put the tx in the pool and wait for it. - // It's important to make sure this doesn't race with the payload insertion part. - let result = match txns_pool.lock().entry(block.id()) { - Entry::Occupied(mut value) => match value.get_mut() { - ObserverDataStatus::Available(data) => Either::Left(data.clone()), - ObserverDataStatus::Requested(tx) => { - let (new_tx, rx) = oneshot::channel(); - *tx = new_tx; - Either::Right(rx) - }, - }, - Entry::Vacant(entry) => { - let (tx, rx) = oneshot::channel(); - entry.insert(ObserverDataStatus::Requested(tx)); - Either::Right(rx) - }, - }; - let block_transaction_payload = match result { - Either::Left(data) => data, - Either::Right(rx) => timeout(Duration::from_millis(300), rx) - .await - .map_err(|_| ExecutorError::CouldNotGetData)? 
- .map_err(|_| ExecutorError::CouldNotGetData)?, - }; - if let Some(consensus_publisher) = consensus_publisher { - let message = ConsensusObserverMessage::new_block_payload_message( - block.gen_block_info(HashValue::zero(), 0, None), - block_transaction_payload.transactions.clone(), - block_transaction_payload.limit, - ); - consensus_publisher.publish_message(message).await; - } - return Ok(( - block_transaction_payload.transactions, - block_transaction_payload.limit, - )); - } + fn check_payload_availability(&self, block: &Block) -> bool { + let Some(payload) = block.payload() else { + return true; + }; - async fn process_payload( - proof_with_data: &ProofWithData, - batch_reader: Arc, - block: &Block, - ) -> ExecutorResult> { - let status = proof_with_data.status.lock().take(); - match status.expect("Should have been updated before.") { - DataStatus::Cached(data) => { - counters::QUORUM_BATCH_READY_COUNT.inc(); - proof_with_data - .status - .lock() - .replace(DataStatus::Cached(data.clone())); - Ok(data) - }, - DataStatus::Requested(receivers) => { - let _timer = counters::BATCH_WAIT_DURATION.start_timer(); - let mut vec_ret = Vec::new(); - if !receivers.is_empty() { - debug!( - "QSE: waiting for data on {} receivers, block_round {}", - receivers.len(), - block.round() - ); - } - for (digest, rx) in receivers { - match rx.await { - Err(e) => { - // We probably advanced epoch already. 
- warn!( - "Oneshot channel to get a batch was dropped with error {:?}", - e - ); - let new_receivers = PayloadManager::request_transactions( - proof_with_data.proofs.clone(), - block.timestamp_usecs(), - batch_reader.clone(), - ); - // Could not get all data so requested again - proof_with_data - .status - .lock() - .replace(DataStatus::Requested(new_receivers)); - return Err(DataNotFound(digest)); - }, - Ok(Ok(data)) => { - vec_ret.push(data); - }, - Ok(Err(e)) => { - let new_receivers = PayloadManager::request_transactions( - proof_with_data.proofs.clone(), - block.timestamp_usecs(), - batch_reader.clone(), - ); - // Could not get all data so requested again - proof_with_data - .status - .lock() - .replace(DataStatus::Requested(new_receivers)); - return Err(e); - }, - } + match payload { + Payload::DirectMempool(_) => { + unreachable!("QuorumStore doesn't support DirectMempool payload") + }, + Payload::InQuorumStore(_) => true, + Payload::InQuorumStoreWithLimit(_) => true, + Payload::QuorumStoreInlineHybrid(_, _, _) => true, + Payload::OptQuorumStore(opt_qs_payload) => { + for batch in opt_qs_payload.opt_batches().deref() { + if self.batch_reader.exists(batch.digest()).is_none() { + return false; } - let ret: Vec = vec_ret.into_iter().flatten().collect(); - // execution asks for the data twice, so data is cached here for the second time. 
- proof_with_data - .status - .lock() - .replace(DataStatus::Cached(ret.clone())); - Ok(ret) - }, - } + } + true + }, } + } - let result = match (self, payload) { - (PayloadManager::DirectMempool, Payload::DirectMempool(txns)) => (txns.clone(), None), - ( - PayloadManager::InQuorumStore(batch_reader, _, _), - Payload::InQuorumStore(proof_with_data), - ) => ( - process_payload(proof_with_data, batch_reader.clone(), block).await?, - None, - ), - ( - PayloadManager::InQuorumStore(batch_reader, _, _), - Payload::InQuorumStoreWithLimit(proof_with_data), - ) => ( - process_payload( + async fn get_transactions( + &self, + block: &Block, + ) -> ExecutorResult<(Vec, Option)> { + let Some(payload) = block.payload() else { + return Ok((Vec::new(), None)); + }; + + let transaction_payload = match payload { + Payload::InQuorumStore(proof_with_data) => { + let transactions = process_payload( + proof_with_data, + self.batch_reader.clone(), + block, + &self.ordered_authors, + ) + .await?; + BlockTransactionPayload::new_in_quorum_store( + transactions, + proof_with_data.proofs.clone(), + ) + }, + Payload::InQuorumStoreWithLimit(proof_with_data) => { + let transactions = process_payload( &proof_with_data.proof_with_data, - batch_reader.clone(), + self.batch_reader.clone(), block, + &self.ordered_authors, ) - .await?, - proof_with_data.max_txns_to_execute, - ), - ( - PayloadManager::InQuorumStore(batch_reader, _, _), - Payload::QuorumStoreInlineHybrid( - inline_batches, - proof_with_data, - max_txns_to_execute, - ), - ) => ( - { - let mut all_txns = - process_payload(proof_with_data, batch_reader.clone(), block).await?; + .await?; + BlockTransactionPayload::new_in_quorum_store_with_limit( + transactions, + proof_with_data.proof_with_data.proofs.clone(), + proof_with_data.max_txns_to_execute, + ) + }, + Payload::QuorumStoreInlineHybrid( + inline_batches, + proof_with_data, + max_txns_to_execute, + ) => { + let all_transactions = { + let mut all_txns = process_payload( + 
proof_with_data, + self.batch_reader.clone(), + block, + &self.ordered_authors, + ) + .await?; all_txns.append( &mut inline_batches .iter() @@ -337,10 +374,47 @@ impl PayloadManager { .collect(), ); all_txns - }, - *max_txns_to_execute, - ), - (_, _) => unreachable!( + }; + let inline_batches = inline_batches + .iter() + .map(|(batch_info, _)| batch_info.clone()) + .collect(); + BlockTransactionPayload::new_quorum_store_inline_hybrid( + all_transactions, + proof_with_data.proofs.clone(), + *max_txns_to_execute, + inline_batches, + ) + }, + Payload::OptQuorumStore(opt_qs_payload) => { + let opt_batch_txns = process_payload_helper( + opt_qs_payload.opt_batches(), + self.batch_reader.clone(), + block, + &self.ordered_authors, + ) + .await?; + let proof_batch_txns = process_payload_helper( + opt_qs_payload.proof_with_data(), + self.batch_reader.clone(), + block, + &self.ordered_authors, + ) + .await?; + let inline_batch_txns = opt_qs_payload.inline_batches().transactions(); + let all_txns = [opt_batch_txns, proof_batch_txns, inline_batch_txns].concat(); + BlockTransactionPayload::new_opt_quorum_store( + all_txns, + opt_qs_payload.proof_with_data().deref().clone(), + opt_qs_payload.max_txns_to_execute(), + [ + opt_qs_payload.opt_batches().deref().clone(), + opt_qs_payload.inline_batches().batch_infos(), + ] + .concat(), + ) + }, + _ => unreachable!( "Wrong payload {} epoch {}, round {}, id {}", payload, block.block_data().epoch(), @@ -348,14 +422,278 @@ impl PayloadManager { block.id() ), }; - if let PayloadManager::InQuorumStore(_, _, Some(consensus_publisher)) = self { + + if let Some(consensus_publisher) = &self.maybe_consensus_publisher { let message = ConsensusObserverMessage::new_block_payload_message( block.gen_block_info(HashValue::zero(), 0, None), - result.0.clone(), - result.1, + transaction_payload.clone(), ); - consensus_publisher.publish_message(message).await; + consensus_publisher.publish_message(message); } - Ok(result) + + Ok(( + 
transaction_payload.transactions(), + transaction_payload.limit(), + )) + } +} + +/// Returns the transactions for the consensus observer payload manager +async fn get_transactions_for_observer( + block: &Block, + block_payloads: &Arc>>, + consensus_publisher: &Option>, +) -> ExecutorResult<(Vec, Option)> { + // The data should already be available (as consensus observer will only ever + // forward a block to the executor once the data has been received and verified). + let block_payload = match block_payloads.lock().entry((block.epoch(), block.round())) { + Entry::Occupied(mut value) => match value.get_mut() { + BlockPayloadStatus::AvailableAndVerified(block_payload) => block_payload.clone(), + BlockPayloadStatus::AvailableAndUnverified(_) => { + // This shouldn't happen (the payload should already be verified) + let error = format!( + "Payload data for block epoch {}, round {} is unverified!", + block.epoch(), + block.round() + ); + return Err(InternalError { error }); + }, + }, + Entry::Vacant(_) => { + // This shouldn't happen (the payload should already be present) + let error = format!( + "Missing payload data for block epoch {}, round {}!", + block.epoch(), + block.round() + ); + return Err(InternalError { error }); + }, + }; + + // If the payload is valid, publish it to any downstream observers + let transaction_payload = block_payload.transaction_payload(); + if let Some(consensus_publisher) = consensus_publisher { + let message = ConsensusObserverMessage::new_block_payload_message( + block.gen_block_info(HashValue::zero(), 0, None), + transaction_payload.clone(), + ); + consensus_publisher.publish_message(message); + } + + // Return the transactions and the transaction limit + Ok(( + transaction_payload.transactions(), + transaction_payload.limit(), + )) +} + +async fn request_txns_from_quorum_store( + batches_and_responders: Vec<(BatchInfo, Vec)>, + timestamp: u64, + batch_reader: Arc, +) -> ExecutorResult> { + let mut vec_ret = Vec::new(); + let 
receivers = QuorumStorePayloadManager::request_transactions( + batches_and_responders, + timestamp, + batch_reader, + ); + for (digest, rx) in receivers { + match rx.await { + Err(e) => { + // We probably advanced epoch already. + warn!( + "Oneshot channel to get a batch was dropped with error {:?}", + e + ); + return Err(DataNotFound(digest)); + }, + Ok(Ok(data)) => { + vec_ret.push(data); + }, + Ok(Err(e)) => { + return Err(e); + }, + } + } + let ret: Vec = vec_ret.into_iter().flatten().collect(); + Ok(ret) +} + +async fn process_payload_helper( + data_ptr: &BatchPointer, + batch_reader: Arc, + block: &Block, + ordered_authors: &[PeerId], +) -> ExecutorResult> { + let (iteration, fut) = { + let data_fut_guard = data_ptr.data_fut.lock(); + let data_fut = data_fut_guard.as_ref().expect("must be initialized"); + (data_fut.iteration, data_fut.fut.clone()) + }; + + let result = fut.await; + // If error, reschedule before returning the result + if result.is_err() { + let mut data_fut_guard = data_ptr.data_fut.lock(); + let data_fut = data_fut_guard.as_mut().expect("must be initialized"); + // Protection against race, check the iteration number before rescheduling. + if data_fut.iteration == iteration { + let batches_and_responders = data_ptr + .batch_summary + .iter() + .map(|proof| { + let mut signers = proof.signers(ordered_authors); + if let Some(author) = block.author() { + signers.push(author); + } + (proof.info().clone(), signers) + }) + .collect(); + data_fut.fut = request_txns_from_quorum_store( + batches_and_responders, + block.timestamp_usecs(), + batch_reader, + ) + .boxed() + .shared(); + data_fut.iteration = iteration + 1; + } + } + result +} + +/// This is deprecated. 
Use `process_payload_helper` instead after migrating to +/// OptQuorumStore payload +async fn process_payload( + proof_with_data: &ProofWithData, + batch_reader: Arc, + block: &Block, + ordered_authors: &[PeerId], +) -> ExecutorResult> { + let status = proof_with_data.status.lock().take(); + match status.expect("Should have been updated before.") { + DataStatus::Cached(data) => { + counters::QUORUM_BATCH_READY_COUNT.inc(); + proof_with_data + .status + .lock() + .replace(DataStatus::Cached(data.clone())); + Ok(data) + }, + DataStatus::Requested(receivers) => { + let _timer = counters::BATCH_WAIT_DURATION.start_timer(); + let mut vec_ret = Vec::new(); + if !receivers.is_empty() { + debug!( + "QSE: waiting for data on {} receivers, block_round {}", + receivers.len(), + block.round() + ); + } + for (digest, rx) in receivers { + match rx.await { + Err(e) => { + // We probably advanced epoch already. + warn!( + "Oneshot channel to get a batch was dropped with error {:?}", + e + ); + let new_receivers = QuorumStorePayloadManager::request_transactions( + proof_with_data + .proofs + .iter() + .map(|proof| { + ( + proof.info().clone(), + proof.shuffled_signers(ordered_authors), + ) + }) + .collect(), + block.timestamp_usecs(), + batch_reader.clone(), + ); + // Could not get all data so requested again + proof_with_data + .status + .lock() + .replace(DataStatus::Requested(new_receivers)); + return Err(DataNotFound(digest)); + }, + Ok(Ok(data)) => { + vec_ret.push(data); + }, + Ok(Err(e)) => { + let new_receivers = QuorumStorePayloadManager::request_transactions( + proof_with_data + .proofs + .iter() + .map(|proof| { + ( + proof.info().clone(), + proof.shuffled_signers(ordered_authors), + ) + }) + .collect(), + block.timestamp_usecs(), + batch_reader.clone(), + ); + // Could not get all data so requested again + proof_with_data + .status + .lock() + .replace(DataStatus::Requested(new_receivers)); + return Err(e); + }, + } + } + let ret: Vec = 
vec_ret.into_iter().flatten().collect(); + // execution asks for the data twice, so data is cached here for the second time. + proof_with_data + .status + .lock() + .replace(DataStatus::Cached(ret.clone())); + Ok(ret) + }, + } +} + +pub struct ConsensusObserverPayloadManager { + txns_pool: Arc>>, + consensus_publisher: Option>, +} + +impl ConsensusObserverPayloadManager { + pub fn new( + txns_pool: Arc>>, + consensus_publisher: Option>, + ) -> Self { + Self { + txns_pool, + consensus_publisher, + } + } +} + +#[async_trait] +impl TPayloadManager for ConsensusObserverPayloadManager { + fn notify_commit(&self, _block_timestamp: u64, _payloads: Vec) { + // noop + } + + fn prefetch_payload_data(&self, _payload: &Payload, _timestamp: u64) { + // noop + } + + fn check_payload_availability(&self, _block: &Block) -> bool { + unreachable!("this method isn't used in ConsensusObserver") + } + + async fn get_transactions( + &self, + block: &Block, + ) -> ExecutorResult<(Vec, Option)> { + return get_transactions_for_observer(block, &self.txns_pool, &self.consensus_publisher) + .await; } } diff --git a/consensus/src/pending_order_votes.rs b/consensus/src/pending_order_votes.rs index afadb22753d79..94b1ba6d15451 100644 --- a/consensus/src/pending_order_votes.rs +++ b/consensus/src/pending_order_votes.rs @@ -2,7 +2,7 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 -use aptos_consensus_types::{common::Author, order_vote::OrderVote}; +use aptos_consensus_types::{common::Author, order_vote::OrderVote, quorum_cert::QuorumCert}; use aptos_crypto::{hash::CryptoHash, HashValue}; use aptos_logger::prelude::*; use aptos_types::{ @@ -10,7 +10,7 @@ use aptos_types::{ ledger_info::{LedgerInfo, LedgerInfoWithPartialSignatures, LedgerInfoWithSignatures}, validator_verifier::{ValidatorVerifier, VerifyError}, }; -use std::collections::HashMap; +use std::{collections::HashMap, sync::Arc}; /// Result of the order vote processing. 
The failure case (Verification error) is returned /// as the Error part of the result. @@ -20,7 +20,8 @@ pub enum OrderVoteReceptionResult { /// QC currently has. VoteAdded(u128), /// This block has just been certified after adding the vote. - NewLedgerInfoWithSignatures(LedgerInfoWithSignatures), + /// Returns the created order certificate and the QC on which the order certificate is based. + NewLedgerInfoWithSignatures((Arc, LedgerInfoWithSignatures)), /// There might be some issues adding a vote ErrorAddingVote(VerifyError), /// Error happens when aggregating signature @@ -39,7 +40,9 @@ enum OrderVoteStatus { pub struct PendingOrderVotes { /// Maps LedgerInfo digest to associated signatures (contained in a partial LedgerInfoWithSignatures). /// Order vote status stores caches the information on whether the votes are enough to form a QC. - li_digest_to_votes: HashMap, + /// We also store the QC that the order votes certify. + li_digest_to_votes: + HashMap, } impl PendingOrderVotes { @@ -50,29 +53,42 @@ impl PendingOrderVotes { } } + pub fn exists(&self, li_digest: &HashValue) -> bool { + self.li_digest_to_votes.contains_key(li_digest) + } + /// Add a vote to the pending votes // TODO: Should we add any counters here? 
pub fn insert_order_vote( &mut self, order_vote: &OrderVote, validator_verifier: &ValidatorVerifier, + verified_quorum_cert: Option, ) -> OrderVoteReceptionResult { // derive data from order vote let li_digest = order_vote.ledger_info().hash(); // obtain the ledger info with signatures associated to the order vote's ledger info - let status = self.li_digest_to_votes.entry(li_digest).or_insert_with(|| { + let (quorum_cert, status) = self.li_digest_to_votes.entry(li_digest).or_insert_with(|| { // if the ledger info with signatures doesn't exist yet, create it - OrderVoteStatus::NotEnoughVotes(LedgerInfoWithPartialSignatures::new( - order_vote.ledger_info().clone(), - PartialSignatures::empty(), - )) + ( + verified_quorum_cert.expect( + "Quorum Cert is expected when creating a new entry in pending order votes", + ), + OrderVoteStatus::NotEnoughVotes(LedgerInfoWithPartialSignatures::new( + order_vote.ledger_info().clone(), + PartialSignatures::empty(), + )), + ) }); match status { OrderVoteStatus::EnoughVotes(li_with_sig) => { // we already have enough votes for this ledger info - OrderVoteReceptionResult::NewLedgerInfoWithSignatures(li_with_sig.clone()) + OrderVoteReceptionResult::NewLedgerInfoWithSignatures(( + Arc::new(quorum_cert.clone()), + li_with_sig.clone(), + )) }, OrderVoteStatus::NotEnoughVotes(li_with_sig) => { // we don't have enough votes for this ledger info yet @@ -85,7 +101,8 @@ impl PendingOrderVotes { ); return OrderVoteReceptionResult::UnknownAuthor(order_vote.author()); } - let validator_voting_power = validator_voting_power.unwrap(); + let validator_voting_power = + validator_voting_power.expect("Author must exist in the validator set."); if validator_voting_power == 0 { warn!( @@ -106,9 +123,10 @@ impl PendingOrderVotes { Ok(ledger_info_with_sig) => { *status = OrderVoteStatus::EnoughVotes(ledger_info_with_sig.clone()); - OrderVoteReceptionResult::NewLedgerInfoWithSignatures( + OrderVoteReceptionResult::NewLedgerInfoWithSignatures(( + 
Arc::new(quorum_cert.clone()), ledger_info_with_sig, - ) + )) }, Err(e) => OrderVoteReceptionResult::ErrorAggregatingSignature(e), } @@ -134,19 +152,21 @@ impl PendingOrderVotes { // Removes votes older than highest_ordered_round pub fn garbage_collect(&mut self, highest_ordered_round: u64) { - self.li_digest_to_votes.retain(|_, status| match status { - OrderVoteStatus::EnoughVotes(li_with_sig) => { - li_with_sig.ledger_info().round() > highest_ordered_round - }, - OrderVoteStatus::NotEnoughVotes(li_with_sig) => { - li_with_sig.ledger_info().round() > highest_ordered_round - }, - }); + self.li_digest_to_votes + .retain(|_, (_, status)| match status { + OrderVoteStatus::EnoughVotes(li_with_sig) => { + li_with_sig.ledger_info().round() > highest_ordered_round + }, + OrderVoteStatus::NotEnoughVotes(li_with_sig) => { + li_with_sig.ledger_info().round() > highest_ordered_round + }, + }); } pub fn has_enough_order_votes(&self, ledger_info: &LedgerInfo) -> bool { let li_digest = ledger_info.hash(); - if let Some(OrderVoteStatus::EnoughVotes(_)) = self.li_digest_to_votes.get(&li_digest) { + if let Some((_, OrderVoteStatus::EnoughVotes(_))) = self.li_digest_to_votes.get(&li_digest) + { return true; } false @@ -156,7 +176,7 @@ impl PendingOrderVotes { #[cfg(test)] mod tests { use super::{OrderVoteReceptionResult, PendingOrderVotes}; - use aptos_consensus_types::order_vote::OrderVote; + use aptos_consensus_types::{order_vote::OrderVote, quorum_cert::QuorumCert}; use aptos_crypto::HashValue; use aptos_types::{ block_info::BlockInfo, ledger_info::LedgerInfo, @@ -181,6 +201,7 @@ mod tests { // create random vote from validator[0] let li1 = random_ledger_info(); + let qc = QuorumCert::dummy(); let order_vote_1_author_0 = OrderVote::new_with_signature( signers[0].author(), li1.clone(), @@ -189,13 +210,21 @@ mod tests { // first time a new order vote is added -> OrderVoteAdded assert_eq!( - pending_order_votes.insert_order_vote(&order_vote_1_author_0, &validator), - 
OrderVoteReceptionResult::VoteAdded(1) + pending_order_votes.insert_order_vote( + &order_vote_1_author_0, + &validator, + Some(qc.clone()) + ), + OrderVoteReceptionResult::VoteAdded(1), ); // same author voting for the same thing -> OrderVoteAdded assert_eq!( - pending_order_votes.insert_order_vote(&order_vote_1_author_0, &validator), + pending_order_votes.insert_order_vote( + &order_vote_1_author_0, + &validator, + Some(qc.clone()) + ), OrderVoteReceptionResult::VoteAdded(1) ); @@ -207,8 +236,12 @@ mod tests { signers[1].sign(&li2).expect("Unable to sign ledger info"), ); assert_eq!( - pending_order_votes.insert_order_vote(&order_vote_2_author_1, &validator), - OrderVoteReceptionResult::VoteAdded(1) + pending_order_votes.insert_order_vote( + &order_vote_2_author_1, + &validator, + Some(qc.clone()) + ), + OrderVoteReceptionResult::VoteAdded(1), ); assert!(!pending_order_votes.has_enough_order_votes(&li1)); @@ -219,8 +252,12 @@ mod tests { li2.clone(), signers[2].sign(&li2).expect("Unable to sign ledger info"), ); - match pending_order_votes.insert_order_vote(&order_vote_2_author_2, &validator) { - OrderVoteReceptionResult::NewLedgerInfoWithSignatures(li_with_sig) => { + match pending_order_votes.insert_order_vote( + &order_vote_2_author_2, + &validator, + Some(qc.clone()), + ) { + OrderVoteReceptionResult::NewLedgerInfoWithSignatures((_, li_with_sig)) => { assert!(li_with_sig.check_voting_power(&validator).is_ok()); }, _ => { diff --git a/consensus/src/pending_votes.rs b/consensus/src/pending_votes.rs index 56af5416a3a20..ff8bc37a1ae70 100644 --- a/consensus/src/pending_votes.rs +++ b/consensus/src/pending_votes.rs @@ -291,23 +291,35 @@ impl PendingVotes { vote: Vote, ) -> VoteReceptionResult { let li_digest = vote.ledger_info().hash(); - let (_, li_with_sig) = self.li_digest_to_votes.get_mut(&li_digest).unwrap(); - match validator_verifier.check_voting_power(li_with_sig.signatures().keys(), true) { - // a quorum of signature was reached, a new QC is formed - Ok(_) 
=> Self::aggregate_qc_now(validator_verifier, li_with_sig, vote.vote_data()), - - // not enough votes - Err(VerifyError::TooLittleVotingPower { .. }) => { - panic!("Delayed QC aggregation should not be triggered if we don't have enough votes to form a QC"); - }, + match self.li_digest_to_votes.get_mut(&li_digest) { + Some((_, li_with_sig)) => { + match validator_verifier.check_voting_power(li_with_sig.signatures().keys(), true) { + // a quorum of signature was reached, a new QC is formed + Ok(_) => { + Self::aggregate_qc_now(validator_verifier, li_with_sig, vote.vote_data()) + }, + + // not enough votes + Err(VerifyError::TooLittleVotingPower { .. }) => { + panic!("Delayed QC aggregation should not be triggered if we don't have enough votes to form a QC"); + }, - // error - Err(error) => { + // error + Err(error) => { + error!( + "MUST_FIX: vote received could not be added: {}, vote: {}", + error, vote + ); + VoteReceptionResult::ErrorAddingVote(error) + }, + } + }, + None => { error!( - "MUST_FIX: vote received could not be added: {}, vote: {}", - error, vote + "No LedgerInfoWithSignatures found for the given digest: {}", + li_digest ); - VoteReceptionResult::ErrorAddingVote(error) + VoteReceptionResult::ErrorAddingVote(VerifyError::EmptySignature) }, } } diff --git a/consensus/src/pipeline/buffer.rs b/consensus/src/pipeline/buffer.rs index 29ead7aba94d8..ed4514bd20d6d 100644 --- a/consensus/src/pipeline/buffer.rs +++ b/consensus/src/pipeline/buffer.rs @@ -48,6 +48,7 @@ impl Buffer { &self.tail } + #[allow(clippy::unwrap_used)] pub fn push_back(&mut self, elem: T) { self.count = self.count.checked_add(1).unwrap(); let t_hash = elem.hash(); @@ -63,6 +64,7 @@ impl Buffer { self.head.get_or_insert(t_hash); } + #[allow(clippy::unwrap_used)] pub fn pop_front(&mut self) -> Option { self.head.take().map(|head| { let mut item = self.map.remove(&head).unwrap(); @@ -77,10 +79,12 @@ impl Buffer { } // utils - assuming item is not None + #[allow(clippy::unwrap_used)] pub fn 
get_next(&self, cursor: &Cursor) -> Cursor { self.map.get(cursor.as_ref().unwrap()).unwrap().next } + #[allow(clippy::unwrap_used)] pub fn get(&self, cursor: &Cursor) -> &T { self.map .get(cursor.as_ref().unwrap()) @@ -90,6 +94,7 @@ impl Buffer { .unwrap() } + #[allow(clippy::unwrap_used)] pub fn set(&mut self, cursor: &Cursor, new_val: T) { self.map .get_mut(cursor.as_ref().unwrap()) @@ -98,6 +103,7 @@ impl Buffer { .replace(new_val); } + #[allow(clippy::unwrap_used)] pub fn take(&mut self, cursor: &Cursor) -> T { self.map .get_mut(cursor.as_ref().unwrap()) @@ -130,7 +136,7 @@ impl Buffer { /// we make sure that the element found by the key is after `cursor` /// if `cursor` is None, this function returns None (same as find_elem) pub fn find_elem_by_key(&self, cursor: Cursor, key: HashValue) -> Cursor { - let cursor_order = self.map.get(cursor.as_ref()?).unwrap().index; + let cursor_order = self.map.get(cursor.as_ref()?)?.index; let item = self.map.get(&key)?; if item.index >= cursor_order { Some(key) diff --git a/consensus/src/pipeline/buffer_item.rs b/consensus/src/pipeline/buffer_item.rs index 56a3c3eabe230..50e7eebe25fd9 100644 --- a/consensus/src/pipeline/buffer_item.rs +++ b/consensus/src/pipeline/buffer_item.rs @@ -5,7 +5,9 @@ use crate::{pipeline::hashable::Hashable, state_replication::StateComputerCommitCallBackType}; use anyhow::anyhow; use aptos_consensus_types::{ - common::Author, pipeline::commit_vote::CommitVote, pipelined_block::PipelinedBlock, + common::{Author, Round}, + pipeline::commit_vote::CommitVote, + pipelined_block::PipelinedBlock, }; use aptos_crypto::{ed25519, HashValue}; use aptos_executor_types::ExecutorResult; @@ -144,9 +146,10 @@ impl BufferItem { ordered_blocks: Vec, ordered_proof: LedgerInfoWithSignatures, callback: StateComputerCommitCallBackType, + unverified_signatures: PartialSignatures, ) -> Self { Self::Ordered(Box::new(OrderedItem { - unverified_signatures: PartialSignatures::empty(), + unverified_signatures, commit_proof: 
None, callback, ordered_blocks, @@ -174,10 +177,16 @@ impl BufferItem { for (b1, b2) in zip_eq(ordered_blocks.iter(), executed_blocks.iter()) { assert_eq!(b1.id(), b2.id()); } - let mut commit_info = executed_blocks.last().unwrap().block_info(); + let mut commit_info = executed_blocks + .last() + .expect("execute_blocks should not be empty!") + .block_info(); match epoch_end_timestamp { Some(timestamp) if commit_info.timestamp_usecs() != timestamp => { - assert!(executed_blocks.last().unwrap().is_reconfiguration_suffix()); + assert!(executed_blocks + .last() + .expect("") + .is_reconfiguration_suffix()); commit_info.change_timestamp(timestamp); }, _ => (), @@ -392,7 +401,17 @@ impl BufferItem { } pub fn block_id(&self) -> HashValue { - self.get_blocks().last().unwrap().id() + self.get_blocks() + .last() + .expect("Vec should not be empty") + .id() + } + + pub fn round(&self) -> Round { + self.get_blocks() + .last() + .expect("Vec should not be empty") + .round() } pub fn add_signature_if_matched(&mut self, vote: CommitVote) -> anyhow::Result<()> { diff --git a/consensus/src/pipeline/buffer_manager.rs b/consensus/src/pipeline/buffer_manager.rs index 68e1ac8c19bd0..38d5aa8578893 100644 --- a/consensus/src/pipeline/buffer_manager.rs +++ b/consensus/src/pipeline/buffer_manager.rs @@ -5,9 +5,11 @@ use crate::{ block_storage::tracing::{observe_block, BlockStage}, consensus_observer::{ - network_message::ConsensusObserverMessage, publisher::ConsensusPublisher, + network::observer_message::ConsensusObserverMessage, + publisher::consensus_publisher::ConsensusPublisher, }, - counters, monitor, + counters::{self, log_executor_error_occurred}, + monitor, network::{IncomingCommitRequest, NetworkSender}, network_interface::ConsensusMsg, pipeline::{ @@ -25,17 +27,19 @@ use crate::{ use aptos_bounded_executor::BoundedExecutor; use aptos_config::config::ConsensusObserverConfig; use aptos_consensus_types::{ - common::Author, pipeline::commit_decision::CommitDecision, 
pipelined_block::PipelinedBlock, + common::{Author, Round}, + pipeline::commit_vote::CommitVote, + pipelined_block::PipelinedBlock, }; use aptos_crypto::HashValue; -use aptos_executor_types::ExecutorError; +use aptos_executor_types::ExecutorResult; use aptos_logger::prelude::*; use aptos_network::protocols::{rpc::error::RpcError, wire::handshake::v1::ProtocolId}; use aptos_reliable_broadcast::{DropGuard, ReliableBroadcast}; use aptos_time_service::TimeService; use aptos_types::{ - account_address::AccountAddress, epoch_change::EpochChangeProof, epoch_state::EpochState, - ledger_info::LedgerInfoWithSignatures, + account_address::AccountAddress, aggregate_signature::PartialSignatures, + epoch_change::EpochChangeProof, epoch_state::EpochState, ledger_info::LedgerInfoWithSignatures, }; use bytes::Bytes; use futures::{ @@ -47,9 +51,12 @@ use futures::{ FutureExt, SinkExt, StreamExt, }; use once_cell::sync::OnceCell; -use std::sync::{ - atomic::{AtomicBool, AtomicU64, Ordering}, - Arc, +use std::{ + collections::{BTreeMap, HashMap}, + sync::{ + atomic::{AtomicBool, AtomicU64, Ordering}, + Arc, + }, }; use tokio::time::{Duration, Instant}; use tokio_retry::strategy::ExponentialBackoff; @@ -77,6 +84,15 @@ pub struct OrderedBlocks { pub callback: StateComputerCommitCallBackType, } +impl OrderedBlocks { + pub fn latest_round(&self) -> Round { + self.ordered_blocks + .last() + .expect("OrderedBlocks empty.") + .round() + } +} + pub type BufferItemRootType = Cursor; pub type Sender = UnboundedSender; pub type Receiver = UnboundedReceiver; @@ -113,8 +129,8 @@ pub struct BufferManager { commit_msg_rx: Option>, - // we don't hear back from the persisting phase persisting_phase_tx: Sender>, + persisting_phase_rx: Receiver>, block_rx: UnboundedReceiver, reset_rx: UnboundedReceiver, @@ -140,10 +156,20 @@ pub struct BufferManager { reset_flag: Arc, bounded_executor: BoundedExecutor, order_vote_enabled: bool, + back_pressure_enabled: bool, + highest_committed_round: Round, + 
latest_round: Round, // Consensus publisher for downstream observers. consensus_observer_config: ConsensusObserverConfig, consensus_publisher: Option>, + + pending_commit_proofs: BTreeMap, + + max_pending_rounds_in_commit_vote_cache: u64, + // If the buffer manager receives a commit vote for a block that is not in buffer items, then + // the vote will be cached. We can cache upto max_pending_rounds_in_commit_vote_cache (100) blocks. + pending_commit_votes: BTreeMap>, } impl BufferManager { @@ -162,6 +188,7 @@ impl BufferManager { IncomingCommitRequest, >, persisting_phase_tx: Sender>, + persisting_phase_rx: Receiver>, block_rx: UnboundedReceiver, reset_rx: UnboundedReceiver, epoch_state: Arc, @@ -169,8 +196,11 @@ impl BufferManager { reset_flag: Arc, executor: BoundedExecutor, order_vote_enabled: bool, + back_pressure_enabled: bool, + highest_committed_round: Round, consensus_observer_config: ConsensusObserverConfig, consensus_publisher: Option>, + max_pending_rounds_in_commit_vote_cache: u64, ) -> Self { let buffer = Buffer::::new(); @@ -209,6 +239,7 @@ impl BufferManager { commit_msg_rx: Some(commit_msg_rx), persisting_phase_tx, + persisting_phase_rx, block_rx, reset_rx, @@ -225,9 +256,17 @@ impl BufferManager { reset_flag, bounded_executor: executor, order_vote_enabled, + back_pressure_enabled, + highest_committed_round, + latest_round: highest_committed_round, consensus_observer_config, consensus_publisher, + + pending_commit_proofs: BTreeMap::new(), + + max_pending_rounds_in_commit_vote_cache, + pending_commit_votes: BTreeMap::new(), } } @@ -270,6 +309,81 @@ impl BufferManager { }); } + fn try_add_pending_commit_proof(&mut self, commit_proof: LedgerInfoWithSignatures) -> bool { + const MAX_PENDING_COMMIT_PROOFS: usize = 100; + + let round = commit_proof.commit_info().round(); + let block_id = commit_proof.commit_info().id(); + if self.highest_committed_round < round { + if self.pending_commit_proofs.len() < MAX_PENDING_COMMIT_PROOFS { + 
self.pending_commit_proofs.insert(round, commit_proof); + + info!( + round = round, + block_id = block_id, + "Added pending commit proof." + ); + true + } else { + warn!( + round = round, + block_id = block_id, + "Too many pending commit proofs, ignored." + ); + false + } + } else { + debug!( + round = round, + highest_committed_round = self.highest_committed_round, + block_id = block_id, + "Commit proof too old, ignored." + ); + false + } + } + + fn try_add_pending_commit_vote(&mut self, vote: CommitVote) -> bool { + let block_id = vote.commit_info().id(); + let round = vote.commit_info().round(); + + // Store the commit vote only if it is for one of the next 100 rounds. + if round > self.highest_committed_round + && self.highest_committed_round + self.max_pending_rounds_in_commit_vote_cache > round + { + self.pending_commit_votes + .entry(round) + .or_default() + .insert(vote.author(), vote); + true + } else { + debug!( + round = round, + highest_committed_round = self.highest_committed_round, + block_id = block_id, + "Received a commit vote not in the next 100 rounds, ignored." + ); + false + } + } + + fn drain_pending_commit_proof_till( + &mut self, + round: Round, + ) -> Option { + // split at `round` + let mut remainder = self.pending_commit_proofs.split_off(&(round + 1)); + + // keep the second part after split + std::mem::swap(&mut self.pending_commit_proofs, &mut remainder); + let mut to_remove = remainder; + + // return the last of the first part + to_remove + .pop_last() + .map(|(_round, commit_proof)| commit_proof) + } + /// process incoming ordered blocks /// push them into the buffer and update the roots if they are none. 
async fn process_ordered_blocks(&mut self, ordered_blocks: OrderedBlocks) { @@ -294,14 +408,30 @@ impl BufferManager { ordered_blocks.clone().into_iter().map(Arc::new).collect(), ordered_proof.clone(), ); - consensus_publisher.publish_message(message).await; + consensus_publisher.publish_message(message); } self.execution_schedule_phase_tx .send(request) .await .expect("Failed to send execution schedule request"); - let item = BufferItem::new_ordered(ordered_blocks, ordered_proof, callback); + let mut unverified_signatures = PartialSignatures::empty(); + if let Some(block) = ordered_blocks.last() { + if let Some(votes) = self.pending_commit_votes.remove(&block.round()) { + votes + .values() + .filter(|vote| vote.commit_info().id() == block.id()) + .for_each(|vote| { + unverified_signatures.add_signature(vote.author(), vote.signature().clone()) + }); + } + } + let item = BufferItem::new_ordered( + ordered_blocks, + ordered_proof, + callback, + unverified_signatures, + ); self.buffer.push_back(item); } @@ -382,7 +512,11 @@ impl BufferManager { } if item.block_id() == target_block_id { let aggregated_item = item.unwrap_aggregated(); - let block = aggregated_item.executed_blocks.last().unwrap().block(); + let block = aggregated_item + .executed_blocks + .last() + .expect("executed_blocks should be not empty") + .block(); observe_block(block.timestamp_usecs(), BlockStage::COMMIT_CERTIFIED); // As all the validators broadcast commit votes directly to all other validators, // the proposer do not have to broadcast commit decision again. 
@@ -393,10 +527,9 @@ impl BufferManager { self.reset().await; } if let Some(consensus_publisher) = &self.consensus_publisher { - let message = ConsensusObserverMessage::new_commit_decision_message( - CommitDecision::new(commit_proof.clone()), - ); - consensus_publisher.publish_message(message).await; + let message = + ConsensusObserverMessage::new_commit_decision_message(commit_proof.clone()); + consensus_publisher.publish_message(message); } self.persisting_phase_tx .send(self.create_new_request(PersistingRequest { @@ -449,7 +582,16 @@ impl BufferManager { info!("Receive reset"); self.reset_flag.store(true, Ordering::SeqCst); - self.stop = matches!(signal, ResetSignal::Stop); + match signal { + ResetSignal::Stop => self.stop = true, + ResetSignal::TargetRound(round) => { + self.highest_committed_round = round; + self.latest_round = round; + + let _ = self.drain_pending_commit_proof_till(round); + }, + } + self.reset().await; let _ = tx.send(ResetAck::default()); self.reset_flag.store(false, Ordering::SeqCst); @@ -488,6 +630,7 @@ impl BufferManager { } /// If the response is successful, advance the item to Executed, otherwise panic (TODO fix). 
+ #[allow(clippy::unwrap_used)] async fn process_execution_response(&mut self, response: ExecutionResponse) { let ExecutionResponse { block_id, inner } = response; // find the corresponding item, may not exist if a reset or aggregated happened @@ -498,16 +641,12 @@ impl BufferManager { let executed_blocks = match inner { Ok(result) => result, - Err(ExecutorError::CouldNotGetData) => { - warn!("Execution error - CouldNotGetData {}", block_id); - return; - }, - Err(ExecutorError::BlockNotFound(block_id)) => { - warn!("Execution error BlockNotFound {}", block_id); - return; - }, Err(e) => { - error!("Execution error {:?} for {}", e, block_id); + log_executor_error_occurred( + e, + &counters::BUFFER_MANAGER_RECEIVED_EXECUTOR_ERROR_COUNT, + block_id, + ); return; }, }; @@ -543,12 +682,21 @@ impl BufferManager { } let item = self.buffer.take(¤t_cursor); - let new_item = item.advance_to_executed_or_aggregated( + let round = item.round(); + let mut new_item = item.advance_to_executed_or_aggregated( executed_blocks, &self.epoch_state.verifier, self.end_epoch_timestamp.get().cloned(), self.order_vote_enabled, ); + if let Some(commit_proof) = self.drain_pending_commit_proof_till(round) { + if !new_item.is_aggregated() + && commit_proof.ledger_info().commit_info().id() == block_id + { + new_item = new_item.try_advance_to_aggregated_with_ledger_info(commit_proof) + } + } + let aggregated = new_item.is_aggregated(); self.buffer.set(¤t_cursor, new_item); if aggregated { @@ -610,7 +758,7 @@ impl BufferManager { // find the corresponding item let author = vote.author(); let commit_info = vote.commit_info().clone(); - info!("Receive commit vote {} from {}", commit_info, author); + trace!("Receive commit vote {} from {}", commit_info, author); let target_block_id = vote.commit_info().id(); let current_cursor = self .buffer @@ -643,6 +791,8 @@ impl BufferManager { } else { return None; } + } else if self.try_add_pending_commit_vote(vote) { + reply_ack(protocol, response_sender); } 
else { reply_nack(protocol, response_sender); // TODO: send_commit_vote() doesn't care about the response and this should be direct send not RPC } @@ -663,16 +813,16 @@ impl BufferManager { ); let aggregated = new_item.is_aggregated(); self.buffer.set(&cursor, new_item); + + reply_ack(protocol, response_sender); if aggregated { - let response = - ConsensusMsg::CommitMessage(Box::new(CommitMessage::Ack(()))); - if let Ok(bytes) = protocol.to_bytes(&response) { - let _ = response_sender.send(Ok(bytes.into())); - } return Some(target_block_id); } + } else if self.try_add_pending_commit_proof(commit_proof.into_inner()) { + reply_ack(protocol, response_sender); + } else { + reply_nack(protocol, response_sender); // TODO: send_commit_proof() doesn't care about the response and this should be direct send not RPC } - reply_nack(protocol, response_sender); // TODO: send_commit_proof() doesn't care about the response and this should be direct send not RPC }, CommitMessage::Ack(_) => { // It should be filtered out by verify, so we log errors here @@ -767,6 +917,12 @@ impl BufferManager { .set(pending_aggregated as i64); } + fn need_back_pressure(&self) -> bool { + const MAX_BACKLOG: Round = 20; + + self.back_pressure_enabled && self.highest_committed_round + MAX_BACKLOG < self.latest_round + } + pub async fn start(mut self) { info!("Buffer manager starts."); let (verified_commit_msg_tx, mut verified_commit_msg_rx) = create_channel(); @@ -792,23 +948,24 @@ impl BufferManager { }); while !self.stop { // advancing the root will trigger sending requests to the pipeline - ::futures::select! { - blocks = self.block_rx.select_next_some() => { + ::tokio::select! 
{ + Some(blocks) = self.block_rx.next(), if !self.need_back_pressure() => { + self.latest_round = blocks.latest_round(); monitor!("buffer_manager_process_ordered", { self.process_ordered_blocks(blocks).await; if self.execution_root.is_none() { self.advance_execution_root(); }}); }, - reset_event = self.reset_rx.select_next_some() => { + Some(reset_event) = self.reset_rx.next() => { monitor!("buffer_manager_process_reset", self.process_reset_request(reset_event).await); }, - response = self.execution_schedule_phase_rx.select_next_some() => { + Some(response) = self.execution_schedule_phase_rx.next() => { monitor!("buffer_manager_process_execution_schedule_response", { self.process_execution_schedule_response(response).await; })}, - response = self.execution_wait_phase_rx.select_next_some() => { + Some(response) = self.execution_wait_phase_rx.next() => { monitor!("buffer_manager_process_execution_wait_response", { let response_block_id = response.block_id; self.process_execution_response(response).await; @@ -827,17 +984,22 @@ impl BufferManager { self.advance_signing_root().await; }}); }, - _ = self.execution_schedule_retry_rx.select_next_some() => { + _ = self.execution_schedule_retry_rx.next() => { monitor!("buffer_manager_process_execution_schedule_retry", self.retry_schedule_phase().await); }, - response = self.signing_phase_rx.select_next_some() => { + Some(response) = self.signing_phase_rx.next() => { monitor!("buffer_manager_process_signing_response", { self.process_signing_response(response).await; self.advance_signing_root().await }) }, - rpc_request = verified_commit_msg_rx.select_next_some() => { + Some(Ok(round)) = self.persisting_phase_rx.next() => { + // see where `need_backpressure()` is called. 
+ self.pending_commit_votes.retain(|rnd, _| *rnd > round); + self.highest_committed_round = round + }, + Some(rpc_request) = verified_commit_msg_rx.next() => { monitor!("buffer_manager_process_commit_message", if let Some(aggregated_block_id) = self.process_commit_message(rpc_request) { self.advance_head(aggregated_block_id).await; @@ -862,8 +1024,20 @@ impl BufferManager { } } +fn reply_ack(protocol: ProtocolId, response_sender: oneshot::Sender>) { + reply_commit_msg(protocol, response_sender, CommitMessage::Ack(())) +} + fn reply_nack(protocol: ProtocolId, response_sender: oneshot::Sender>) { - let response = ConsensusMsg::CommitMessage(Box::new(CommitMessage::Nack)); + reply_commit_msg(protocol, response_sender, CommitMessage::Nack) +} + +fn reply_commit_msg( + protocol: ProtocolId, + response_sender: oneshot::Sender>, + msg: CommitMessage, +) { + let response = ConsensusMsg::CommitMessage(Box::new(msg)); if let Ok(bytes) = protocol.to_bytes(&response) { let _ = response_sender.send(Ok(bytes.into())); } diff --git a/consensus/src/pipeline/commit_reliable_broadcast.rs b/consensus/src/pipeline/commit_reliable_broadcast.rs index 3b21310cba1a5..0fc7f066810a1 100644 --- a/consensus/src/pipeline/commit_reliable_broadcast.rs +++ b/consensus/src/pipeline/commit_reliable_broadcast.rs @@ -1,7 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::{network::NetworkSender, network_interface::ConsensusMsg}; +use crate::{counters, network::NetworkSender, network_interface::ConsensusMsg}; use anyhow::bail; use aptos_consensus_types::{ common::Author, @@ -36,8 +36,18 @@ impl CommitMessage { /// Verify the signatures on the message pub fn verify(&self, verifier: &ValidatorVerifier) -> anyhow::Result<()> { match self { - CommitMessage::Vote(vote) => vote.verify(verifier), - CommitMessage::Decision(decision) => decision.verify(verifier), + CommitMessage::Vote(vote) => { + let _timer = counters::VERIFY_MSG + .with_label_values(&["commit_vote"]) + 
.start_timer(); + vote.verify(verifier) + }, + CommitMessage::Decision(decision) => { + let _timer = counters::VERIFY_MSG + .with_label_values(&["commit_decision"]) + .start_timer(); + decision.verify(verifier) + }, CommitMessage::Ack(_) => bail!("Unexpected ack in incoming commit message"), CommitMessage::Nack => bail!("Unexpected NACK in incoming commit message"), } diff --git a/consensus/src/pipeline/decoupled_execution_utils.rs b/consensus/src/pipeline/decoupled_execution_utils.rs index 368346e96153d..8178d871e7efc 100644 --- a/consensus/src/pipeline/decoupled_execution_utils.rs +++ b/consensus/src/pipeline/decoupled_execution_utils.rs @@ -3,7 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - consensus_observer::publisher::ConsensusPublisher, + consensus_observer::publisher::consensus_publisher::ConsensusPublisher, network::{IncomingCommitRequest, NetworkSender}, pipeline::{ buffer_manager::{create_channel, BufferManager, OrderedBlocks, ResetRequest}, @@ -27,6 +27,7 @@ use std::sync::{ }; /// build channels and return phases and buffer manager +#[allow(clippy::too_many_arguments)] pub fn prepare_phases_and_buffer_manager( author: Author, execution_proxy: Arc, @@ -39,8 +40,11 @@ pub fn prepare_phases_and_buffer_manager( epoch_state: Arc, bounded_executor: BoundedExecutor, order_vote_enabled: bool, + back_pressure_enabled: bool, + highest_committed_round: u64, consensus_observer_config: ConsensusObserverConfig, consensus_publisher: Option>, + max_pending_rounds_in_commit_vote_cache: u64, ) -> ( PipelinePhase, PipelinePhase, @@ -93,11 +97,12 @@ pub fn prepare_phases_and_buffer_manager( // Persisting Phase let (persisting_phase_request_tx, persisting_phase_request_rx) = create_channel::>(); + let (persisting_phase_response_tx, persisting_phase_response_rx) = create_channel(); let persisting_phase_processor = PersistingPhase::new(persisting_proxy); let persisting_phase = PipelinePhase::new( persisting_phase_request_rx, - None, + 
Some(persisting_phase_response_tx), Box::new(persisting_phase_processor), reset_flag.clone(), ); @@ -118,6 +123,7 @@ pub fn prepare_phases_and_buffer_manager( Arc::new(commit_msg_tx), commit_msg_rx, persisting_phase_request_tx, + persisting_phase_response_rx, block_rx, sync_rx, epoch_state, @@ -125,8 +131,11 @@ pub fn prepare_phases_and_buffer_manager( reset_flag.clone(), bounded_executor, order_vote_enabled, + back_pressure_enabled, + highest_committed_round, consensus_observer_config, consensus_publisher, + max_pending_rounds_in_commit_vote_cache, ), ) } diff --git a/consensus/src/pipeline/execution_client.rs b/consensus/src/pipeline/execution_client.rs index 1d51ea657bd34..31156e2e7d399 100644 --- a/consensus/src/pipeline/execution_client.rs +++ b/consensus/src/pipeline/execution_client.rs @@ -3,12 +3,12 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - consensus_observer::publisher::ConsensusPublisher, + consensus_observer::publisher::consensus_publisher::ConsensusPublisher, counters, error::StateSyncError, network::{IncomingCommitRequest, IncomingRandGenRequest, NetworkSender}, network_interface::{ConsensusMsg, ConsensusNetworkClient}, - payload_manager::PayloadManager, + payload_manager::TPayloadManager, pipeline::{ buffer_manager::{OrderedBlocks, ResetAck, ResetRequest, ResetSignal}, decoupled_execution_utils::prepare_phases_and_buffer_manager, @@ -33,11 +33,11 @@ use aptos_consensus_types::{ common::{Author, Round}, pipelined_block::PipelinedBlock, }; +use aptos_crypto::bls12381::PrivateKey; use aptos_executor_types::ExecutorResult; use aptos_infallible::RwLock; use aptos_logger::prelude::*; use aptos_network::{application::interface::NetworkClient, protocols::network::Event}; -use aptos_safety_rules::safety_rules_manager::load_consensus_key_from_secure_storage; use aptos_types::{ epoch_state::EpochState, ledger_info::LedgerInfoWithSignatures, @@ -52,22 +52,24 @@ use futures::{ use futures_channel::mpsc::unbounded; use 
move_core_types::account_address::AccountAddress; use std::sync::Arc; +use aptos_crypto::ed25519::Ed25519PrivateKey; #[async_trait::async_trait] pub trait TExecutionClient: Send + Sync { /// Initialize the execution phase for a new epoch. async fn start_epoch( &self, + maybe_consensus_key: Option>, epoch_state: Arc, commit_signer_provider: Arc, - payload_manager: Arc, + payload_manager: Arc, onchain_consensus_config: &OnChainConsensusConfig, onchain_execution_config: &OnChainExecutionConfig, onchain_randomness_config: &OnChainRandomnessConfig, rand_config: Option, fast_rand_config: Option, rand_msg_rx: aptos_channel::Receiver, - highest_ordered_round: Round, + highest_committed_round: Round, ); /// This is needed for some DAG tests. Clean this up as a TODO. @@ -90,6 +92,9 @@ pub trait TExecutionClient: Send + Sync { /// Synchronize to a commit that not present locally. async fn sync_to(&self, target: LedgerInfoWithSignatures) -> Result<(), StateSyncError>; + /// Resets the internal state of the rand and buffer managers. + async fn reset(&self, target: &LedgerInfoWithSignatures) -> Result<()>; + /// Shutdown the current processor at the end of the epoch. 
async fn end_epoch(&self); } @@ -180,13 +185,15 @@ impl ExecutionProxyClient { fn spawn_decoupled_execution( &self, + maybe_consensus_key: Option>, commit_signer_provider: Arc, epoch_state: Arc, rand_config: Option, fast_rand_config: Option, onchain_consensus_config: &OnChainConsensusConfig, rand_msg_rx: aptos_channel::Receiver, - highest_ordered_round: Round, + highest_committed_round: Round, + buffer_manager_back_pressure_enabled: bool, consensus_observer_config: ConsensusObserverConfig, consensus_publisher: Option>, ) { @@ -212,10 +219,9 @@ impl ExecutionProxyClient { let (rand_ready_block_tx, rand_ready_block_rx) = unbounded::(); let (reset_tx_to_rand_manager, reset_rand_manager_rx) = unbounded::(); - let consensus_key = - load_consensus_key_from_secure_storage(&self.consensus_config.safety_rules) - .expect("Failed in loading consensus key for ExecutionProxyClient."); - let signer = Arc::new(ValidatorSigner::new(self.author, consensus_key)); + let consensus_sk = maybe_consensus_key + .expect("consensus key unavailable for ExecutionProxyClient"); + let signer = Arc::new(ValidatorSigner::new(self.author, consensus_sk)); let rand_manager = RandManager::::new( self.author, @@ -235,7 +241,7 @@ impl ExecutionProxyClient { rand_msg_rx, reset_rand_manager_rx, self.bounded_executor.clone(), - highest_ordered_round, + highest_committed_round, )); ( @@ -273,8 +279,12 @@ impl ExecutionProxyClient { epoch_state, self.bounded_executor.clone(), onchain_consensus_config.order_vote_enabled(), + buffer_manager_back_pressure_enabled, + highest_committed_round, consensus_observer_config, consensus_publisher, + self.consensus_config + .max_pending_rounds_in_commit_vote_cache, ); tokio::spawn(execution_schedule_phase.start()); @@ -289,25 +299,28 @@ impl ExecutionProxyClient { impl TExecutionClient for ExecutionProxyClient { async fn start_epoch( &self, + maybe_consensus_key: Option>, epoch_state: Arc, commit_signer_provider: Arc, - payload_manager: Arc, + payload_manager: Arc, 
onchain_consensus_config: &OnChainConsensusConfig, onchain_execution_config: &OnChainExecutionConfig, onchain_randomness_config: &OnChainRandomnessConfig, rand_config: Option, fast_rand_config: Option, rand_msg_rx: aptos_channel::Receiver, - highest_ordered_round: Round, + highest_committed_round: Round, ) { let maybe_rand_msg_tx = self.spawn_decoupled_execution( + maybe_consensus_key, commit_signer_provider, epoch_state.clone(), rand_config, fast_rand_config, onchain_consensus_config, rand_msg_rx, - highest_ordered_round, + highest_committed_round, + self.consensus_config.enable_pre_commit, self.consensus_observer_config, self.consensus_publisher.clone(), ); @@ -343,19 +356,19 @@ impl TExecutionClient for ExecutionProxyClient { callback: StateComputerCommitCallBackType, ) -> ExecutorResult<()> { assert!(!blocks.is_empty()); - let execute_tx = self.handle.read().execute_tx.clone(); - - if execute_tx.is_none() { - debug!("Failed to send to buffer manager, maybe epoch ends"); - return Ok(()); - } + let mut execute_tx = match self.handle.read().execute_tx.clone() { + Some(tx) => tx, + None => { + debug!("Failed to send to buffer manager, maybe epoch ends"); + return Ok(()); + }, + }; for block in blocks { block.set_insertion_time(); } if execute_tx - .unwrap() .send(OrderedBlocks { ordered_blocks: blocks .iter() @@ -393,6 +406,16 @@ impl TExecutionClient for ExecutionProxyClient { Err(anyhow::anyhow!("Injected error in sync_to").into()) }); + // Reset the rand and buffer managers to the target round + self.reset(&target).await?; + + // TODO: handle the sync error, should re-push the ordered blocks to buffer manager + // when it's reset but sync fails. 
+ self.execution_proxy.sync_to(target).await?; + Ok(()) + } + + async fn reset(&self, target: &LedgerInfoWithSignatures) -> Result<()> { let (reset_tx_to_rand_manager, reset_tx_to_buffer_manager) = { let handle = self.handle.read(); ( @@ -426,9 +449,6 @@ impl TExecutionClient for ExecutionProxyClient { rx.await.map_err(|_| Error::ResetDropped)?; } - // TODO: handle the sync error, should re-push the ordered blocks to buffer manager - // when it's reset but sync fails. - self.execution_proxy.sync_to(target).await?; Ok(()) } @@ -473,16 +493,17 @@ pub struct DummyExecutionClient; impl TExecutionClient for DummyExecutionClient { async fn start_epoch( &self, + _maybe_consensus_key: Option>, _epoch_state: Arc, _commit_signer_provider: Arc, - _payload_manager: Arc, + _payload_manager: Arc, _onchain_consensus_config: &OnChainConsensusConfig, _onchain_execution_config: &OnChainExecutionConfig, _onchain_randomness_config: &OnChainRandomnessConfig, _rand_config: Option, _fast_rand_config: Option, _rand_msg_rx: aptos_channel::Receiver, - _highest_ordered_round: Round, + _highest_committed_round: Round, ) { } @@ -507,5 +528,9 @@ impl TExecutionClient for DummyExecutionClient { Ok(()) } + async fn reset(&self, _: &LedgerInfoWithSignatures) -> Result<()> { + Ok(()) + } + async fn end_epoch(&self) {} } diff --git a/consensus/src/pipeline/execution_schedule_phase.rs b/consensus/src/pipeline/execution_schedule_phase.rs index 96586273649c1..4385b3b47fcd5 100644 --- a/consensus/src/pipeline/execution_schedule_phase.rs +++ b/consensus/src/pipeline/execution_schedule_phase.rs @@ -6,7 +6,6 @@ use crate::{ execution_wait_phase::ExecutionWaitRequest, pipeline_phase::{CountedRequest, StatelessPipeline}, }, - state_computer::PipelineExecutionResult, state_replication::StateComputer, }; use aptos_consensus_types::pipelined_block::PipelinedBlock; @@ -26,8 +25,8 @@ use std::{ pub struct ExecutionRequest { pub ordered_blocks: Vec, - // Hold a CountedRequest to guarantee the executor doesn't get 
reset with pending tasks - // stuck in the ExecutinoPipeline. + // Pass down a CountedRequest to the ExecutionPipeline stages in order to guarantee the executor + // doesn't get reset with pending tasks stuck in the pipeline. pub lifetime_guard: CountedRequest<()>, } @@ -66,14 +65,15 @@ impl StatelessPipeline for ExecutionSchedulePhase { lifetime_guard, } = req; - if ordered_blocks.is_empty() { - return ExecutionWaitRequest { - block_id: HashValue::zero(), - fut: Box::pin(async { Err(aptos_executor_types::ExecutorError::EmptyBlocks) }), - }; - } - - let block_id = ordered_blocks.last().unwrap().id(); + let block_id = match ordered_blocks.last() { + Some(block) => block.id(), + None => { + return ExecutionWaitRequest { + block_id: HashValue::zero(), + fut: Box::pin(async { Err(aptos_executor_types::ExecutorError::EmptyBlocks) }), + } + }, + }; // Call schedule_compute() for each block here (not in the fut being returned) to // make sure they are scheduled in order. @@ -81,22 +81,23 @@ impl StatelessPipeline for ExecutionSchedulePhase { for b in &ordered_blocks { let fut = self .execution_proxy - .schedule_compute(b.block(), b.parent_id(), b.randomness().cloned()) + .schedule_compute( + b.block(), + b.parent_id(), + b.randomness().cloned(), + lifetime_guard.spawn(()), + ) .await; futs.push(fut) } // In the future being returned, wait for the compute results in order. - // n.b. Must `spawn()` here to make sure lifetime_guard will be released even if - // ExecutionWait phase is never kicked off. 
let fut = tokio::task::spawn(async move { let mut results = vec![]; for (block, fut) in itertools::zip_eq(ordered_blocks, futs) { debug!("try to receive compute result for block {}", block.id()); - let PipelineExecutionResult { input_txns, result } = fut.await?; - results.push(block.set_execution_result(input_txns, result)); + results.push(block.set_execution_result(fut.await?)); } - drop(lifetime_guard); Ok(results) }) .map_err(ExecutorError::internal_err) diff --git a/consensus/src/pipeline/persisting_phase.rs b/consensus/src/pipeline/persisting_phase.rs index 8a60e4589c131..ba37d507ed292 100644 --- a/consensus/src/pipeline/persisting_phase.rs +++ b/consensus/src/pipeline/persisting_phase.rs @@ -6,7 +6,7 @@ use crate::{ pipeline::pipeline_phase::StatelessPipeline, state_replication::{StateComputer, StateComputerCommitCallBackType}, }; -use aptos_consensus_types::pipelined_block::PipelinedBlock; +use aptos_consensus_types::{common::Round, pipelined_block::PipelinedBlock}; use aptos_executor_types::ExecutorResult; use aptos_types::ledger_info::LedgerInfoWithSignatures; use async_trait::async_trait; @@ -42,7 +42,7 @@ impl Display for PersistingRequest { } } -pub type PersistingResponse = ExecutorResult<()>; +pub type PersistingResponse = ExecutorResult; pub struct PersistingPhase { persisting_handle: Arc, @@ -67,9 +67,11 @@ impl StatelessPipeline for PersistingPhase { commit_ledger_info, callback, } = req; + let round = commit_ledger_info.ledger_info().round(); self.persisting_handle .commit(&blocks, commit_ledger_info, callback) .await + .map(|_| round) } } diff --git a/consensus/src/pipeline/pipeline_phase.rs b/consensus/src/pipeline/pipeline_phase.rs index 89ddbda65ed5c..3b98ab97d26fc 100644 --- a/consensus/src/pipeline/pipeline_phase.rs +++ b/consensus/src/pipeline/pipeline_phase.rs @@ -33,6 +33,10 @@ impl TaskGuard { counter.fetch_add(1, Ordering::SeqCst); Self { counter } } + + fn spawn(&self) -> Self { + Self::new(self.counter.clone()) + } } impl Drop for 
TaskGuard { @@ -51,6 +55,13 @@ impl CountedRequest { let guard = TaskGuard::new(counter); Self { req, guard } } + + pub fn spawn(&self, other_req: OtherRequest) -> CountedRequest { + CountedRequest { + req: other_req, + guard: self.guard.spawn(), + } + } } pub struct PipelinePhase { diff --git a/consensus/src/pipeline/tests/buffer_manager_tests.rs b/consensus/src/pipeline/tests/buffer_manager_tests.rs index 919c96dcbccb6..9ef9ed94600cd 100644 --- a/consensus/src/pipeline/tests/buffer_manager_tests.rs +++ b/consensus/src/pipeline/tests/buffer_manager_tests.rs @@ -157,8 +157,11 @@ pub fn prepare_buffer_manager( }), bounded_executor, false, + true, + 0, ConsensusObserverConfig::default(), None, + 100, ); ( @@ -255,8 +258,9 @@ async fn assert_results( batches: Vec>, result_rx: &mut Receiver, ) { + let total_batches = batches.iter().flatten().count(); let mut blocks: Vec = Vec::new(); - for _ in 0..batches.len() { + while blocks.len() < total_batches { let OrderedBlocks { ordered_blocks, .. } = result_rx.next().await.unwrap(); blocks.extend(ordered_blocks.into_iter()); } @@ -340,7 +344,6 @@ fn buffer_manager_happy_path_test() { }); } -#[ignore] // TODO: turn this test back on once the flakes have resolved. 
#[test] fn buffer_manager_sync_test() { // happy path diff --git a/consensus/src/pipeline/tests/signing_phase_tests.rs b/consensus/src/pipeline/tests/signing_phase_tests.rs index e5c5b534b6f0f..6e16aa11b3b7d 100644 --- a/consensus/src/pipeline/tests/signing_phase_tests.rs +++ b/consensus/src/pipeline/tests/signing_phase_tests.rs @@ -73,6 +73,7 @@ fn add_signing_phase_test_cases( ); let (_, executed_ledger_info) = prepare_executed_blocks_with_executed_ledger_info(&signers[0]); + let executed_commit_ledger_info = executed_ledger_info.ledger_info().clone(); let inconsistent_commit_ledger_info = LedgerInfo::new(BlockInfo::random(1), HashValue::from_u64(0xBEEF)); @@ -90,17 +91,15 @@ fn add_signing_phase_test_cases( }), ); - // not ordered-only + // ordered ledger info same as commit ledger info phase_tester.add_test_case( SigningRequest { ordered_ledger_info: executed_ledger_info.clone(), commit_ledger_info: executed_ledger_info.ledger_info().clone(), }, Box::new(move |resp| { - assert!(matches!( - resp.signature_result, - Err(Error::InvalidOrderedLedgerInfo(_)) - )); + assert!(resp.signature_result.is_ok()); + assert_eq!(resp.commit_ledger_info, executed_commit_ledger_info); }), ); diff --git a/consensus/src/quorum_store/batch_coordinator.rs b/consensus/src/quorum_store/batch_coordinator.rs index 177e8d102c6e9..72f02ae6497af 100644 --- a/consensus/src/quorum_store/batch_coordinator.rs +++ b/consensus/src/quorum_store/batch_coordinator.rs @@ -76,7 +76,7 @@ impl BatchCoordinator { let peer_id = persist_requests[0].author(); let batches = persist_requests .iter() - .map(|persisted_value| persisted_value.batch_info().clone()) + .map(|persisted_value| (persisted_value.batch_info().clone(), persisted_value.clone().summary())) .collect(); let signed_batch_infos = batch_store.persist(persist_requests); if !signed_batch_infos.is_empty() { diff --git a/consensus/src/quorum_store/batch_generator.rs b/consensus/src/quorum_store/batch_generator.rs index ae59c83166fb5..65805bb356ffd 
100644 --- a/consensus/src/quorum_store/batch_generator.rs +++ b/consensus/src/quorum_store/batch_generator.rs @@ -425,18 +425,22 @@ impl BatchGenerator { trace!("QS: dynamic_max_pull_txn_per_s: {}", dynamic_pull_txn_per_s); } counters::QS_BACKPRESSURE_TXN_COUNT.observe(1.0); + counters::QS_BACKPRESSURE_MAKE_STRICTER_TXN_COUNT.observe(1.0); counters::QS_BACKPRESSURE_DYNAMIC_MAX.observe(dynamic_pull_txn_per_s as f64); } else { // additive increase, every second if back_pressure_increase_latest.elapsed() >= back_pressure_increase_duration { back_pressure_increase_latest = tick_start; dynamic_pull_txn_per_s = std::cmp::min( - dynamic_pull_txn_per_s + self.config.back_pressure.dynamic_min_txn_per_s, + dynamic_pull_txn_per_s + self.config.back_pressure.additive_increase_when_no_backpressure, self.config.back_pressure.dynamic_max_txn_per_s, ); trace!("QS: dynamic_max_pull_txn_per_s: {}", dynamic_pull_txn_per_s); } - counters::QS_BACKPRESSURE_TXN_COUNT.observe(0.0); + counters::QS_BACKPRESSURE_TXN_COUNT.observe( + if dynamic_pull_txn_per_s < self.config.back_pressure.dynamic_max_txn_per_s { 1.0 } else { 0.0 } + ); + counters::QS_BACKPRESSURE_MAKE_STRICTER_TXN_COUNT.observe(0.0); counters::QS_BACKPRESSURE_DYNAMIC_MAX.observe(dynamic_pull_txn_per_s as f64); } if self.back_pressure.proof_count { diff --git a/consensus/src/quorum_store/batch_proof_queue.rs b/consensus/src/quorum_store/batch_proof_queue.rs new file mode 100644 index 0000000000000..cfff3bb9c7061 --- /dev/null +++ b/consensus/src/quorum_store/batch_proof_queue.rs @@ -0,0 +1,816 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use super::{ + batch_store::BatchStore, + utils::{BatchKey, BatchSortKey, TimeExpirations}, +}; +use crate::quorum_store::counters; +use aptos_consensus_types::{ + common::TxnSummaryWithExpiration, + payload::TDataInfo, + proof_of_store::{BatchInfo, ProofOfStore}, + utils::PayloadTxnsSize, +}; +use aptos_logger::{info, sample, sample::SampleRate, warn}; +use 
aptos_types::{transaction::SignedTransaction, PeerId}; +use rand::{prelude::SliceRandom, thread_rng}; +use std::{ + cmp::Reverse, + collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, + sync::Arc, + time::{Duration, Instant}, +}; + +/// QueueItem represents an item in the ProofBatchQueue. +/// It stores the transaction summaries and proof associated with the +/// batch. +struct QueueItem { + /// The info of the Batch this item stores + info: BatchInfo, + /// Contains the summary of transactions in the batch. + /// It is optional as the summary can be updated after the proof. + txn_summaries: Option>, + + /// Contains the proof associated with the batch. + /// It is optional as the proof can be updated after the summary. + proof: Option, + /// The time when the proof is inserted into this item. + proof_insertion_time: Option, +} + +impl QueueItem { + fn is_committed(&self) -> bool { + self.proof.is_none() && self.proof_insertion_time.is_none() && self.txn_summaries.is_none() + } + + fn mark_committed(&mut self) { + self.proof = None; + self.proof_insertion_time = None; + self.txn_summaries = None; + } +} + +pub struct BatchProofQueue { + my_peer_id: PeerId, + // Queue per peer to ensure fairness between peers and priority within peer + author_to_batches: HashMap>, + // Map of Batch key to QueueItem containing Batch data and proofs + items: HashMap, + // Number of unexpired and uncommitted proofs in which the txn_summary = (sender, sequence number, hash, expiration) + // has been included. We only count those batches that are in both author_to_batches and items along with proofs. 
+ txn_summary_num_occurrences: HashMap, + // Expiration index + expirations: TimeExpirations, + batch_store: Arc, + + latest_block_timestamp: u64, + remaining_txns_with_duplicates: u64, + remaining_proofs: u64, + remaining_local_txns: u64, + remaining_local_proofs: u64, +} + +impl BatchProofQueue { + pub(crate) fn new(my_peer_id: PeerId, batch_store: Arc) -> Self { + Self { + my_peer_id, + author_to_batches: HashMap::new(), + items: HashMap::new(), + txn_summary_num_occurrences: HashMap::new(), + expirations: TimeExpirations::new(), + batch_store, + latest_block_timestamp: 0, + remaining_txns_with_duplicates: 0, + remaining_proofs: 0, + remaining_local_txns: 0, + remaining_local_proofs: 0, + } + } + + #[inline] + fn inc_remaining_proofs(&mut self, author: &PeerId, num_txns: u64) { + self.remaining_txns_with_duplicates += num_txns; + self.remaining_proofs += 1; + if *author == self.my_peer_id { + self.remaining_local_txns += num_txns; + self.remaining_local_proofs += 1; + } + } + + #[inline] + fn dec_remaining_proofs(&mut self, author: &PeerId, num_txns: u64) { + self.remaining_txns_with_duplicates -= num_txns; + self.remaining_proofs -= 1; + if *author == self.my_peer_id { + self.remaining_local_txns -= num_txns; + self.remaining_local_proofs -= 1; + } + } + + #[cfg(test)] + pub(crate) fn batch_summaries_len(&self) -> usize { + self.items + .iter() + .filter(|(_, item)| item.txn_summaries.is_some()) + .count() + } + + pub(crate) fn num_batches_without_proof(&self) -> usize { + self.items + .iter() + .filter(|(_, item)| item.proof.is_none()) + .count() + } + + #[cfg(test)] + pub(crate) fn is_empty(&self) -> bool { + self.items.is_empty() + && self.author_to_batches.is_empty() + && self.expirations.is_empty() + && self.txn_summary_num_occurrences.is_empty() + } + + fn remaining_txns_without_duplicates(&self) -> u64 { + // txn_summary_num_occurrences counts all the unexpired and uncommitted proofs that have txn summaries + // in batch_summaries. 
+ let mut remaining_txns = self.txn_summary_num_occurrences.len() as u64; + + // For the unexpired and uncommitted proofs that don't have transaction summaries in batch_summaries, + // we need to add the proof.num_txns() to the remaining_txns. + remaining_txns += self + .author_to_batches + .values() + .map(|batches| { + batches + .keys() + .map(|batch_sort_key| { + if let Some(item) = self.items.get(&batch_sort_key.batch_key) { + if item.txn_summaries.is_none() { + if let Some(ref proof) = item.proof { + // The batch has a proof but not txn summaries + return proof.num_txns(); + } + } + } + 0 + }) + .sum::() + }) + .sum::(); + + remaining_txns + } + + /// Add the ProofOfStore to proof queue. + pub(crate) fn insert_proof(&mut self, proof: ProofOfStore) { + if proof.expiration() <= self.latest_block_timestamp { + counters::inc_rejected_pos_count(counters::POS_EXPIRED_LABEL); + return; + } + let batch_key = BatchKey::from_info(proof.info()); + if self + .items + .get(&batch_key) + .is_some_and(|item| item.proof.is_some() || item.is_committed()) + { + counters::inc_rejected_pos_count(counters::POS_DUPLICATE_LABEL); + return; + } + + let author = proof.author(); + let bucket = proof.gas_bucket_start(); + let num_txns = proof.num_txns(); + let expiration = proof.expiration(); + + let batch_sort_key = BatchSortKey::from_info(proof.info()); + self.author_to_batches + .entry(author) + .or_default() + .insert(batch_sort_key.clone(), proof.info().clone()); + self.expirations.add_item(batch_sort_key, expiration); + + // If we are here, then proof is added for the first time. Otherwise, we will + // return early. We only count when proof is added for the first time and txn + // summary exists. 
+ if let Some(txn_summaries) = self + .items + .get(&batch_key) + .and_then(|item| item.txn_summaries.as_ref()) + { + for txn_summary in txn_summaries { + *self + .txn_summary_num_occurrences + .entry(*txn_summary) + .or_insert(0) += 1; + } + } + + match self.items.entry(batch_key) { + Entry::Occupied(mut entry) => { + let item = entry.get_mut(); + item.proof = Some(proof); + item.proof_insertion_time = Some(Instant::now()); + }, + Entry::Vacant(entry) => { + entry.insert(QueueItem { + info: proof.info().clone(), + proof: Some(proof), + proof_insertion_time: Some(Instant::now()), + txn_summaries: None, + }); + }, + } + + if author == self.my_peer_id { + counters::inc_local_pos_count(bucket); + } else { + counters::inc_remote_pos_count(bucket); + } + self.inc_remaining_proofs(&author, num_txns); + + sample!( + SampleRate::Duration(Duration::from_millis(500)), + self.gc_expired_batch_summaries_without_proofs() + ); + } + + pub fn insert_batches( + &mut self, + batches_with_txn_summaries: Vec<(BatchInfo, Vec)>, + ) { + let start = Instant::now(); + + for (batch_info, txn_summaries) in batches_with_txn_summaries.into_iter() { + let batch_sort_key = BatchSortKey::from_info(&batch_info); + let batch_key = BatchKey::from_info(&batch_info); + + // If the batch is either committed or the txn summary already exists, skip + // inserting this batch. + if self + .items + .get(&batch_key) + .is_some_and(|item| item.is_committed() || item.txn_summaries.is_some()) + { + continue; + } + + self.author_to_batches + .entry(batch_info.author()) + .or_default() + .insert(batch_sort_key.clone(), batch_info.clone()); + self.expirations + .add_item(batch_sort_key, batch_info.expiration()); + + // We only count txn summaries first time it is added to the queue + // and only if the proof already exists. 
+ if self + .items + .get(&batch_key) + .is_some_and(|item| item.proof.is_some()) + { + for txn_summary in &txn_summaries { + *self + .txn_summary_num_occurrences + .entry(*txn_summary) + .or_insert(0) += 1; + } + } + + match self.items.entry(batch_key) { + Entry::Occupied(mut entry) => { + entry.get_mut().txn_summaries = Some(txn_summaries); + }, + Entry::Vacant(entry) => { + entry.insert(QueueItem { + info: batch_info, + proof: None, + proof_insertion_time: None, + txn_summaries: Some(txn_summaries), + }); + }, + } + } + + sample!( + SampleRate::Duration(Duration::from_millis(500)), + self.gc_expired_batch_summaries_without_proofs() + ); + counters::PROOF_QUEUE_ADD_BATCH_SUMMARIES_DURATION.observe_duration(start.elapsed()); + } + + // If the validator receives the batch from batch coordinator, but doesn't receive the corresponding + // proof before the batch expires, the batch summary will be garbage collected. + fn gc_expired_batch_summaries_without_proofs(&mut self) { + let timestamp = aptos_infallible::duration_since_epoch().as_micros() as u64; + self.items.retain(|_, item| { + if item.is_committed() || item.proof.is_some() || item.info.expiration() > timestamp { + true + } else { + self.author_to_batches + .get_mut(&item.info.author()) + .map(|queue| queue.remove(&BatchSortKey::from_info(&item.info))); + counters::GARBAGE_COLLECTED_IN_PROOF_QUEUE_COUNTER + .with_label_values(&["expired_batch_without_proof"]) + .inc(); + false + } + }); + } + + fn log_remaining_data_after_pull( + &self, + excluded_batches: &HashSet, + pulled_proofs: &[ProofOfStore], + ) { + let mut num_proofs_remaining_after_pull = 0; + let mut num_txns_remaining_after_pull = 0; + let excluded_batch_keys = excluded_batches + .iter() + .map(BatchKey::from_info) + .collect::>(); + + let remaining_batches = self + .author_to_batches + .iter() + .flat_map(|(_, batches)| batches) + .filter(|(batch_sort_key, _)| { + !excluded_batch_keys.contains(&batch_sort_key.batch_key) + && !pulled_proofs + 
.iter() + .any(|p| BatchKey::from_info(p.info()) == batch_sort_key.batch_key) + }) + .filter_map(|(batch_sort_key, batch)| { + if self + .items + .get(&batch_sort_key.batch_key) + .is_some_and(|item| item.proof.is_some()) + { + Some(batch) + } else { + None + } + }); + + for batch in remaining_batches { + num_proofs_remaining_after_pull += 1; + num_txns_remaining_after_pull += batch.num_txns(); + } + + let pulled_txns = pulled_proofs.iter().map(|p| p.num_txns()).sum::(); + info!( + "pulled_proofs: {}, pulled_txns: {}, remaining_proofs: {:?}, remaining_txns: {:?}", + pulled_proofs.len(), + pulled_txns, + num_proofs_remaining_after_pull, + num_txns_remaining_after_pull, + ); + counters::NUM_PROOFS_IN_PROOF_QUEUE_AFTER_PULL + .observe(num_proofs_remaining_after_pull as f64); + counters::NUM_TXNS_IN_PROOF_QUEUE_AFTER_PULL.observe(num_txns_remaining_after_pull as f64); + } + + // gets excluded and iterates over the vector returning non excluded or expired entries. + // return the vector of pulled PoS, and the size of the remaining PoS + // The flag in the second return argument is true iff the entire proof queue is fully utilized + // when pulling the proofs. If any proof from proof queue cannot be included due to size limits, + // this flag is set false. + // Returns the proofs, the number of unique transactions in the proofs, and a flag indicating + // whether the proof queue is fully utilized. 
+ pub(crate) fn pull_proofs( + &mut self, + excluded_batches: &HashSet, + max_txns: PayloadTxnsSize, + max_txns_after_filtering: u64, + soft_max_txns_after_filtering: u64, + return_non_full: bool, + block_timestamp: Duration, + ) -> (Vec, PayloadTxnsSize, u64, bool) { + let (result, all_txns, unique_txns, is_full) = self.pull_internal( + false, + excluded_batches, + max_txns, + max_txns_after_filtering, + soft_max_txns_after_filtering, + return_non_full, + block_timestamp, + ); + let proof_of_stores: Vec<_> = result + .into_iter() + .map(|item| { + let proof = item.proof.clone().expect("proof must exist due to filter"); + let bucket = proof.gas_bucket_start(); + counters::pos_to_pull( + bucket, + item.proof_insertion_time + .expect("proof must exist due to filter") + .elapsed() + .as_secs_f64(), + ); + proof + }) + .collect(); + + if is_full || return_non_full { + counters::BLOCK_SIZE_WHEN_PULL.observe(unique_txns as f64); + counters::TOTAL_BLOCK_SIZE_WHEN_PULL.observe(all_txns.count() as f64); + counters::KNOWN_DUPLICATE_TXNS_WHEN_PULL + .observe((all_txns.count().saturating_sub(unique_txns)) as f64); + counters::BLOCK_BYTES_WHEN_PULL.observe(all_txns.size_in_bytes() as f64); + + counters::PROOF_SIZE_WHEN_PULL.observe(proof_of_stores.len() as f64); + // Number of proofs remaining in proof queue after the pull + self.log_remaining_data_after_pull(excluded_batches, &proof_of_stores); + } + + (proof_of_stores, all_txns, unique_txns, !is_full) + } + + pub fn pull_batches( + &mut self, + excluded_batches: &HashSet, + max_txns: PayloadTxnsSize, + max_txns_after_filtering: u64, + soft_max_txns_after_filtering: u64, + return_non_full: bool, + block_timestamp: Duration, + ) -> (Vec, PayloadTxnsSize, u64) { + let (result, all_txns, unique_txns, _) = self.pull_internal( + true, + excluded_batches, + max_txns, + max_txns_after_filtering, + soft_max_txns_after_filtering, + return_non_full, + block_timestamp, + ); + let batches = result.into_iter().map(|item| 
item.info.clone()).collect(); + (batches, all_txns, unique_txns) + } + + pub fn pull_batches_with_transactions( + &mut self, + excluded_batches: &HashSet, + max_txns: PayloadTxnsSize, + max_txns_after_filtering: u64, + soft_max_txns_after_filtering: u64, + return_non_full: bool, + block_timestamp: Duration, + ) -> ( + Vec<(BatchInfo, Vec)>, + PayloadTxnsSize, + u64, + ) { + let (batches, all_txns, unique_txns) = self.pull_batches( + excluded_batches, + max_txns, + max_txns_after_filtering, + soft_max_txns_after_filtering, + return_non_full, + block_timestamp, + ); + let mut result = Vec::new(); + for batch in batches.into_iter() { + if let Ok(mut persisted_value) = self.batch_store.get_batch_from_local(batch.digest()) { + if let Some(txns) = persisted_value.take_payload() { + result.push((batch, txns)); + } + } else { + warn!( + "Couldn't find a batch in local storage while creating inline block: {:?}", + batch.digest() + ); + } + } + (result, all_txns, unique_txns) + } + + fn pull_internal( + &mut self, + batches_without_proofs: bool, + excluded_batches: &HashSet, + max_txns: PayloadTxnsSize, + max_txns_after_filtering: u64, + soft_max_txns_after_filtering: u64, + return_non_full: bool, + block_timestamp: Duration, + ) -> (Vec<&QueueItem>, PayloadTxnsSize, u64, bool) { + let mut result = Vec::new(); + let mut cur_unique_txns = 0; + let mut cur_all_txns = PayloadTxnsSize::zero(); + let mut excluded_txns = 0; + let mut full = false; + // Set of all the excluded transactions and all the transactions included in the result + let mut filtered_txns = HashSet::new(); + for batch_info in excluded_batches { + let batch_key = BatchKey::from_info(batch_info); + if let Some(txn_summaries) = self + .items + .get(&batch_key) + .and_then(|item| item.txn_summaries.as_ref()) + { + for txn_summary in txn_summaries { + filtered_txns.insert(*txn_summary); + } + } + } + + let mut iters = vec![]; + for (_, batches) in self.author_to_batches.iter() { + let batch_iter = 
batches.iter().rev().filter_map(|(sort_key, info)| { + if let Some(item) = self.items.get(&sort_key.batch_key) { + if item.is_committed() { + return None; + } + if !(batches_without_proofs ^ item.proof.is_none()) { + return Some((info, item)); + } + } + None + }); + iters.push(batch_iter); + } + + while !iters.is_empty() { + iters.shuffle(&mut thread_rng()); + iters.retain_mut(|iter| { + if full { + return false; + } + + if let Some((batch, item)) = iter.next() { + if excluded_batches.contains(batch) { + excluded_txns += batch.num_txns(); + } else { + // Calculate the number of unique transactions if this batch is included in the result + let unique_txns = if let Some(ref txn_summaries) = item.txn_summaries { + cur_unique_txns + + txn_summaries + .iter() + .filter(|txn_summary| { + !filtered_txns.contains(txn_summary) + && block_timestamp.as_secs() + < txn_summary.expiration_timestamp_secs + }) + .count() as u64 + } else { + cur_unique_txns + batch.num_txns() + }; + if cur_all_txns + batch.size() > max_txns + || unique_txns > max_txns_after_filtering + { + // Exceeded the limit for requested bytes or number of transactions. + full = true; + return false; + } + cur_all_txns += batch.size(); + // Add this batch to filtered_txns and calculate the number of + // unique transactions added in the result so far. 
+ cur_unique_txns += + item.txn_summaries + .as_ref() + .map_or(batch.num_txns(), |summaries| { + summaries + .iter() + .filter(|summary| { + filtered_txns.insert(**summary) + && block_timestamp.as_secs() + < summary.expiration_timestamp_secs + }) + .count() as u64 + }); + assert!(item.proof.is_none() == batches_without_proofs); + result.push(item); + if cur_all_txns == max_txns + || cur_unique_txns == max_txns_after_filtering + || cur_unique_txns >= soft_max_txns_after_filtering + { + full = true; + return false; + } + } + true + } else { + false + } + }) + } + info!( + // before non full check + block_total_txns = cur_all_txns, + block_unique_txns = cur_unique_txns, + max_txns = max_txns, + max_txns_after_filtering = max_txns_after_filtering, + soft_max_txns_after_filtering = soft_max_txns_after_filtering, + max_bytes = max_txns.size_in_bytes(), + result_is_proof = !batches_without_proofs, + result_count = result.len(), + full = full, + return_non_full = return_non_full, + "Pull payloads from QuorumStore: internal" + ); + + counters::EXCLUDED_TXNS_WHEN_PULL.observe(excluded_txns as f64); + + if full || return_non_full { + // Stable sort, so the order of proofs within an author will not change. 
+ result.sort_by_key(|item| Reverse(item.info.gas_bucket_start())); + (result, cur_all_txns, cur_unique_txns, full) + } else { + (Vec::new(), PayloadTxnsSize::zero(), 0, full) + } + } + + pub(crate) fn handle_updated_block_timestamp(&mut self, block_timestamp: u64) { + let start = Instant::now(); + assert!( + self.latest_block_timestamp <= block_timestamp, + "Decreasing block timestamp" + ); + self.latest_block_timestamp = block_timestamp; + if let Some(time_lag) = aptos_infallible::duration_since_epoch() + .checked_sub(Duration::from_micros(block_timestamp)) + { + counters::TIME_LAG_IN_BATCH_PROOF_QUEUE.observe_duration(time_lag); + } + + let expired = self.expirations.expire(block_timestamp); + let mut num_expired_but_not_committed = 0; + for key in &expired { + if let Some(mut queue) = self.author_to_batches.remove(&key.author()) { + if let Some(batch) = queue.remove(key) { + let item = self + .items + .get(&key.batch_key) + .expect("Entry for unexpired batch must exist"); + if item.proof.is_some() { + // not committed proof that is expired + num_expired_but_not_committed += 1; + counters::GAP_BETWEEN_BATCH_EXPIRATION_AND_CURRENT_TIME_WHEN_COMMIT + .observe((block_timestamp - batch.expiration()) as f64); + if let Some(ref txn_summaries) = item.txn_summaries { + for txn_summary in txn_summaries { + if let Some(count) = + self.txn_summary_num_occurrences.get_mut(txn_summary) + { + *count -= 1; + if *count == 0 { + self.txn_summary_num_occurrences.remove(txn_summary); + } + }; + } + } + self.dec_remaining_proofs(&batch.author(), batch.num_txns()); + counters::GARBAGE_COLLECTED_IN_PROOF_QUEUE_COUNTER + .with_label_values(&["expired_proof"]) + .inc(); + } + claims::assert_some!(self.items.remove(&key.batch_key)); + } + if !queue.is_empty() { + self.author_to_batches.insert(key.author(), queue); + } + } + } + counters::PROOF_QUEUE_UPDATE_TIMESTAMP_DURATION.observe_duration(start.elapsed()); + 
counters::NUM_PROOFS_EXPIRED_WHEN_COMMIT.inc_by(num_expired_but_not_committed); + } + + // Number of unexpired and uncommitted proofs in the pipeline without txn summaries in + // batch_summaries + fn num_proofs_without_batch_summary(&self) -> u64 { + let mut count = 0; + self.author_to_batches.values().for_each(|batches| { + count += batches + .iter() + .filter(|(sort_key, _)| { + self.items.get(&sort_key.batch_key).map_or(false, |item| { + item.proof.is_some() && item.txn_summaries.is_none() + }) + }) + .count() as u64; + }); + count + } + + // Number of unexpired and uncommitted proofs in the pipeline with txn summaries in + // batch_summaries + fn num_proofs_with_batch_summary(&self) -> u64 { + let mut count = 0; + self.author_to_batches.values().for_each(|batches| { + count += batches + .iter() + .filter(|(sort_key, _)| { + self.items.get(&sort_key.batch_key).map_or(false, |item| { + item.proof.is_some() && item.txn_summaries.is_some() + }) + }) + .count() as u64; + }); + count + } + + pub(crate) fn remaining_txns_and_proofs(&self) -> (u64, u64) { + let start = Instant::now(); + counters::NUM_TOTAL_TXNS_LEFT_ON_UPDATE.observe(self.remaining_txns_with_duplicates as f64); + counters::NUM_TOTAL_PROOFS_LEFT_ON_UPDATE.observe(self.remaining_proofs as f64); + counters::NUM_LOCAL_TXNS_LEFT_ON_UPDATE.observe(self.remaining_local_txns as f64); + counters::NUM_LOCAL_PROOFS_LEFT_ON_UPDATE.observe(self.remaining_local_proofs as f64); + + let remaining_txns_without_duplicates = self.remaining_txns_without_duplicates(); + counters::NUM_UNIQUE_TOTAL_TXNS_LEFT_ON_UPDATE + .observe(remaining_txns_without_duplicates as f64); + + // Number of txns with more than one batches + sample!( + SampleRate::Duration(Duration::from_secs(3)), + counters::TXNS_WITH_DUPLICATE_BATCHES.observe( + self.txn_summary_num_occurrences + .iter() + .filter(|(_, count)| **count > 1) + .count() as f64, + ); + ); + + // Number of txns in unexpired and uncommitted proofs with summaries in batch_summaries 
+ counters::TXNS_IN_PROOFS_WITH_SUMMARIES + .observe(self.txn_summary_num_occurrences.len() as f64); + + // Number of txns in unexpired and uncommitted proofs without summaries in batch_summaries + counters::TXNS_IN_PROOFS_WITHOUT_SUMMARIES.observe( + remaining_txns_without_duplicates + .saturating_sub(self.txn_summary_num_occurrences.len() as u64) as f64, + ); + + counters::PROOFS_WITHOUT_BATCH_SUMMARY + .observe(self.num_proofs_without_batch_summary() as f64); + counters::PROOFS_WITH_BATCH_SUMMARY.observe(self.num_proofs_with_batch_summary() as f64); + + counters::PROOF_QUEUE_REMAINING_TXNS_DURATION.observe_duration(start.elapsed()); + (remaining_txns_without_duplicates, self.remaining_proofs) + } + + // Mark in the hashmap committed PoS, but keep them until they expire + pub(crate) fn mark_committed(&mut self, batches: Vec) { + let start = Instant::now(); + for batch in batches.into_iter() { + let batch_key = BatchKey::from_info(&batch); + if let Some(item) = self.items.get(&batch_key) { + if let Some(ref proof) = item.proof { + let insertion_time = item + .proof_insertion_time + .expect("Insertion time is updated with proof"); + counters::pos_to_commit( + proof.gas_bucket_start(), + insertion_time.elapsed().as_secs_f64(), + ); + self.dec_remaining_proofs(&batch.author(), batch.num_txns()); + counters::GARBAGE_COLLECTED_IN_PROOF_QUEUE_COUNTER + .with_label_values(&["committed_proof"]) + .inc(); + } + let item = self + .items + .get_mut(&batch_key) + .expect("must exist due to check"); + + if item.proof.is_some() { + if let Some(ref txn_summaries) = item.txn_summaries { + for txn_summary in txn_summaries { + if let Some(count) = + self.txn_summary_num_occurrences.get_mut(txn_summary) + { + *count -= 1; + if *count == 0 { + self.txn_summary_num_occurrences.remove(txn_summary); + } + }; + } + } + } else if !item.is_committed() { + counters::GARBAGE_COLLECTED_IN_PROOF_QUEUE_COUNTER + .with_label_values(&["committed_batch_without_proof"]) + .inc(); + } + // The item 
is just marked committed for now. + // When the batch is expired, then it will be removed from items. + item.mark_committed(); + } else { + let batch_sort_key = BatchSortKey::from_info(batch.info()); + self.expirations + .add_item(batch_sort_key.clone(), batch.expiration()); + self.author_to_batches + .entry(batch.author()) + .or_default() + .insert(batch_sort_key, batch.clone()); + self.items.insert(batch_key, QueueItem { + info: batch, + txn_summaries: None, + proof: None, + proof_insertion_time: None, + }); + } + } + counters::PROOF_QUEUE_COMMIT_DURATION.observe_duration(start.elapsed()); + } +} diff --git a/consensus/src/quorum_store/batch_requester.rs b/consensus/src/quorum_store/batch_requester.rs index 3e136c6f1efb1..01b5e47c7f003 100644 --- a/consensus/src/quorum_store/batch_requester.rs +++ b/consensus/src/quorum_store/batch_requester.rs @@ -6,10 +6,10 @@ use crate::{ network::QuorumStoreSender, quorum_store::{ counters, - types::{BatchRequest, BatchResponse}, + types::{BatchRequest, BatchResponse, PersistedValue}, }, }; -use aptos_consensus_types::proof_of_store::{BatchInfo, ProofOfStore}; +use aptos_consensus_types::proof_of_store::BatchInfo; use aptos_crypto::HashValue; use aptos_executor_types::*; use aptos_logger::prelude::*; @@ -130,14 +130,14 @@ impl BatchRequester { pub(crate) async fn request_batch( &self, - proof: ProofOfStore, + digest: HashValue, + expiration: u64, + responders: Vec, ret_tx: oneshot::Sender>>, + mut subscriber_rx: oneshot::Receiver, ) -> Option<(BatchInfo, Vec)> { - let digest = *proof.digest(); - let expiration = proof.expiration(); - let signers = proof.shuffled_signers(&self.validator_verifier); let validator_verifier = self.validator_verifier.clone(); - let mut request_state = BatchRequesterState::new(signers, ret_tx, self.retry_limit); + let mut request_state = BatchRequesterState::new(responders, ret_tx, self.retry_limit); let network_sender = self.network_sender.clone(); let request_num_peers = self.request_num_peers; 
let my_peer_id = self.my_peer_id; @@ -191,6 +191,19 @@ impl BatchRequester { } } }, + result = &mut subscriber_rx => { + match result { + Ok(persisted_value) => { + counters::RECEIVED_BATCH_FROM_SUBSCRIPTION_COUNT.inc(); + let (info, maybe_payload) = persisted_value.unpack(); + request_state.serve_request(*info.digest(), maybe_payload); + return None; + } + Err(err) => { + debug!("channel closed: {}", err); + } + }; + }, } } counters::RECEIVED_BATCH_REQUEST_TIMEOUT_COUNT.inc(); diff --git a/consensus/src/quorum_store/batch_store.rs b/consensus/src/quorum_store/batch_store.rs index e6a38aa2314b0..7ae887ea08f37 100644 --- a/consensus/src/quorum_store/batch_store.rs +++ b/consensus/src/quorum_store/batch_store.rs @@ -12,7 +12,7 @@ use crate::{ }, }; use anyhow::bail; -use aptos_consensus_types::proof_of_store::{ProofOfStore, SignedBatchInfo}; +use aptos_consensus_types::proof_of_store::SignedBatchInfo; use aptos_crypto::HashValue; use aptos_executor_types::{ExecutorError, ExecutorResult}; use aptos_logger::prelude::*; @@ -116,6 +116,7 @@ pub struct BatchStore { db_quota: usize, batch_quota: usize, validator_signer: ValidatorSigner, + persist_subscribers: DashMap>>, } impl BatchStore { @@ -140,6 +141,7 @@ impl BatchStore { db_quota, batch_quota, validator_signer, + persist_subscribers: DashMap::new(), }; let db_content = db_clone .get_all_batches() @@ -172,13 +174,15 @@ impl BatchStore { "QS: Batchreader recovery expired keys len {}", expired_keys.len() ); - db_clone.delete_batches(expired_keys).unwrap(); + db_clone + .delete_batches(expired_keys) + .expect("Deletion of expired keys should not fail"); batch_store } fn epoch(&self) -> u64 { - *self.epoch.get().unwrap() + *self.epoch.get().expect("Epoch should always be set") } fn free_quota(&self, value: PersistedValue) { @@ -244,10 +248,13 @@ impl BatchStore { } // Add expiration for the inserted entry, no need to be atomic w. insertion. 
- self.expirations - .lock() - .unwrap() - .add_item(digest, expiration_time); + #[allow(clippy::unwrap_used)] + { + self.expirations + .lock() + .unwrap() + .add_item(digest, expiration_time); + } Ok(true) } @@ -274,6 +281,7 @@ impl BatchStore { } // pub(crate) for testing + #[allow(clippy::unwrap_used)] pub(crate) fn clear_expired_payload(&self, certified_time: u64) -> Vec { let expired_digests = self.expirations.lock().unwrap().expire(certified_time); let mut ret = Vec::new(); @@ -284,6 +292,7 @@ impl BatchStore { // digest with a higher expiration would update the persisted value and // effectively extend the expiration. if entry.get().expiration() <= certified_time { + self.persist_subscribers.remove(entry.get().digest()); Some(entry.remove()) } else { None @@ -306,6 +315,7 @@ impl BatchStore { let batch_info = persist_request.batch_info().clone(); trace!("QS: sign digest {}", persist_request.digest()); if needs_db { + #[allow(clippy::unwrap_in_result)] self.db .save_batch(persist_request) .expect("Could not write to DB"); @@ -371,13 +381,39 @@ impl BatchStore { Err(ExecutorError::CouldNotGetData) } } + + /// This calls lets the caller subscribe to a batch being added to the batch store. + /// This can be useful in cases where there are multiple flows to add a batch (like + /// direct from author batch / batch requester fetch) to the batch store and either + /// flow needs to subscribe to the other. + fn subscribe(&self, digest: HashValue) -> oneshot::Receiver { + let (tx, rx) = oneshot::channel(); + self.persist_subscribers.entry(digest).or_default().push(tx); + + // This is to account for the race where this subscribe call happens after the + // persist call. 
+ if let Ok(value) = self.get_batch_from_local(&digest) { + self.notify_subscribers(value) + } + + rx + } + + fn notify_subscribers(&self, value: PersistedValue) { + if let Some((_, subscribers)) = self.persist_subscribers.remove(value.digest()) { + for subscriber in subscribers { + subscriber.send(value.clone()).ok(); + } + } + } } impl BatchWriter for BatchStore { fn persist(&self, persist_requests: Vec) -> Vec { let mut signed_infos = vec![]; for persist_request in persist_requests.into_iter() { - if let Some(signed_info) = self.persist_inner(persist_request) { + if let Some(signed_info) = self.persist_inner(persist_request.clone()) { + self.notify_subscribers(persist_request); signed_infos.push(signed_info); } } @@ -391,7 +427,9 @@ pub trait BatchReader: Send + Sync { fn get_batch( &self, - proof: ProofOfStore, + digest: HashValue, + expiration: u64, + signers: Vec, ) -> oneshot::Receiver>>; fn update_certified_timestamp(&self, certified_time: u64); @@ -421,26 +459,31 @@ impl BatchReader for Batch fn get_batch( &self, - proof: ProofOfStore, + digest: HashValue, + expiration: u64, + signers: Vec, ) -> oneshot::Receiver>> { let (tx, rx) = oneshot::channel(); let batch_store = self.batch_store.clone(); let batch_requester = self.batch_requester.clone(); tokio::spawn(async move { - if let Ok(mut value) = batch_store.get_batch_from_local(proof.digest()) { + if let Ok(mut value) = batch_store.get_batch_from_local(&digest) { if tx .send(Ok(value.take_payload().expect("Must have payload"))) .is_err() { debug!( "Receiver of local batch not available for digest {}", - proof.digest() + digest, ) }; } else { // Quorum store metrics counters::MISSED_BATCHES_COUNT.inc(); - if let Some((batch_info, payload)) = batch_requester.request_batch(proof, tx).await + let subscriber_rx = batch_store.subscribe(digest); + if let Some((batch_info, payload)) = batch_requester + .request_batch(digest, expiration, signers, tx, subscriber_rx) + .await { 
batch_store.persist(vec![PersistedValue::new(batch_info, Some(payload))]); } diff --git a/consensus/src/quorum_store/counters.rs b/consensus/src/quorum_store/counters.rs index 74f179e1fb3d6..fc31cd77e2189 100644 --- a/consensus/src/quorum_store/counters.rs +++ b/consensus/src/quorum_store/counters.rs @@ -1,6 +1,8 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::unwrap_used)] + use aptos_metrics_core::{ exponential_buckets, op_counters::DurationHistogram, register_avg_counter, register_histogram, register_histogram_vec, register_int_counter, register_int_counter_vec, Histogram, @@ -28,6 +30,14 @@ static TRANSACTION_COUNT_BUCKETS: Lazy> = Lazy::new(|| { .unwrap() }); +static PROOF_COUNT_BUCKETS: Lazy> = Lazy::new(|| { + [ + 1.0, 3.0, 5.0, 7.0, 10.0, 12.0, 15.0, 20.0, 25.0, 30.0, 40.0, 50.0, 60.0, 75.0, 100.0, + 125.0, 150.0, 200.0, 250.0, 300.0, 500.0, + ] + .to_vec() +}); + static BYTE_BUCKETS: Lazy> = Lazy::new(|| { exponential_buckets( /*start=*/ 500.0, /*factor=*/ 1.5, /*count=*/ 25, @@ -71,6 +81,46 @@ pub static MAIN_LOOP: Lazy = Lazy::new(|| { ) }); +pub static PROOF_QUEUE_ADD_BATCH_SUMMARIES_DURATION: Lazy = Lazy::new(|| { + DurationHistogram::new( + register_histogram!( + "quorum_store_proof_queue_add_batch_summaries_duration", + "Duration of adding batch summaries to proof queue" + ) + .unwrap(), + ) +}); + +pub static PROOF_QUEUE_COMMIT_DURATION: Lazy = Lazy::new(|| { + DurationHistogram::new( + register_histogram!( + "quorum_store_proof_queue_commit_duration", + "Duration of committing proofs from proof queue" + ) + .unwrap(), + ) +}); + +pub static PROOF_QUEUE_UPDATE_TIMESTAMP_DURATION: Lazy = Lazy::new(|| { + DurationHistogram::new( + register_histogram!( + "quorum_store_proof_queue_update_block_timestamp_duration", + "Duration of updating block timestamp in proof queue" + ) + .unwrap(), + ) +}); + +pub static PROOF_QUEUE_REMAINING_TXNS_DURATION: Lazy = Lazy::new(|| { + DurationHistogram::new( + 
register_histogram!( + "quorum_store_proof_queue_remaining_txns_duration", + "Duration of calculating remaining txns in proof queue" + ) + .unwrap(), + ) +}); + /// Duration of each run of the event loop. pub static PROOF_MANAGER_MAIN_LOOP: Lazy = Lazy::new(|| { DurationHistogram::new( @@ -82,6 +132,8 @@ pub static PROOF_MANAGER_MAIN_LOOP: Lazy = Lazy::new(|| { ) }); + + /// Duration of each run of the event loop. pub static BATCH_GENERATOR_MAIN_LOOP: Lazy = Lazy::new(|| { DurationHistogram::new( @@ -136,6 +188,24 @@ pub static BLOCK_SIZE_WHEN_PULL: Lazy = Lazy::new(|| { .unwrap() }); +pub static TOTAL_BLOCK_SIZE_WHEN_PULL: Lazy = Lazy::new(|| { + register_histogram!( + "quorum_store_total_block_size_when_pull", + "Histogram for the total size of transactions per block when pulled for consensus.", + BYTE_BUCKETS.clone(), + ) + .unwrap() +}); + +pub static KNOWN_DUPLICATE_TXNS_WHEN_PULL: Lazy = Lazy::new(|| { + register_histogram!( + "quorum_store_known_duplicate_txns_when_pull", + "Histogram for the number of known duplicate transactions in a block when pulled for consensus.", + TRANSACTION_COUNT_BUCKETS.clone(), + ) + .unwrap() +}); + pub static NUM_INLINE_BATCHES: Lazy = Lazy::new(|| { register_histogram!( "num_inline_batches_in_block_proposal", @@ -304,6 +374,71 @@ pub fn pos_to_commit(bucket: u64, secs: f64) { .observe(secs); } +////////////////////// +// Proof Queue +////////////////////// + +pub static PROOFS_WITHOUT_BATCH_SUMMARY: Lazy = Lazy::new(|| { + register_histogram!( + "quorum_store_proofs_without_batch_data", + "Number of proofs received without batch data", + PROOF_COUNT_BUCKETS.clone(), + ) + .unwrap() +}); + +pub static PROOFS_WITH_BATCH_SUMMARY: Lazy = Lazy::new(|| { + register_histogram!( + "quorum_store_proofs_with_batch_data", + "Number of proofs received without batch data", + PROOF_COUNT_BUCKETS.clone(), + ) + .unwrap() +}); + +pub static TXNS_WITH_DUPLICATE_BATCHES: Lazy = Lazy::new(|| { + register_histogram!( + 
"quorum_store_txns_with_duplicate_batches", + "Number of transactions received with duplicate batches", + TRANSACTION_COUNT_BUCKETS.clone(), + ) + .unwrap() +}); + +pub static TXNS_IN_PROOFS_WITH_SUMMARIES: Lazy = Lazy::new(|| { + register_histogram!( + "quorum_store_txns_in_proof_queue_with_summaries", + "Number of transactions in the proof queue", + TRANSACTION_COUNT_BUCKETS.clone(), + ) + .unwrap() +}); + +pub static TXNS_IN_PROOFS_WITHOUT_SUMMARIES: Lazy = Lazy::new(|| { + register_histogram!( + "quorum_store_txns_in_proof_queue_without_summaries", + "Number of transactions in the proof queue", + TRANSACTION_COUNT_BUCKETS.clone(), + ) + .unwrap() +}); + +pub static NUM_PROOFS_IN_PROOF_QUEUE_AFTER_PULL: Lazy = Lazy::new(|| { + register_histogram!( + "quorum_store_num_proofs_left_in_proof_queue_after_pull", + "Histogram for the number of proofs left in the proof queue after block proposal generation.", + PROOF_COUNT_BUCKETS.clone(), + ).unwrap() +}); + +pub static NUM_TXNS_IN_PROOF_QUEUE_AFTER_PULL: Lazy = Lazy::new(|| { + register_histogram!( + "quorum_store_num_txns_left_in_proof_queue_after_pull", + "Histogram for the number of transactions left in the proof queue after block proposal generation.", + TRANSACTION_COUNT_BUCKETS.clone(), + ).unwrap() +}); + /// Histogram for the number of total txns left after adding or cleaning batches. pub static NUM_TOTAL_TXNS_LEFT_ON_UPDATE: Lazy = Lazy::new(|| { register_avg_counter( @@ -312,6 +447,14 @@ pub static NUM_TOTAL_TXNS_LEFT_ON_UPDATE: Lazy = Lazy::new(|| { ) }); +pub static NUM_UNIQUE_TOTAL_TXNS_LEFT_ON_UPDATE: Lazy = Lazy::new(|| { + register_histogram!( + "quorum_store_num_unique_total_txns_left_on_update", + "Histogram for the number of total txns left after adding or cleaning batches, without duplicates.", + TRANSACTION_COUNT_BUCKETS.clone() + ).unwrap() +}); + /// Histogram for the number of total batches/PoS left after adding or cleaning batches. 
pub static NUM_TOTAL_PROOFS_LEFT_ON_UPDATE: Lazy = Lazy::new(|| { register_avg_counter( @@ -606,6 +749,14 @@ pub static RECEIVED_BATCH_RESPONSE_ERROR_COUNT: Lazy = Lazy::new(|| .unwrap() }); +pub static RECEIVED_BATCH_FROM_SUBSCRIPTION_COUNT: Lazy = Lazy::new(|| { + register_int_counter!( + "quorum_store_batch_from_subscription_count", + "Count of the number of batches received via batch store subscription." + ) + .unwrap() +}); + pub static QS_BACKPRESSURE_TXN_COUNT: Lazy = Lazy::new(|| { register_avg_counter( "quorum_store_backpressure_txn_count", @@ -613,6 +764,13 @@ pub static QS_BACKPRESSURE_TXN_COUNT: Lazy = Lazy::new(|| { ) }); +pub static QS_BACKPRESSURE_MAKE_STRICTER_TXN_COUNT: Lazy = Lazy::new(|| { + register_avg_counter( + "quorum_store_backpressure_make_stricter_txn_count", + "Indicator of whether Quorum Store txn count backpressure is being made stricter.", + ) +}); + pub static QS_BACKPRESSURE_PROOF_COUNT: Lazy = Lazy::new(|| { register_avg_counter( "quorum_store_backpressure_proof_count", @@ -653,6 +811,15 @@ pub static EMPTY_BATCH_CREATION_DURATION: Lazy = Lazy::new(|| ) }); +pub static GARBAGE_COLLECTED_IN_PROOF_QUEUE_COUNTER: Lazy = Lazy::new(|| { + register_int_counter_vec!( + "quorum_store_garbage_collected_batch_count", + "Count of the number of garbage collected batches.", + &["reason"] + ) + .unwrap() +}); + /// Histogram of the time it takes to compute bucketed batches after txns are pulled from mempool. 
pub static BATCH_CREATION_COMPUTE_LATENCY: Lazy = Lazy::new(|| { DurationHistogram::new( @@ -694,6 +861,25 @@ pub static BATCH_SUCCESSFUL_CREATION: Lazy = Lazy::new(|| { ) }); +pub static QUORUM_STORE_MSG_COUNT: Lazy = Lazy::new(|| { + register_int_counter_vec!( + "quorum_store_msg_count", + "Count of messages received by various quoroum store components", + &["type"] + ) + .unwrap() +}); + +pub static TIME_LAG_IN_BATCH_PROOF_QUEUE: Lazy = Lazy::new(|| { + DurationHistogram::new( + register_histogram!( + "quorum_store_time_lag_in_proof_queue", + "Time lag between txn timestamp and current time when txn is added to proof queue", + ) + .unwrap(), + ) +}); + /// Number of validators for which we received signed replies pub static BATCH_RECEIVED_REPLIES_COUNT: Lazy = Lazy::new(|| { register_histogram!( diff --git a/consensus/src/quorum_store/direct_mempool_quorum_store.rs b/consensus/src/quorum_store/direct_mempool_quorum_store.rs index 661ff46a1d4dd..0728f333bb883 100644 --- a/consensus/src/quorum_store/direct_mempool_quorum_store.rs +++ b/consensus/src/quorum_store/direct_mempool_quorum_store.rs @@ -137,21 +137,13 @@ impl DirectMempoolQuorumStore { async fn handle_consensus_request(&self, req: GetPayloadCommand) { match req { - GetPayloadCommand::GetPayloadRequest( - max_txns, - max_bytes, - _max_inline_txns, - _max_inline_bytes, - return_non_full, - payload_filter, - callback, - ) => { + GetPayloadCommand::GetPayloadRequest(request) => { self.handle_block_request( - max_txns, - max_bytes, - return_non_full, - payload_filter, - callback, + request.max_txns_after_filtering, + request.max_txns.size_in_bytes(), + request.return_non_full, + request.filter, + request.callback, ) .await; }, diff --git a/consensus/src/quorum_store/mod.rs b/consensus/src/quorum_store/mod.rs index 888b62b0122c2..9b54ac132aa70 100644 --- a/consensus/src/quorum_store/mod.rs +++ b/consensus/src/quorum_store/mod.rs @@ -7,6 +7,7 @@ pub mod direct_mempool_quorum_store; pub(crate) mod 
batch_coordinator; pub(crate) mod batch_generator; +pub(crate) mod batch_proof_queue; pub(crate) mod batch_requester; pub(crate) mod batch_store; pub(crate) mod network_listener; diff --git a/consensus/src/quorum_store/network_listener.rs b/consensus/src/quorum_store/network_listener.rs index 1d31ea808b1a0..381c50ebde350 100644 --- a/consensus/src/quorum_store/network_listener.rs +++ b/consensus/src/quorum_store/network_listener.rs @@ -44,6 +44,9 @@ impl NetworkListener { match msg { // TODO: does the assumption have to be that network listener is shutdown first? VerifiedEvent::Shutdown(ack_tx) => { + counters::QUORUM_STORE_MSG_COUNT + .with_label_values(&["NetworkListener::shutdown"]) + .inc(); info!("QS: shutdown network listener received"); ack_tx .send(()) @@ -51,6 +54,9 @@ impl NetworkListener { break; }, VerifiedEvent::SignedBatchInfo(signed_batch_infos) => { + counters::QUORUM_STORE_MSG_COUNT + .with_label_values(&["NetworkListener::signedbatchinfo"]) + .inc(); let cmd = ProofCoordinatorCommand::AppendSignature(*signed_batch_infos); self.proof_coordinator_tx .send(cmd) @@ -58,6 +64,9 @@ impl NetworkListener { .expect("Could not send signed_batch_info to proof_coordinator"); }, VerifiedEvent::BatchMsg(batch_msg) => { + counters::QUORUM_STORE_MSG_COUNT + .with_label_values(&["NetworkListener::batchmsg"]) + .inc(); let author = batch_msg.author(); let batches = batch_msg.take(); counters::RECEIVED_BATCH_MSG_COUNT.inc(); @@ -76,6 +85,9 @@ impl NetworkListener { .expect("Could not send remote batch"); }, VerifiedEvent::ProofOfStoreMsg(proofs) => { + counters::QUORUM_STORE_MSG_COUNT + .with_label_values(&["NetworkListener::proofofstore"]) + .inc(); let cmd = ProofManagerCommand::ReceiveProofs(*proofs); self.proof_manager_tx .send(cmd) diff --git a/consensus/src/quorum_store/proof_coordinator.rs b/consensus/src/quorum_store/proof_coordinator.rs index 8d9f9a92468c2..728285250ce6d 100644 --- a/consensus/src/quorum_store/proof_coordinator.rs +++ 
b/consensus/src/quorum_store/proof_coordinator.rs @@ -195,6 +195,7 @@ impl ProofCoordinator { signed_batch_info.batch_info().clone(), IncrementalProofState::new(signed_batch_info.batch_info().clone()), ); + #[allow(deprecated)] self.batch_info_to_time .entry(signed_batch_info.batch_info().clone()) .or_insert(chrono::Utc::now().naive_utc().timestamp_micros() as u64); @@ -228,11 +229,15 @@ impl ProofCoordinator { self.proof_cache .insert(proof.info().clone(), proof.multi_signature().clone()); // quorum store measurements + #[allow(deprecated)] let duration = chrono::Utc::now().naive_utc().timestamp_micros() as u64 - self .batch_info_to_time .remove(signed_batch_info.batch_info()) - .expect("Batch created without recording the time!"); + .ok_or( + // Batch created without recording the time! + SignedBatchInfoError::NoTimeStamps, + )?; counters::BATCH_TO_POS_DURATION.observe_duration(Duration::from_micros(duration)); return Ok(Some(proof)); } @@ -304,12 +309,14 @@ impl ProofCoordinator { Some(command) = rx.recv() => monitor!("proof_coordinator_handle_command", { match command { ProofCoordinatorCommand::Shutdown(ack_tx) => { + counters::QUORUM_STORE_MSG_COUNT.with_label_values(&["ProofCoordinator::shutdown"]).inc(); ack_tx .send(()) .expect("Failed to send shutdown ack to QuorumStore"); break; }, ProofCoordinatorCommand::CommitNotification(batches) => { + counters::QUORUM_STORE_MSG_COUNT.with_label_values(&["ProofCoordinator::commit_notification"]).inc(); for batch in batches { let digest = batch.digest(); if let Entry::Occupied(existing_proof) = self.batch_info_to_proof.entry(batch.clone()) { diff --git a/consensus/src/quorum_store/proof_manager.rs b/consensus/src/quorum_store/proof_manager.rs index 3cdac9a768ad3..a33e0c1165292 100644 --- a/consensus/src/quorum_store/proof_manager.rs +++ b/consensus/src/quorum_store/proof_manager.rs @@ -4,138 +4,37 @@ use super::batch_store::BatchStore; use crate::{ monitor, - quorum_store::{ - batch_generator::BackPressure, - 
counters, - utils::{BatchSortKey, ProofQueue}, - }, + quorum_store::{batch_generator::BackPressure, batch_proof_queue::BatchProofQueue, counters}, }; use aptos_consensus_types::{ - common::{Payload, PayloadFilter, ProofWithData}, + common::{Payload, PayloadFilter, ProofWithData, TxnSummaryWithExpiration}, + payload::{OptQuorumStorePayload, PayloadExecutionLimit}, proof_of_store::{BatchInfo, ProofOfStore, ProofOfStoreMsg}, request_response::{GetPayloadCommand, GetPayloadResponse}, + utils::PayloadTxnsSize, }; use aptos_logger::prelude::*; -use aptos_types::{transaction::SignedTransaction, PeerId}; +use aptos_types::PeerId; use futures::StreamExt; use futures_channel::mpsc::Receiver; -use rand::{seq::SliceRandom, thread_rng}; -use std::{ - cmp::min, - collections::{BTreeMap, HashMap, HashSet}, - sync::Arc, -}; +use std::{cmp::min, collections::HashSet, sync::Arc, time::Duration}; #[derive(Debug)] pub enum ProofManagerCommand { ReceiveProofs(ProofOfStoreMsg), - ReceiveBatches(Vec), + ReceiveBatches(Vec<(BatchInfo, Vec)>), CommitNotification(u64, Vec), Shutdown(tokio::sync::oneshot::Sender<()>), } -pub struct BatchQueue { - batch_store: Arc, - // Queue per peer to ensure fairness between peers and priority within peer - author_to_batches: HashMap>, -} - -impl BatchQueue { - pub fn new(batch_store: Arc) -> Self { - Self { - batch_store, - author_to_batches: HashMap::new(), - } - } - - pub fn add_batches(&mut self, batches: Vec) { - for batch in batches.into_iter() { - let queue = self.author_to_batches.entry(batch.author()).or_default(); - queue.insert(BatchSortKey::from_info(&batch), batch.clone()); - } - } - - pub fn remove_batch(&mut self, batch: &BatchInfo) { - if let Some(batch_tree) = self.author_to_batches.get_mut(&batch.author()) { - batch_tree.remove(&BatchSortKey::from_info(batch)); - } - } - - pub fn remove_expired_batches(&mut self) { - let authors = self.author_to_batches.keys().cloned().collect::>(); - for author in authors { - if let Some(batch_tree) = 
self.author_to_batches.get_mut(&author) { - batch_tree.retain(|_batch_key, batch| !batch.is_expired()); - } - } - } - - pub fn len(&self) -> usize { - self.author_to_batches - .values() - .map(|batch_tree| batch_tree.len()) - .sum() - } - - pub fn pull_batches( - &mut self, - max_txns: u64, - max_bytes: u64, - excluded_batches: Vec, - ) -> Vec<(BatchInfo, Vec)> { - let mut result: Vec<(BatchInfo, Vec)> = vec![]; - let mut num_txns = 0; - let mut num_bytes = 0; - let mut iters = vec![]; - let mut full = false; - for (_, batches) in self.author_to_batches.iter() { - iters.push(batches.iter().rev()); - } - while !iters.is_empty() { - iters.shuffle(&mut thread_rng()); - iters.retain_mut(|iter| { - if full { - return false; - } - if let Some((_sort_key, batch)) = iter.next() { - if excluded_batches.contains(batch) { - true - } else if num_txns + batch.num_txns() <= max_txns - && num_bytes + batch.num_bytes() <= max_bytes - { - if let Ok(mut persisted_value) = - self.batch_store.get_batch_from_local(batch.digest()) - { - if let Some(txns) = persisted_value.take_payload() { - num_txns += batch.num_txns(); - num_bytes += batch.num_bytes(); - result.push((batch.clone(), txns.clone())); - } - } else { - warn!("Couldn't find a batch in local storage while creating inline block: {:?}", batch.digest()); - } - true - } else { - full = true; - false - } - } else { - false - } - }) - } - result - } -} - pub struct ProofManager { - proofs_for_consensus: ProofQueue, - batch_queue: BatchQueue, + batch_proof_queue: BatchProofQueue, back_pressure_total_txn_limit: u64, remaining_total_txn_num: u64, back_pressure_total_proof_limit: u64, remaining_total_proof_num: u64, allow_batches_without_pos_in_proposal: bool, + enable_opt_quorum_store: bool, } impl ProofManager { @@ -145,31 +44,40 @@ impl ProofManager { back_pressure_total_proof_limit: u64, batch_store: Arc, allow_batches_without_pos_in_proposal: bool, + enable_opt_quorum_store: bool, ) -> Self { Self { - proofs_for_consensus: 
ProofQueue::new(my_peer_id), - batch_queue: BatchQueue::new(batch_store), + batch_proof_queue: BatchProofQueue::new(my_peer_id, batch_store), back_pressure_total_txn_limit, remaining_total_txn_num: 0, back_pressure_total_proof_limit, remaining_total_proof_num: 0, allow_batches_without_pos_in_proposal, + enable_opt_quorum_store, } } pub(crate) fn receive_proofs(&mut self, proofs: Vec) { for proof in proofs.into_iter() { - self.batch_queue.remove_batch(proof.info()); - self.proofs_for_consensus.push(proof); + self.batch_proof_queue.insert_proof(proof); } - (self.remaining_total_txn_num, self.remaining_total_proof_num) = - self.proofs_for_consensus.remaining_txns_and_proofs(); + self.update_remaining_txns_and_proofs(); } - pub(crate) fn receive_batches(&mut self, batches: Vec) { - if self.allow_batches_without_pos_in_proposal { - self.batch_queue.add_batches(batches); - } + fn update_remaining_txns_and_proofs(&mut self) { + sample!( + SampleRate::Duration(Duration::from_millis(200)), + (self.remaining_total_txn_num, self.remaining_total_proof_num) = + self.batch_proof_queue.remaining_txns_and_proofs(); + ); + } + + pub(crate) fn receive_batches( + &mut self, + batch_summaries: Vec<(BatchInfo, Vec)>, + ) { + self.batch_proof_queue.insert_batches(batch_summaries); + self.update_remaining_txns_and_proofs(); } pub(crate) fn handle_commit_notification( @@ -181,29 +89,16 @@ impl ProofManager { "QS: got clean request from execution at block timestamp {}", block_timestamp ); - self.batch_queue.remove_expired_batches(); - for batch in &batches { - self.batch_queue.remove_batch(batch); - } - self.proofs_for_consensus.mark_committed(batches); - self.proofs_for_consensus + self.batch_proof_queue.mark_committed(batches); + self.batch_proof_queue .handle_updated_block_timestamp(block_timestamp); - (self.remaining_total_txn_num, self.remaining_total_proof_num) = - self.proofs_for_consensus.remaining_txns_and_proofs(); + self.update_remaining_txns_and_proofs(); } pub(crate) fn 
handle_proposal_request(&mut self, msg: GetPayloadCommand) { match msg { - GetPayloadCommand::GetPayloadRequest( - max_txns, - max_bytes, - max_inline_txns, - max_inline_bytes, - return_non_full, - filter, - callback, - ) => { - let excluded_batches: HashSet<_> = match filter { + GetPayloadCommand::GetPayloadRequest(request) => { + let excluded_batches: HashSet<_> = match request.filter { PayloadFilter::Empty => HashSet::new(), PayloadFilter::DirectMempool(_) => { unreachable!() @@ -211,61 +106,109 @@ impl ProofManager { PayloadFilter::InQuorumStore(proofs) => proofs, }; - let (proof_block, proof_queue_fully_utilized) = self - .proofs_for_consensus - .pull_proofs(&excluded_batches, max_txns, max_bytes, return_non_full); + let max_txns_with_proof = request + .max_txns + .compute_pct(100 - request.opt_batch_txns_pct); + + let ( + proof_block, + txns_with_proof_size, + cur_unique_txns, + proof_queue_fully_utilized, + ) = self.batch_proof_queue.pull_proofs( + &excluded_batches, + max_txns_with_proof, + request.max_txns_after_filtering, + request.soft_max_txns_after_filtering, + request.return_non_full, + request.block_timestamp, + ); - counters::NUM_BATCHES_WITHOUT_PROOF_OF_STORE.observe(self.batch_queue.len() as f64); + counters::NUM_BATCHES_WITHOUT_PROOF_OF_STORE + .observe(self.batch_proof_queue.num_batches_without_proof() as f64); counters::PROOF_QUEUE_FULLY_UTILIZED .observe(if proof_queue_fully_utilized { 1.0 } else { 0.0 }); - let mut inline_block: Vec<(BatchInfo, Vec)> = vec![]; - let cur_txns: u64 = proof_block.iter().map(|p| p.num_txns()).sum(); - let cur_bytes: u64 = proof_block.iter().map(|p| p.num_bytes()).sum(); - - if self.allow_batches_without_pos_in_proposal && proof_queue_fully_utilized { - inline_block = self.batch_queue.pull_batches( - min(max_txns - cur_txns, max_inline_txns), - min(max_bytes - cur_bytes, max_inline_bytes), - excluded_batches + let (opt_batches, opt_batch_txns_size) = if self.enable_opt_quorum_store { + // TODO(ibalajiarun): 
Support unique txn calculation + let max_opt_batch_txns_size = request.max_txns - txns_with_proof_size; + let (opt_batches, opt_payload_size, _) = self.batch_proof_queue.pull_batches( + &excluded_batches .iter() .cloned() .chain(proof_block.iter().map(|proof| proof.info().clone())) .collect(), + max_opt_batch_txns_size, + request.max_txns_after_filtering, + request.soft_max_txns_after_filtering, + request.return_non_full, + request.block_timestamp, ); - } - let inline_txns = inline_block - .iter() - .map(|(_, txns)| txns.len()) - .sum::(); - counters::NUM_INLINE_BATCHES.observe(inline_block.len() as f64); - counters::NUM_INLINE_TXNS.observe(inline_txns as f64); - let res = GetPayloadResponse::GetPayloadResponse( - if proof_block.is_empty() && inline_block.is_empty() { - Payload::empty(true, self.allow_batches_without_pos_in_proposal) - } else if inline_block.is_empty() { - trace!( - "QS: GetBlockRequest excluded len {}, block len {}", - excluded_batches.len(), - proof_block.len() - ); - Payload::InQuorumStore(ProofWithData::new(proof_block)) + (opt_batches, opt_payload_size) + } else { + (Vec::new(), PayloadTxnsSize::zero()) + }; + + let cur_txns = txns_with_proof_size + opt_batch_txns_size; + let (inline_block, inline_block_size) = + if self.allow_batches_without_pos_in_proposal && proof_queue_fully_utilized { + let mut max_inline_txns_to_pull = request + .max_txns + .saturating_sub(cur_txns) + .minimum(request.max_inline_txns); + max_inline_txns_to_pull.set_count(min( + max_inline_txns_to_pull.count(), + request + .max_txns_after_filtering + .saturating_sub(cur_unique_txns), + )); + let (inline_batches, inline_payload_size, _) = + self.batch_proof_queue.pull_batches_with_transactions( + &excluded_batches + .iter() + .cloned() + .chain(proof_block.iter().map(|proof| proof.info().clone())) + .collect(), + max_inline_txns_to_pull, + request.max_txns_after_filtering, + request.soft_max_txns_after_filtering, + request.return_non_full, + request.block_timestamp, + ); + 
(inline_batches, inline_payload_size) } else { - trace!( - "QS: GetBlockRequest excluded len {}, block len {}, inline len {}", - excluded_batches.len(), - proof_block.len(), - inline_block.len() - ); - Payload::QuorumStoreInlineHybrid( - inline_block, - ProofWithData::new(proof_block), - None, - ) - }, - ); - match callback.send(Ok(res)) { + (Vec::new(), PayloadTxnsSize::zero()) + }; + counters::NUM_INLINE_BATCHES.observe(inline_block.len() as f64); + counters::NUM_INLINE_TXNS.observe(inline_block_size.count() as f64); + + let response = if self.enable_opt_quorum_store { + let inline_batches = inline_block.into(); + Payload::OptQuorumStore(OptQuorumStorePayload::new( + inline_batches, + opt_batches.into(), + proof_block.into(), + PayloadExecutionLimit::None, + )) + } else if proof_block.is_empty() && inline_block.is_empty() { + Payload::empty(true, self.allow_batches_without_pos_in_proposal) + } else { + trace!( + "QS: GetBlockRequest excluded len {}, block len {}, inline len {}", + excluded_batches.len(), + proof_block.len(), + inline_block.len() + ); + Payload::QuorumStoreInlineHybrid( + inline_block, + ProofWithData::new(proof_block), + None, + ) + }; + + let res = GetPayloadResponse::GetPayloadResponse(response); + match request.callback.send(Ok(res)) { Ok(_) => (), Err(err) => debug!("BlockResponse receiver not available! 
error {:?}", err), } @@ -275,6 +218,21 @@ impl ProofManager { /// return true when quorum store is back pressured pub(crate) fn qs_back_pressure(&self) -> BackPressure { + if self.remaining_total_txn_num > self.back_pressure_total_txn_limit + || self.remaining_total_proof_num > self.back_pressure_total_proof_limit + { + sample!( + SampleRate::Duration(Duration::from_millis(200)), + info!( + "Quorum store is back pressured with {} txns, limit: {}, proofs: {}, limit: {}", + self.remaining_total_txn_num, + self.back_pressure_total_txn_limit, + self.remaining_total_proof_num, + self.back_pressure_total_proof_limit + ); + ); + } + BackPressure { txn_count: self.remaining_total_txn_num > self.back_pressure_total_txn_limit, proof_count: self.remaining_total_proof_num > self.back_pressure_total_proof_limit, @@ -311,18 +269,22 @@ impl ProofManager { monitor!("proof_manager_handle_command", { match msg { ProofManagerCommand::Shutdown(ack_tx) => { + counters::QUORUM_STORE_MSG_COUNT.with_label_values(&["ProofManager::shutdown"]).inc(); ack_tx .send(()) .expect("Failed to send shutdown ack to QuorumStore"); break; }, ProofManagerCommand::ReceiveProofs(proofs) => { + counters::QUORUM_STORE_MSG_COUNT.with_label_values(&["ProofManager::receive_proofs"]).inc(); self.receive_proofs(proofs.take()); }, ProofManagerCommand::ReceiveBatches(batches) => { + counters::QUORUM_STORE_MSG_COUNT.with_label_values(&["ProofManager::receive_batches"]).inc(); self.receive_batches(batches); } ProofManagerCommand::CommitNotification(block_timestamp, batches) => { + counters::QUORUM_STORE_MSG_COUNT.with_label_values(&["ProofManager::commit_notification"]).inc(); self.handle_commit_notification( block_timestamp, batches, diff --git a/consensus/src/quorum_store/quorum_store_builder.rs b/consensus/src/quorum_store/quorum_store_builder.rs index 1dca357991396..3a5edb1f9a323 100644 --- a/consensus/src/quorum_store/quorum_store_builder.rs +++ b/consensus/src/quorum_store/quorum_store_builder.rs @@ -3,11 
+3,11 @@ use super::quorum_store_db::QuorumStoreStorage; use crate::{ - consensus_observer::publisher::ConsensusPublisher, + consensus_observer::publisher::consensus_publisher::ConsensusPublisher, error::error_kind, network::{IncomingBatchRetrievalRequest, NetworkSender}, network_interface::ConsensusMsg, - payload_manager::PayloadManager, + payload_manager::{DirectMempoolPayloadManager, QuorumStorePayloadManager, TPayloadManager}, quorum_store::{ batch_coordinator::{BatchCoordinator, BatchCoordinatorCommand}, batch_generator::{BackPressure, BatchGenerator, BatchGeneratorCommand}, @@ -51,7 +51,7 @@ impl QuorumStoreBuilder { &mut self, consensus_publisher: Option>, ) -> ( - Arc, + Arc, Option>, ) { match self { @@ -100,10 +100,10 @@ impl DirectMempoolInnerBuilder { fn init_payload_manager( &mut self, ) -> ( - Arc, + Arc, Option>, ) { - (Arc::from(PayloadManager::DirectMempool), None) + (Arc::from(DirectMempoolPayloadManager::new()), None) } fn start(self) { @@ -268,6 +268,7 @@ impl InnerBuilder { batch_reader } + #[allow(clippy::unwrap_used)] fn spawn_quorum_store( mut self, ) -> ( @@ -364,6 +365,7 @@ impl InnerBuilder { * self.num_validators, self.batch_store.clone().unwrap(), self.config.allow_batches_without_pos_in_proposal, + self.config.enable_opt_quorum_store, ); spawn_named!( "proof_manager", @@ -432,17 +434,18 @@ impl InnerBuilder { &mut self, consensus_publisher: Option>, ) -> ( - Arc, + Arc, Option>, ) { let batch_reader = self.create_batch_store(); ( - Arc::from(PayloadManager::InQuorumStore( + Arc::from(QuorumStorePayloadManager::new( batch_reader, // TODO: remove after splitting out clean requests self.coordinator_tx.clone(), consensus_publisher, + self.verifier.get_ordered_account_addresses(), )), Some(self.quorum_store_msg_tx.clone()), ) diff --git a/consensus/src/quorum_store/quorum_store_coordinator.rs b/consensus/src/quorum_store/quorum_store_coordinator.rs index 089e74c499be8..000c99c2cc7c4 100644 --- 
a/consensus/src/quorum_store/quorum_store_coordinator.rs +++ b/consensus/src/quorum_store/quorum_store_coordinator.rs @@ -5,7 +5,7 @@ use crate::{ monitor, quorum_store::{ batch_coordinator::BatchCoordinatorCommand, batch_generator::BatchGeneratorCommand, - proof_coordinator::ProofCoordinatorCommand, proof_manager::ProofManagerCommand, + counters, proof_coordinator::ProofCoordinatorCommand, proof_manager::ProofManagerCommand, }, round_manager::VerifiedEvent, }; @@ -54,6 +54,9 @@ impl QuorumStoreCoordinator { monitor!("quorum_store_coordinator_loop", { match cmd { CoordinatorCommand::CommitNotification(block_timestamp, batches) => { + counters::QUORUM_STORE_MSG_COUNT + .with_label_values(&["QSCoordinator::commit_notification"]) + .inc(); // TODO: need a callback or not? self.proof_coordinator_cmd_tx .send(ProofCoordinatorCommand::CommitNotification(batches.clone())) @@ -77,6 +80,9 @@ impl QuorumStoreCoordinator { .expect("Failed to send to BatchGenerator"); }, CoordinatorCommand::Shutdown(ack_tx) => { + counters::QUORUM_STORE_MSG_COUNT + .with_label_values(&["QSCoordinator::shutdown"]) + .inc(); // Note: Shutdown is done from the back of the quorum store pipeline to the // front, so senders are always shutdown before receivers. This avoids sending // messages through closed channels during shutdown. 
diff --git a/consensus/src/quorum_store/quorum_store_db.rs b/consensus/src/quorum_store/quorum_store_db.rs index 147c4cfc5e30b..fccb002fc0b32 100644 --- a/consensus/src/quorum_store/quorum_store_db.rs +++ b/consensus/src/quorum_store/quorum_store_db.rs @@ -109,8 +109,7 @@ impl QuorumStoreStorage for QuorumStoreDB { for (epoch, batch_id) in epoch_batch_id { assert!(current_epoch >= epoch); if epoch < current_epoch { - self.delete_batch_id(epoch) - .expect("Could not delete from db"); + self.delete_batch_id(epoch)?; } else { ret = Some(batch_id); } @@ -123,41 +122,53 @@ impl QuorumStoreStorage for QuorumStoreDB { } } -pub(crate) struct MockQuorumStoreDB {} +#[cfg(test)] +pub(crate) use mock::MockQuorumStoreDB; -impl MockQuorumStoreDB { - #[cfg(test)] - pub fn new() -> Self { - Self {} - } -} +#[cfg(test)] +pub mod mock { + use super::*; + pub struct MockQuorumStoreDB {} -impl QuorumStoreStorage for MockQuorumStoreDB { - fn delete_batches(&self, _: Vec) -> Result<(), DbError> { - Ok(()) + impl MockQuorumStoreDB { + pub fn new() -> Self { + Self {} + } } - fn get_all_batches(&self) -> Result> { - Ok(HashMap::new()) + impl Default for MockQuorumStoreDB { + fn default() -> Self { + Self::new() + } } - fn save_batch(&self, _: PersistedValue) -> Result<(), DbError> { - Ok(()) - } + impl QuorumStoreStorage for MockQuorumStoreDB { + fn delete_batches(&self, _: Vec) -> Result<(), DbError> { + Ok(()) + } - fn get_batch(&self, _: &HashValue) -> Result, DbError> { - Ok(None) - } + fn get_all_batches(&self) -> Result> { + Ok(HashMap::new()) + } - fn delete_batch_id(&self, _: u64) -> Result<(), DbError> { - Ok(()) - } + fn save_batch(&self, _: PersistedValue) -> Result<(), DbError> { + Ok(()) + } - fn clean_and_get_batch_id(&self, _: u64) -> Result, DbError> { - Ok(Some(BatchId::new_for_test(0))) - } + fn get_batch(&self, _: &HashValue) -> Result, DbError> { + Ok(None) + } - fn save_batch_id(&self, _: u64, _: BatchId) -> Result<(), DbError> { - Ok(()) + fn delete_batch_id(&self, 
_: u64) -> Result<(), DbError> { + Ok(()) + } + + fn clean_and_get_batch_id(&self, _: u64) -> Result, DbError> { + Ok(Some(BatchId::new_for_test(0))) + } + + fn save_batch_id(&self, _: u64, _: BatchId) -> Result<(), DbError> { + Ok(()) + } } } diff --git a/consensus/src/quorum_store/tests/batch_proof_queue_test.rs b/consensus/src/quorum_store/tests/batch_proof_queue_test.rs new file mode 100644 index 0000000000000..2741ea3a6a912 --- /dev/null +++ b/consensus/src/quorum_store/tests/batch_proof_queue_test.rs @@ -0,0 +1,788 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::quorum_store::{ + batch_proof_queue::BatchProofQueue, tests::batch_store_test::batch_store_for_test, +}; +use aptos_consensus_types::{ + common::TxnSummaryWithExpiration, + proof_of_store::{BatchId, BatchInfo, ProofOfStore}, + utils::PayloadTxnsSize, +}; +use aptos_crypto::HashValue; +use aptos_types::{aggregate_signature::AggregateSignature, PeerId}; +use maplit::hashset; +use std::{collections::HashSet, time::Duration}; + +/// Return a ProofOfStore with minimal fields used by ProofQueue tests. 
+fn proof_of_store( + author: PeerId, + batch_id: BatchId, + gas_bucket_start: u64, + expiration: u64, +) -> ProofOfStore { + ProofOfStore::new( + BatchInfo::new( + author, + batch_id, + 0, + expiration, + HashValue::random(), + 1, + 1, + gas_bucket_start, + ), + AggregateSignature::empty(), + ) +} + +fn proof_of_store_with_size( + author: PeerId, + batch_id: BatchId, + gas_bucket_start: u64, + expiration: u64, + num_txns: u64, +) -> ProofOfStore { + ProofOfStore::new( + BatchInfo::new( + author, + batch_id, + 0, + expiration, + HashValue::random(), + num_txns, + num_txns, + gas_bucket_start, + ), + AggregateSignature::empty(), + ) +} + +#[test] +fn test_proof_queue_sorting() { + let my_peer_id = PeerId::random(); + let batch_store = batch_store_for_test(5 * 1024 * 1024); + let mut proof_queue = BatchProofQueue::new(my_peer_id, batch_store); + + let author_0 = PeerId::random(); + let author_1 = PeerId::random(); + + let author_0_batches = vec![ + proof_of_store(author_0, BatchId::new_for_test(0), 100, 1), + proof_of_store(author_0, BatchId::new_for_test(1), 200, 1), + proof_of_store(author_0, BatchId::new_for_test(2), 50, 1), + proof_of_store(author_0, BatchId::new_for_test(3), 300, 1), + ]; + for batch in author_0_batches { + proof_queue.insert_proof(batch); + } + let author_1_batches = vec![ + proof_of_store(author_1, BatchId::new_for_test(4), 500, 1), + proof_of_store(author_1, BatchId::new_for_test(5), 400, 1), + proof_of_store(author_1, BatchId::new_for_test(6), 600, 1), + proof_of_store(author_1, BatchId::new_for_test(7), 50, 1), + ]; + for batch in author_1_batches { + proof_queue.insert_proof(batch); + } + + // Expect: [600, 300] + let (pulled, _, num_unique_txns, _) = proof_queue.pull_proofs( + &hashset![], + PayloadTxnsSize::new(4, 10), + 2, + 2, + true, + aptos_infallible::duration_since_epoch(), + ); + let mut count_author_0 = 0; + let mut count_author_1 = 0; + let mut prev: Option<&ProofOfStore> = None; + for batch in &pulled { + if let Some(prev) = 
prev { + assert!(prev.gas_bucket_start() >= batch.gas_bucket_start()); + } else { + assert_eq!(batch.gas_bucket_start(), 600); + } + if batch.author() == author_0 { + count_author_0 += 1; + } else { + count_author_1 += 1; + } + prev = Some(batch); + } + assert_eq!(count_author_0, 1); + assert_eq!(count_author_1, 1); + assert_eq!(num_unique_txns, 2); + + // Expect: [600, 500, 300, 100] + let (pulled, _, num_unique_txns, _) = proof_queue.pull_proofs( + &hashset![], + PayloadTxnsSize::new(6, 10), + 4, + 4, + true, + aptos_infallible::duration_since_epoch(), + ); + let mut count_author_0 = 0; + let mut count_author_1 = 0; + let mut prev: Option<&ProofOfStore> = None; + for batch in &pulled { + if let Some(prev) = prev { + assert!(prev.gas_bucket_start() >= batch.gas_bucket_start()); + } else { + assert_eq!(batch.gas_bucket_start(), 600); + } + if batch.author() == author_0 { + count_author_0 += 1; + } else { + count_author_1 += 1; + } + prev = Some(batch); + } + assert_eq!(num_unique_txns, 4); + assert_eq!(count_author_0, 2); + assert_eq!(count_author_1, 2); +} + +#[test] +fn test_proof_calculate_remaining_txns_and_proofs() { + let my_peer_id = PeerId::random(); + let batch_store = batch_store_for_test(5 * 1024 * 1024); + let mut proof_queue = BatchProofQueue::new(my_peer_id, batch_store); + let now_in_secs = aptos_infallible::duration_since_epoch().as_secs() as u64; + let now_in_usecs = aptos_infallible::duration_since_epoch().as_micros() as u64; + let author_0 = PeerId::random(); + let author_1 = PeerId::random(); + let txns = vec![ + TxnSummaryWithExpiration::new(PeerId::ONE, 0, now_in_secs + 1, HashValue::zero()), + TxnSummaryWithExpiration::new(PeerId::ONE, 1, now_in_secs + 1, HashValue::zero()), + TxnSummaryWithExpiration::new(PeerId::ONE, 2, now_in_secs + 1, HashValue::zero()), + TxnSummaryWithExpiration::new(PeerId::ONE, 3, now_in_secs + 1, HashValue::zero()), + ]; + + let author_0_batches = vec![ + proof_of_store( + author_0, + BatchId::new_for_test(0), + 100, 
+ now_in_usecs + 50000, + ), + proof_of_store( + author_0, + BatchId::new_for_test(1), + 200, + now_in_usecs + 70000, + ), + proof_of_store(author_0, BatchId::new_for_test(2), 50, now_in_usecs + 20000), + proof_of_store( + author_0, + BatchId::new_for_test(3), + 300, + now_in_usecs + 10000, + ), + ]; + + let author_1_batches = vec![ + proof_of_store( + author_1, + BatchId::new_for_test(4), + 500, + now_in_usecs + 20000, + ), + proof_of_store( + author_1, + BatchId::new_for_test(5), + 400, + now_in_usecs + 30000, + ), + proof_of_store( + author_1, + BatchId::new_for_test(6), + 600, + now_in_usecs + 50000, + ), + proof_of_store(author_1, BatchId::new_for_test(7), 50, now_in_usecs + 60000), + ]; + + let info_1 = author_0_batches[0].info().clone(); + let info_2 = author_0_batches[1].info().clone(); + let info_3 = author_0_batches[2].info().clone(); + let info_4 = author_0_batches[3].info().clone(); + let info_5 = author_1_batches[0].info().clone(); + let info_6 = author_1_batches[1].info().clone(); + let info_7 = author_1_batches[2].info().clone(); + let info_8 = author_1_batches[3].info().clone(); + + proof_queue.insert_batches(vec![(info_1.clone(), vec![txns[0]])]); + // batch_summaries: [1 -> txn_0] + assert_eq!(proof_queue.remaining_txns_and_proofs(), (0, 0)); + assert_eq!(proof_queue.batch_summaries_len(), 1); + + proof_queue.insert_proof(author_0_batches[0].clone()); + // txns: [txn_0] + // proofs: [1] + // batch_summaries: [1 -> txn_0] + assert_eq!(proof_queue.remaining_txns_and_proofs(), (1, 1)); + assert_eq!(proof_queue.batch_summaries_len(), 1); + + proof_queue.insert_proof(author_0_batches[1].clone()); + // txns: [txn_0] + txns(proof_2) + // proofs: [1, 2] + // batch_summaries: [1 -> txn_0] + assert_eq!(proof_queue.remaining_txns_and_proofs(), (2, 2)); + assert_eq!(proof_queue.batch_summaries_len(), 1); + + proof_queue.insert_batches(vec![(info_2, vec![txns[1]])]); + // txns: [txn_0, txn_1] + // proofs: [1, 2] + // batch_summaries: [1 -> txn_0, 2 -> txn_1] + 
assert_eq!(proof_queue.remaining_txns_and_proofs(), (2, 2)); + assert_eq!(proof_queue.batch_summaries_len(), 2); + + proof_queue.insert_batches(vec![(info_3.clone(), vec![txns[0]])]); + // txns: [txn_0, txn_1] + // proofs: [1, 2] + // batch_summaries: [1 -> txn_0, 2 -> txn_1, 3 -> txn_0] + assert_eq!(proof_queue.remaining_txns_and_proofs(), (2, 2)); + assert_eq!(proof_queue.batch_summaries_len(), 3); + + // Adding the batch again shouldn't have an effect + proof_queue.insert_batches(vec![(info_3.clone(), vec![txns[0]])]); + // txns: [txn_0, txn_1] + // proofs: [1, 2] + // batch_summaries: [1 -> txn_0, 2 -> txn_1, 3 -> txn_0] + assert_eq!(proof_queue.remaining_txns_and_proofs(), (2, 2)); + assert_eq!(proof_queue.batch_summaries_len(), 3); + + proof_queue.insert_proof(author_0_batches[2].clone()); + // txns: [txn_0, txn_1] + // proofs: [1, 2, 3] + // batch_summaries: [1 -> txn_0, 2 -> txn_1, 3 -> txn_0] + assert_eq!(proof_queue.remaining_txns_and_proofs(), (2, 3)); + assert_eq!(proof_queue.batch_summaries_len(), 3); + + // Adding the batch again shouldn't have an effect + proof_queue.insert_batches(vec![(info_3.clone(), vec![txns[0]])]); + // txns: [txn_0, txn_1] + // proofs: [1, 2, 3] + // batch_summaries: [1 -> txn_0, 2 -> txn_1, 3 -> txn_0] + assert_eq!(proof_queue.remaining_txns_and_proofs(), (2, 3)); + assert_eq!(proof_queue.batch_summaries_len(), 3); + + proof_queue.insert_proof(author_1_batches[0].clone()); + // txns: [txn_0, txn_1] + txns(proof_5) + // proofs: [1, 2, 3, 5] + // batch_summaries: [1 -> txn_0, 2 -> txn_1, 3 -> txn_0] + assert_eq!(proof_queue.remaining_txns_and_proofs(), (3, 4)); + assert_eq!(proof_queue.batch_summaries_len(), 3); + + proof_queue.insert_batches(vec![(info_5, vec![txns[1]])]); + // txns: [txn_0, txn_1] + // proofs: [1, 2, 3, 5] + // batch_summaries: [1 -> txn_0, 2 -> txn_1, 3 -> txn_0, 5 -> txn_1] + assert_eq!(proof_queue.remaining_txns_and_proofs(), (2, 4)); + assert_eq!(proof_queue.batch_summaries_len(), 4); + + 
proof_queue.insert_batches(vec![(info_4, vec![txns[2]])]); + // txns: [txn_0, txn_1] + // proofs: [1, 2, 3, 5] + // batch_summaries: [1 -> txn_0, 2 -> txn_1, 3 -> txn_0, 4 -> txn_2, 5 -> txn_1] + assert_eq!(proof_queue.remaining_txns_and_proofs(), (2, 4)); + assert_eq!(proof_queue.batch_summaries_len(), 5); + + proof_queue.insert_proof(author_0_batches[3].clone()); + // txns: [txn_0, txn_1, txn_2] + // proofs: [1, 2, 3, 4, 5] + // batch_summaries: [1 -> txn_0, 2 -> txn_1, 3 -> txn_0, 4 -> txn_2, 5 -> txn_1] + assert_eq!(proof_queue.remaining_txns_and_proofs(), (3, 5)); + assert_eq!(proof_queue.batch_summaries_len(), 5); + + proof_queue.mark_committed(vec![info_1.clone()]); + // txns: [txn_0, txn_1, txn_2] + // proofs: [2, 3, 4, 5] + // batch_summaries: [2 -> txn_1, 3 -> txn_0, 4 -> txn_2, 5 -> txn_1] + assert_eq!(proof_queue.remaining_txns_and_proofs(), (3, 4)); + assert_eq!(proof_queue.batch_summaries_len(), 4); + + proof_queue.insert_proof(author_1_batches[1].clone()); + // txns: [txn_0, txn_1, txn_2] + txns(proof_6) + // proofs: [2, 3, 4, 5, 6] + // batch_summaries: [2 -> txn_1, 3 -> txn_0, 4 -> txn_2, 5 -> txn_1] + assert_eq!(proof_queue.remaining_txns_and_proofs(), (4, 5)); + assert_eq!(proof_queue.batch_summaries_len(), 4); + + proof_queue.handle_updated_block_timestamp(now_in_usecs + 20000); + // Expires info_3, info_4, info_5 + // txns: [txn_1] + txns(proof_6) + // proofs: [2, 6] + // batch_summaries: [2 -> txn_1] + assert_eq!(proof_queue.remaining_txns_and_proofs(), (2, 2)); + assert_eq!(proof_queue.batch_summaries_len(), 1); + + // Adding an expired batch again + proof_queue.insert_batches(vec![(info_3, vec![txns[0]])]); + // txns: [txn_1] + txns(proof_6) + // proofs: [2, 6] + // batch_summaries: [2 -> txn_1, 3 -> txn_0] + assert_eq!(proof_queue.remaining_txns_and_proofs(), (2, 2)); + assert_eq!(proof_queue.batch_summaries_len(), 2); + + // Adding an expired proof again. 
Should have no effect + proof_queue.insert_proof(author_0_batches[2].clone()); + // txns: [txn_1] + txns(proof_6) + // proofs: [2, 6] + // batch_summaries: [2 -> txn_1, 3 -> txn_0] + assert_eq!(proof_queue.remaining_txns_and_proofs(), (2, 2)); + assert_eq!(proof_queue.batch_summaries_len(), 2); + + proof_queue.insert_batches(vec![(info_7, vec![txns[3]])]); + // txns: [txn_1] + txns(proof_6) + // proofs: [2, 6] + // batch_summaries: [2 -> txn_1, 7 -> txn_3, 3 -> txn_0] + assert_eq!(proof_queue.remaining_txns_and_proofs(), (2, 2)); + assert_eq!(proof_queue.batch_summaries_len(), 3); + + proof_queue.handle_updated_block_timestamp(now_in_usecs + 30000); + // Expires info_6, info_3 + // txns: [txn_1] + // proofs: [2] + // batch_summaries: [2 -> txn_1, 7 -> txn_3] + assert_eq!(proof_queue.remaining_txns_and_proofs(), (1, 1)); + assert_eq!(proof_queue.batch_summaries_len(), 2); + + proof_queue.insert_batches(vec![(info_6, vec![txns[0]])]); + // Expired batch not added to batch summaries + // txns: [txn_1] + // proofs: [2] + // batch_summaries: [2 -> txn_1, 7 -> txn_3, 6 -> txn_0] + assert_eq!(proof_queue.remaining_txns_and_proofs(), (1, 1)); + assert_eq!(proof_queue.batch_summaries_len(), 3); + + proof_queue.insert_proof(author_1_batches[2].clone()); + // txns: [txn_1, txn_3] + // proofs: [2, 7] + // batch_summaries: [2 -> txn_1, 7 -> txn_3, 6 -> txn_0] + assert_eq!(proof_queue.remaining_txns_and_proofs(), (2, 2)); + assert_eq!(proof_queue.batch_summaries_len(), 3); + + proof_queue.insert_proof(author_1_batches[3].clone()); + // txns: [txn_1, txn_3] + txns(proof_8) + // proofs: [2, 7, 8] + // batch_summaries: [2 -> txn_1, 7 -> txn_3, 6 -> txn_0] + assert_eq!(proof_queue.remaining_txns_and_proofs(), (3, 3)); + assert_eq!(proof_queue.batch_summaries_len(), 3); + + proof_queue.mark_committed(vec![info_8.clone()]); + // txns: [txn_1, txn_3] + // proofs: [2, 7] + // batch_summaries: [2 -> txn_1, 7 -> txn_3, 6 -> txn_0] + assert_eq!(proof_queue.remaining_txns_and_proofs(), (2, 
2)); + assert_eq!(proof_queue.batch_summaries_len(), 3); + + proof_queue.insert_batches(vec![(info_8, vec![txns[0]])]); + // Committed batch not added to batch summaries + // txns: [txn_1, txn_3] + // proofs: [2, 7] + // batch_summaries: [2 -> txn_1, 7 -> txn_3, 6 -> txn_0] + assert_eq!(proof_queue.remaining_txns_and_proofs(), (2, 2)); + assert_eq!(proof_queue.batch_summaries_len(), 3); + + proof_queue.insert_proof(author_1_batches[3].clone()); + // Committed proof added again. Should have no effect + // txns: [txn_1, txn_3] + // proofs: [2, 7, 8] + // batch_summaries: [2 -> txn_1, 7 -> txn_3, 6 -> txn_0] + assert_eq!(proof_queue.remaining_txns_and_proofs(), (2, 2)); + assert_eq!(proof_queue.batch_summaries_len(), 3); + + proof_queue.handle_updated_block_timestamp(now_in_usecs + 70000); + // Expires info_2, info_7 + // txns: [] + // proofs: [] + // batch_summaries: [3 -> txn_0, 6 -> txn_0, 8 -> txn_0] + assert_eq!(proof_queue.remaining_txns_and_proofs(), (0, 0)); + assert_eq!(proof_queue.batch_summaries_len(), 0); +} + +#[test] +fn test_proof_pull_proofs_with_duplicates() { + let my_peer_id = PeerId::random(); + let batch_store = batch_store_for_test(5 * 1024 * 1024); + let mut proof_queue = BatchProofQueue::new(my_peer_id, batch_store); + let now_in_secs = aptos_infallible::duration_since_epoch().as_secs() as u64; + let now_in_usecs = now_in_secs * 1_000_000; + let txns = vec![ + TxnSummaryWithExpiration::new(PeerId::ONE, 0, now_in_secs + 2, HashValue::zero()), + TxnSummaryWithExpiration::new(PeerId::ONE, 1, now_in_secs + 1, HashValue::zero()), + TxnSummaryWithExpiration::new(PeerId::ONE, 2, now_in_secs + 3, HashValue::zero()), + TxnSummaryWithExpiration::new(PeerId::ONE, 3, now_in_secs + 4, HashValue::zero()), + ]; + + let author_0 = PeerId::random(); + let author_1 = PeerId::random(); + + let author_0_batches = vec![ + proof_of_store( + author_0, + BatchId::new_for_test(0), + 100, + now_in_usecs + 1_100_000, + ), + proof_of_store( + author_0, + 
BatchId::new_for_test(1), + 200, + now_in_usecs + 3_000_000, + ), + proof_of_store( + author_0, + BatchId::new_for_test(2), + 50, + now_in_usecs + 5_000_000, + ), + proof_of_store( + author_0, + BatchId::new_for_test(3), + 300, + now_in_usecs + 4_000_000, + ), + ]; + + let author_1_batches = vec![ + proof_of_store( + author_1, + BatchId::new_for_test(4), + 500, + now_in_usecs + 4_000_000, + ), + proof_of_store( + author_1, + BatchId::new_for_test(5), + 400, + now_in_usecs + 2_500_000, + ), + proof_of_store( + author_1, + BatchId::new_for_test(6), + 600, + now_in_usecs + 3_500_000, + ), + proof_of_store( + author_1, + BatchId::new_for_test(7), + 50, + now_in_usecs + 4_500_000, + ), + ]; + + let info_0 = author_0_batches[0].info().clone(); + let info_7 = author_1_batches[2].info().clone(); + + proof_queue.insert_batches(vec![(author_0_batches[0].info().clone(), vec![txns[0]])]); + proof_queue.insert_batches(vec![(author_0_batches[1].info().clone(), vec![txns[1]])]); + proof_queue.insert_batches(vec![(author_0_batches[2].info().clone(), vec![txns[2]])]); + proof_queue.insert_batches(vec![(author_0_batches[3].info().clone(), vec![txns[0]])]); + + for batch in author_0_batches { + proof_queue.insert_proof(batch); + } + + proof_queue.insert_batches(vec![(author_1_batches[0].info().clone(), vec![txns[1]])]); + proof_queue.insert_batches(vec![(author_1_batches[1].info().clone(), vec![txns[2]])]); + proof_queue.insert_batches(vec![(author_1_batches[2].info().clone(), vec![txns[3]])]); + proof_queue.insert_batches(vec![(author_1_batches[3].info().clone(), vec![txns[0]])]); + + for batch in author_1_batches { + proof_queue.insert_proof(batch); + } + assert_eq!(proof_queue.remaining_txns_and_proofs(), (4, 8)); + + let result = proof_queue.pull_proofs( + &hashset![], + PayloadTxnsSize::new(8, 400), + 4, + 4, + true, + Duration::from_micros(now_in_usecs), + ); + assert_eq!(result.2, 4); + + let mut pulled_txns = HashSet::new(); + for proof in result.0 { + match proof.batch_id() 
{ + BatchId { id: 0, nonce: 0 } => pulled_txns.insert(0), + BatchId { id: 1, nonce: 0 } => pulled_txns.insert(1), + BatchId { id: 2, nonce: 0 } => pulled_txns.insert(2), + BatchId { id: 3, nonce: 0 } => pulled_txns.insert(0), + BatchId { id: 4, nonce: 0 } => pulled_txns.insert(1), + BatchId { id: 5, nonce: 0 } => pulled_txns.insert(2), + BatchId { id: 6, nonce: 0 } => pulled_txns.insert(3), + BatchId { id: 7, nonce: 0 } => pulled_txns.insert(0), + _ => panic!("Unexpected batch id"), + }; + } + assert_eq!(pulled_txns.len(), 4); + + let result = proof_queue.pull_proofs( + &hashset![info_0.clone()], + PayloadTxnsSize::new(8, 400), + 4, + 4, + true, + Duration::from_micros(now_in_usecs), + ); + assert_eq!(result.0.len(), 7); + // filtered_txns: txn_0 (included in excluded batches) + assert_eq!(result.2, 3); + + proof_queue.handle_updated_block_timestamp(now_in_usecs + 500_000); + // Nothing changes + let result = proof_queue.pull_proofs( + &hashset![], + PayloadTxnsSize::new(8, 400), + 5, + 5, + true, + Duration::from_micros(now_in_usecs + 500_100), + ); + assert_eq!(result.2, 4); + + proof_queue.handle_updated_block_timestamp(now_in_usecs + 1_000_000); + // txn_1 expired + let result = proof_queue.pull_proofs( + &hashset![], + PayloadTxnsSize::new(8, 400), + 5, + 5, + true, + Duration::from_micros(now_in_usecs + 1_000_100), + ); + assert_eq!(result.0.len(), 8); + assert_eq!(result.2, 3); + + proof_queue.handle_updated_block_timestamp(now_in_usecs + 1_200_000); + // author_0_batches[0] is removed. txn_1 expired. + let result = proof_queue.pull_proofs( + &hashset![], + PayloadTxnsSize::new(8, 400), + 4, + 4, + true, + Duration::from_micros(now_in_usecs + 1_200_100), + ); + assert_eq!(result.0.len(), 7); + assert_eq!(result.2, 3); + + proof_queue.handle_updated_block_timestamp(now_in_usecs + 2_000_000); + // author_0_batches[0] is removed. txn_0, txn_1 are expired. 
+ let result = proof_queue.pull_proofs( + &hashset![], + PayloadTxnsSize::new(8, 400), + 4, + 4, + true, + Duration::from_micros(now_in_usecs + 2_000_100), + ); + assert_eq!(result.0.len(), 7); + assert_eq!(result.2, 2); + + proof_queue.handle_updated_block_timestamp(now_in_usecs + 2_500_000); + // author_0_batches[0], author_1_batches[1] is removed. txn_0, txn_1 is expired. + let result = proof_queue.pull_proofs( + &hashset![], + PayloadTxnsSize::new(8, 400), + 4, + 4, + true, + Duration::from_micros(now_in_usecs + 2_500_100), + ); + assert_eq!(result.0.len(), 6); + assert_eq!(result.2, 2); + + let result = proof_queue.pull_proofs( + &hashset![info_7], + PayloadTxnsSize::new(8, 400), + 4, + 4, + true, + Duration::from_micros(now_in_usecs + 2_500_100), + ); + // author_0_batches[0], author_1_batches[1] is removed. author_1_batches[2] is excluded. txn_0, txn_1 are expired. + assert_eq!(result.0.len(), 5); + assert_eq!(result.2, 1); + + proof_queue.handle_updated_block_timestamp(now_in_usecs + 3_000_000); + let result = proof_queue.pull_proofs( + &hashset![], + PayloadTxnsSize::new(8, 400), + 8, + 8, + true, + Duration::from_micros(now_in_usecs + 3_000_100), + ); + // author_0_batches[0], author_0_batches[1], author_1_batches[1] are removed. txn_0, txn_1, txn_2 are expired. + assert_eq!(result.0.len(), 5); + assert_eq!(result.2, 1); + + proof_queue.handle_updated_block_timestamp(now_in_usecs + 3_500_000); + let result = proof_queue.pull_proofs( + &hashset![], + PayloadTxnsSize::new(8, 400), + 4, + 4, + true, + Duration::from_micros(now_in_usecs + 3_500_100), + ); + // author_0_batches[0], author_0_batches[1], author_1_batches[1], author_1_batches[2] are removed. txn_0, txn_1, txn_0 are expired. 
+ assert_eq!(result.0.len(), 4); + assert_eq!(result.2, 0); + + proof_queue.handle_updated_block_timestamp(now_in_usecs + 4_000_000); + let result = proof_queue.pull_proofs( + &hashset![], + PayloadTxnsSize::new(8, 400), + 4, + 4, + true, + Duration::from_micros(now_in_usecs + 4_000_100), + ); + // author_0_batches[0], author_0_batches[1], author_0_batches[3], author_1_batches[0], author_1_batches[1], author_1_batches[2] are removed. + // txn_0, txn_1, txn_2 are expired. + assert_eq!(result.0.len(), 2); + assert_eq!(result.2, 0); + + proof_queue.handle_updated_block_timestamp(now_in_usecs + 5_000_000); + assert!(proof_queue.is_empty()); +} + +#[test] +fn test_proof_queue_soft_limit() { + let my_peer_id = PeerId::random(); + let batch_store = batch_store_for_test(5 * 1024 * 1024); + let mut proof_queue = BatchProofQueue::new(my_peer_id, batch_store); + + let author = PeerId::random(); + + let author_batches = vec![ + proof_of_store_with_size(author, BatchId::new_for_test(0), 100, 1, 10), + proof_of_store_with_size(author, BatchId::new_for_test(1), 200, 1, 10), + proof_of_store_with_size(author, BatchId::new_for_test(2), 200, 1, 10), + ]; + for batch in author_batches { + proof_queue.insert_proof(batch); + } + + let (pulled, _, num_unique_txns, _) = proof_queue.pull_proofs( + &hashset![], + PayloadTxnsSize::new(100, 100), + 12, + 12, + true, + aptos_infallible::duration_since_epoch(), + ); + + assert_eq!(pulled.len(), 1); + assert_eq!(num_unique_txns, 10); + + let (pulled, _, num_unique_txns, _) = proof_queue.pull_proofs( + &hashset![], + PayloadTxnsSize::new(100, 100), + 30, + 12, + true, + aptos_infallible::duration_since_epoch(), + ); + + assert_eq!(pulled.len(), 2); + assert_eq!(num_unique_txns, 20); +} + +#[test] +fn test_proof_queue_insert_after_commit() { + let my_peer_id = PeerId::random(); + let batch_store = batch_store_for_test(5 * 1024); + let mut proof_queue = BatchProofQueue::new(my_peer_id, batch_store); + + let author = PeerId::random(); + let 
author_batches = vec![ + proof_of_store_with_size(author, BatchId::new_for_test(0), 100, 1, 10), + proof_of_store_with_size(author, BatchId::new_for_test(1), 200, 1, 10), + proof_of_store_with_size(author, BatchId::new_for_test(2), 200, 1, 10), + ]; + let batch_infos = author_batches + .iter() + .map(|proof| proof.info().clone()) + .collect(); + + proof_queue.mark_committed(batch_infos); + + for proof in author_batches { + proof_queue.insert_proof(proof); + } + + let (remaining_txns, remaining_proofs) = proof_queue.remaining_txns_and_proofs(); + assert_eq!(remaining_txns, 0); + assert_eq!(remaining_proofs, 0); + + proof_queue.handle_updated_block_timestamp(10); + + assert!(proof_queue.is_empty()); +} + +#[test] +fn test_proof_queue_pull_full_utilization() { + let my_peer_id = PeerId::random(); + let batch_store = batch_store_for_test(5 * 1024); + let mut proof_queue = BatchProofQueue::new(my_peer_id, batch_store); + + let author = PeerId::random(); + let author_batches = vec![ + proof_of_store_with_size(author, BatchId::new_for_test(0), 100, 1, 10), + proof_of_store_with_size(author, BatchId::new_for_test(1), 200, 1, 10), + proof_of_store_with_size(author, BatchId::new_for_test(2), 200, 1, 10), + ]; + + for proof in author_batches { + proof_queue.insert_proof(proof); + } + + let (remaining_txns, remaining_proofs) = proof_queue.remaining_txns_and_proofs(); + assert_eq!(remaining_txns, 30); + assert_eq!(remaining_proofs, 3); + + let now_in_secs = aptos_infallible::duration_since_epoch(); + let (proof_block, txns_with_proof_size, cur_unique_txns, proof_queue_fully_utilized) = + proof_queue.pull_proofs( + &HashSet::new(), + PayloadTxnsSize::new(10, 10), + 10, + 10, + true, + now_in_secs, + ); + + assert_eq!(proof_block.len(), 1); + assert_eq!(txns_with_proof_size.count(), 10); + assert_eq!(cur_unique_txns, 10); + assert!(!proof_queue_fully_utilized); + + let now_in_secs = aptos_infallible::duration_since_epoch(); + let (proof_block, txns_with_proof_size, 
cur_unique_txns, proof_queue_fully_utilized) = + proof_queue.pull_proofs( + &HashSet::new(), + PayloadTxnsSize::new(50, 50), + 50, + 50, + true, + now_in_secs, + ); + + assert_eq!(proof_block.len(), 3); + assert_eq!(txns_with_proof_size.count(), 30); + assert_eq!(cur_unique_txns, 30); + assert!(proof_queue_fully_utilized); + + proof_queue.handle_updated_block_timestamp(10); + assert!(proof_queue.is_empty()); +} diff --git a/consensus/src/quorum_store/tests/batch_requester_test.rs b/consensus/src/quorum_store/tests/batch_requester_test.rs index aaf5ff6bf4528..e9975b35eaeef 100644 --- a/consensus/src/quorum_store/tests/batch_requester_test.rs +++ b/consensus/src/quorum_store/tests/batch_requester_test.rs @@ -14,7 +14,7 @@ use aptos_consensus_types::{ }; use aptos_crypto::HashValue; use aptos_types::{ - aggregate_signature::{AggregateSignature, PartialSignatures}, + aggregate_signature::PartialSignatures, block_info::BlockInfo, ledger_info::{LedgerInfo, LedgerInfoWithSignatures}, validator_signer::ValidatorSigner, @@ -22,6 +22,7 @@ use aptos_types::{ }; use move_core_types::account_address::AccountAddress; use std::time::{Duration, Instant}; +use tokio::sync::oneshot; #[derive(Clone)] struct MockBatchRequester { @@ -36,10 +37,6 @@ impl MockBatchRequester { #[async_trait::async_trait] impl QuorumStoreSender for MockBatchRequester { - async fn send_batch_request(&self, _request: BatchRequest, _recipients: Vec) { - unimplemented!() - } - async fn request_batch( &self, _request: BatchRequest, @@ -49,10 +46,6 @@ impl QuorumStoreSender for MockBatchRequester { Ok(self.return_value.clone()) } - async fn send_batch(&self, _batch: Batch, _recipients: Vec) { - unimplemented!() - } - async fn send_signed_batch_info_msg( &self, _signed_batch_infos: Vec, @@ -99,13 +92,14 @@ async fn test_batch_request_exists() { ValidatorVerifier::new_single(validator_signer.author(), validator_signer.public_key()), ); + let (_, subscriber_rx) = oneshot::channel(); let result = batch_requester 
.request_batch( - ProofOfStore::new( - batch.batch_info().clone(), - AggregateSignature::new(vec![u8::MAX].into(), None), - ), + *batch.digest(), + batch.expiration(), + vec![AccountAddress::random()], tx, + subscriber_rx, ) .await; assert!(result.is_some()); @@ -194,13 +188,14 @@ async fn test_batch_request_not_exists_not_expired() { ); let request_start = Instant::now(); + let (_, subscriber_rx) = oneshot::channel(); let result = batch_requester .request_batch( - ProofOfStore::new( - batch.batch_info().clone(), - AggregateSignature::new(vec![u8::MAX].into(), None), - ), + *batch.digest(), + batch.expiration(), + vec![AccountAddress::random()], tx, + subscriber_rx, ) .await; let request_duration = request_start.elapsed(); @@ -241,13 +236,14 @@ async fn test_batch_request_not_exists_expired() { ); let request_start = Instant::now(); + let (_, subscriber_rx) = oneshot::channel(); let result = batch_requester .request_batch( - ProofOfStore::new( - batch.batch_info().clone(), - AggregateSignature::new(vec![u8::MAX].into(), None), - ), + *batch.digest(), + batch.expiration(), + vec![AccountAddress::random()], tx, + subscriber_rx, ) .await; let request_duration = request_start.elapsed(); diff --git a/consensus/src/quorum_store/tests/direct_mempool_quorum_store_test.rs b/consensus/src/quorum_store/tests/direct_mempool_quorum_store_test.rs index 03a5e903a554c..7f04c4abf71ca 100644 --- a/consensus/src/quorum_store/tests/direct_mempool_quorum_store_test.rs +++ b/consensus/src/quorum_store/tests/direct_mempool_quorum_store_test.rs @@ -4,7 +4,8 @@ use crate::quorum_store::direct_mempool_quorum_store::DirectMempoolQuorumStore; use aptos_consensus_types::{ common::PayloadFilter, - request_response::{GetPayloadCommand, GetPayloadResponse}, + request_response::{GetPayloadCommand, GetPayloadRequest, GetPayloadResponse}, + utils::PayloadTxnsSize, }; use aptos_mempool::{QuorumStoreRequest, QuorumStoreResponse}; use futures::{ @@ -29,15 +30,17 @@ async fn test_block_request_no_txns() 
{ let (consensus_callback, consensus_callback_rcv) = oneshot::channel(); consensus_to_quorum_store_sender - .try_send(GetPayloadCommand::GetPayloadRequest( - 100, - 1000, - 50, - 500, - true, - PayloadFilter::DirectMempool(vec![]), - consensus_callback, - )) + .try_send(GetPayloadCommand::GetPayloadRequest(GetPayloadRequest { + max_txns: PayloadTxnsSize::new(100, 1000), + max_txns_after_filtering: 100, + soft_max_txns_after_filtering: 100, + max_inline_txns: PayloadTxnsSize::new(50, 500), + opt_batch_txns_pct: 0, + return_non_full: true, + filter: PayloadFilter::DirectMempool(vec![]), + callback: consensus_callback, + block_timestamp: aptos_infallible::duration_since_epoch(), + })) .unwrap(); if let QuorumStoreRequest::GetBatchRequest( diff --git a/consensus/src/quorum_store/tests/mod.rs b/consensus/src/quorum_store/tests/mod.rs index 9dfc3e2930c59..fc7c97f6de9ab 100644 --- a/consensus/src/quorum_store/tests/mod.rs +++ b/consensus/src/quorum_store/tests/mod.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 mod batch_generator_test; +mod batch_proof_queue_test; mod batch_requester_test; mod batch_store_test; mod direct_mempool_quorum_store_test; @@ -9,4 +10,3 @@ mod proof_coordinator_test; mod proof_manager_test; mod quorum_store_db_test; mod types_test; -mod utils; diff --git a/consensus/src/quorum_store/tests/proof_coordinator_test.rs b/consensus/src/quorum_store/tests/proof_coordinator_test.rs index cf7567bbe3e18..2a2a2378ee9b2 100644 --- a/consensus/src/quorum_store/tests/proof_coordinator_test.rs +++ b/consensus/src/quorum_store/tests/proof_coordinator_test.rs @@ -10,9 +10,7 @@ use crate::{ }, test_utils::{create_vec_signed_transactions, mock_quorum_store_sender::MockQuorumStoreSender}, }; -use aptos_consensus_types::proof_of_store::{ - BatchId, ProofOfStore, SignedBatchInfo, SignedBatchInfoMsg, -}; +use aptos_consensus_types::proof_of_store::{BatchId, SignedBatchInfo, SignedBatchInfoMsg}; use aptos_crypto::HashValue; use 
aptos_executor_types::ExecutorResult; use aptos_types::{ @@ -20,7 +18,7 @@ use aptos_types::{ }; use mini_moka::sync::Cache; use std::sync::Arc; -use tokio::sync::{mpsc::channel, oneshot::Receiver}; +use tokio::sync::mpsc::channel; pub struct MockBatchReader { peer: PeerId, @@ -31,7 +29,12 @@ impl BatchReader for MockBatchReader { Some(self.peer) } - fn get_batch(&self, _proof: ProofOfStore) -> Receiver>> { + fn get_batch( + &self, + _digest: HashValue, + _expiration: u64, + _signers: Vec, + ) -> tokio::sync::oneshot::Receiver>> { unimplemented!() } diff --git a/consensus/src/quorum_store/tests/proof_manager_test.rs b/consensus/src/quorum_store/tests/proof_manager_test.rs index 812a854f62d9c..cf87abfecba84 100644 --- a/consensus/src/quorum_store/tests/proof_manager_test.rs +++ b/consensus/src/quorum_store/tests/proof_manager_test.rs @@ -7,16 +7,17 @@ use crate::quorum_store::{ use aptos_consensus_types::{ common::{Payload, PayloadFilter}, proof_of_store::{BatchId, BatchInfo, ProofOfStore}, - request_response::{GetPayloadCommand, GetPayloadResponse}, + request_response::{GetPayloadCommand, GetPayloadRequest, GetPayloadResponse}, + utils::PayloadTxnsSize, }; use aptos_crypto::HashValue; use aptos_types::{aggregate_signature::AggregateSignature, PeerId}; use futures::channel::oneshot; -use std::collections::HashSet; +use std::{cmp::max, collections::HashSet}; fn create_proof_manager() -> ProofManager { let batch_store = batch_store_for_test(5 * 1024 * 1024); - ProofManager::new(PeerId::random(), 10, 10, batch_store, true) + ProofManager::new(PeerId::random(), 10, 10, batch_store, true, false) } fn create_proof(author: PeerId, expiration: u64, batch_sequence: u64) -> ProofOfStore { @@ -53,15 +54,17 @@ async fn get_proposal( ) -> Payload { let (callback_tx, callback_rx) = oneshot::channel(); let filter_set = HashSet::from_iter(filter.iter().cloned()); - let req = GetPayloadCommand::GetPayloadRequest( - max_txns, - 1000000, - max_txns / 2, - 100000, - true, - 
PayloadFilter::InQuorumStore(filter_set), - callback_tx, - ); + let req = GetPayloadCommand::GetPayloadRequest(GetPayloadRequest { + max_txns: PayloadTxnsSize::new(max_txns, 1000000), + max_txns_after_filtering: max_txns, + soft_max_txns_after_filtering: max_txns, + max_inline_txns: PayloadTxnsSize::new(max(max_txns / 2, 1), 100000), + filter: PayloadFilter::InQuorumStore(filter_set), + callback: callback_tx, + block_timestamp: aptos_infallible::duration_since_epoch(), + opt_batch_txns_pct: 0, + return_non_full: true, + }); proof_manager.handle_proposal_request(req); let GetPayloadResponse::GetPayloadResponse(payload) = callback_rx.await.unwrap().unwrap(); payload @@ -70,7 +73,7 @@ async fn get_proposal( fn assert_payload_response( payload: Payload, expected: &[ProofOfStore], - max_txns_from_block_to_execute: Option, + max_txns_from_block_to_execute: Option, ) { match payload { Payload::InQuorumStore(proofs) => { diff --git a/consensus/src/quorum_store/tests/utils.rs b/consensus/src/quorum_store/tests/utils.rs deleted file mode 100644 index 922ae1d67a3af..0000000000000 --- a/consensus/src/quorum_store/tests/utils.rs +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use crate::quorum_store::utils::ProofQueue; -use aptos_consensus_types::proof_of_store::{BatchId, BatchInfo, ProofOfStore}; -use aptos_crypto::HashValue; -use aptos_types::{aggregate_signature::AggregateSignature, PeerId}; -use maplit::hashset; - -/// Return a ProofOfStore with minimal fields used by ProofQueue tests. 
-fn proof_of_store(author: PeerId, batch_id: BatchId, gas_bucket_start: u64) -> ProofOfStore { - ProofOfStore::new( - BatchInfo::new( - author, - batch_id, - 0, - 0, - HashValue::random(), - 1, - 1, - gas_bucket_start, - ), - AggregateSignature::empty(), - ) -} - -#[test] -fn test_proof_queue_sorting() { - let my_peer_id = PeerId::random(); - let mut proof_queue = ProofQueue::new(my_peer_id); - - let author_0 = PeerId::random(); - let author_1 = PeerId::random(); - - let author_0_batches = vec![ - proof_of_store(author_0, BatchId::new_for_test(0), 100), - proof_of_store(author_0, BatchId::new_for_test(1), 200), - proof_of_store(author_0, BatchId::new_for_test(2), 50), - proof_of_store(author_0, BatchId::new_for_test(3), 300), - ]; - for batch in author_0_batches { - proof_queue.push(batch); - } - let author_1_batches = vec![ - proof_of_store(author_1, BatchId::new_for_test(4), 500), - proof_of_store(author_1, BatchId::new_for_test(5), 400), - proof_of_store(author_1, BatchId::new_for_test(6), 600), - proof_of_store(author_1, BatchId::new_for_test(7), 50), - ]; - for batch in author_1_batches { - proof_queue.push(batch); - } - - // Expect: [600, 300] - let (pulled, _) = proof_queue.pull_proofs(&hashset![], 2, 2, true); - let mut count_author_0 = 0; - let mut count_author_1 = 0; - let mut prev: Option<&ProofOfStore> = None; - for batch in &pulled { - if let Some(prev) = prev { - assert!(prev.gas_bucket_start() >= batch.gas_bucket_start()); - } else { - assert_eq!(batch.gas_bucket_start(), 600); - } - if batch.author() == author_0 { - count_author_0 += 1; - } else { - count_author_1 += 1; - } - prev = Some(batch); - } - assert_eq!(count_author_0, 1); - assert_eq!(count_author_1, 1); - - // Expect: [600, 500, 300, 100] - let (pulled, _) = proof_queue.pull_proofs(&hashset![], 4, 4, true); - let mut count_author_0 = 0; - let mut count_author_1 = 0; - let mut prev: Option<&ProofOfStore> = None; - for batch in &pulled { - if let Some(prev) = prev { - 
assert!(prev.gas_bucket_start() >= batch.gas_bucket_start()); - } else { - assert_eq!(batch.gas_bucket_start(), 600); - } - if batch.author() == author_0 { - count_author_0 += 1; - } else { - count_author_1 += 1; - } - prev = Some(batch); - } - assert_eq!(count_author_0, 2); - assert_eq!(count_author_1, 2); -} diff --git a/consensus/src/quorum_store/types.rs b/consensus/src/quorum_store/types.rs index 15c6d67211f9a..96b6324c39a14 100644 --- a/consensus/src/quorum_store/types.rs +++ b/consensus/src/quorum_store/types.rs @@ -3,7 +3,7 @@ use anyhow::ensure; use aptos_consensus_types::{ - common::BatchPayload, + common::{BatchPayload, TxnSummaryWithExpiration}, proof_of_store::{BatchId, BatchInfo}, }; use aptos_crypto::{hash::CryptoHash, HashValue}; @@ -57,6 +57,27 @@ impl PersistedValue { pub fn payload(&self) -> &Option> { &self.maybe_payload } + + pub fn summary(&self) -> Vec { + if let Some(payload) = &self.maybe_payload { + return payload + .iter() + .map(|txn| { + TxnSummaryWithExpiration::new( + txn.sender(), + txn.sequence_number(), + txn.expiration_timestamp_secs(), + txn.committed_hash(), + ) + }) + .collect(); + } + vec![] + } + + pub fn unpack(self) -> (BatchInfo, Option>) { + (self.info, self.maybe_payload) + } } impl Deref for PersistedValue { diff --git a/consensus/src/quorum_store/utils.rs b/consensus/src/quorum_store/utils.rs index 048b3c7fcecfe..009763d9a1190 100644 --- a/consensus/src/quorum_store/utils.rs +++ b/consensus/src/quorum_store/utils.rs @@ -1,23 +1,21 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::{monitor, quorum_store::counters}; +use crate::monitor; use aptos_consensus_types::{ common::{TransactionInProgress, TransactionSummary}, - proof_of_store::{BatchId, BatchInfo, ProofOfStore}, + proof_of_store::{BatchId, BatchInfo}, }; use aptos_logger::prelude::*; use aptos_mempool::{QuorumStoreRequest, QuorumStoreResponse}; use aptos_types::{transaction::SignedTransaction, PeerId}; use chrono::Utc; use 
futures::channel::{mpsc::Sender, oneshot}; -use move_core_types::account_address::AccountAddress; -use rand::{seq::SliceRandom, thread_rng}; use std::{ cmp::{Ordering, Reverse}, - collections::{BTreeMap, BinaryHeap, HashMap, HashSet, VecDeque}, + collections::{BTreeMap, BinaryHeap, HashSet, VecDeque}, hash::Hash, - time::{Duration, Instant}, + time::Duration, }; use tokio::time::timeout; @@ -33,12 +31,14 @@ impl Timeouts { } pub(crate) fn add(&mut self, value: T, timeout: usize) { + #[allow(deprecated)] let expiry = Utc::now().naive_utc().timestamp_millis() + timeout as i64; self.timeouts.push_back((expiry, value)); } pub(crate) fn expire(&mut self) -> Vec { - let cur_time = chrono::Utc::now().naive_utc().timestamp_millis(); + #[allow(deprecated)] + let cur_time = Utc::now().naive_utc().timestamp_millis(); trace!( "QS: expire cur time {} timeouts len {}", cur_time, @@ -73,6 +73,8 @@ impl TimeExpirations { } /// Expire and return items corresponding to expiration <= given certified time. + /// Unwrap is safe because peek() is called in loop condition. 
+ #[allow(clippy::unwrap_used)] pub(crate) fn expire(&mut self, certified_time: u64) -> HashSet { let mut ret = HashSet::new(); while let Some((Reverse(t), _)) = self.expiries.peek() { @@ -85,6 +87,11 @@ impl TimeExpirations { } ret } + + #[cfg(test)] + pub(crate) fn is_empty(&self) -> bool { + self.expiries.is_empty() + } } pub struct MempoolProxy { @@ -140,7 +147,7 @@ impl MempoolProxy { } } -#[derive(PartialEq, Eq, Hash, Clone)] +#[derive(PartialEq, Eq, Hash, Clone, Debug)] pub struct BatchKey { author: PeerId, batch_id: BatchId, @@ -155,9 +162,9 @@ impl BatchKey { } } -#[derive(PartialEq, Eq, Clone, Hash)] +#[derive(PartialEq, Eq, Clone, Hash, Debug)] pub struct BatchSortKey { - batch_key: BatchKey, + pub(crate) batch_key: BatchKey, gas_bucket_start: u64, } @@ -191,223 +198,3 @@ impl Ord for BatchSortKey { other.batch_key.batch_id.cmp(&self.batch_key.batch_id) } } - -pub struct ProofQueue { - my_peer_id: PeerId, - // Queue per peer to ensure fairness between peers and priority within peer - author_to_batches: HashMap>, - // ProofOfStore and insertion_time. 
None if committed - batch_to_proof: HashMap>, - // Expiration index - expirations: TimeExpirations, - latest_block_timestamp: u64, - remaining_txns: u64, - remaining_proofs: u64, - remaining_local_txns: u64, - remaining_local_proofs: u64, -} - -impl ProofQueue { - pub(crate) fn new(my_peer_id: PeerId) -> Self { - Self { - my_peer_id, - author_to_batches: HashMap::new(), - batch_to_proof: HashMap::new(), - expirations: TimeExpirations::new(), - latest_block_timestamp: 0, - remaining_txns: 0, - remaining_proofs: 0, - remaining_local_txns: 0, - remaining_local_proofs: 0, - } - } - - #[inline] - fn inc_remaining(&mut self, author: &AccountAddress, num_txns: u64) { - self.remaining_txns += num_txns; - self.remaining_proofs += 1; - if *author == self.my_peer_id { - self.remaining_local_txns += num_txns; - self.remaining_local_proofs += 1; - } - } - - #[inline] - fn dec_remaining(&mut self, author: &AccountAddress, num_txns: u64) { - self.remaining_txns -= num_txns; - self.remaining_proofs -= 1; - if *author == self.my_peer_id { - self.remaining_local_txns -= num_txns; - self.remaining_local_proofs -= 1; - } - } - - pub(crate) fn push(&mut self, proof: ProofOfStore) { - if proof.expiration() < self.latest_block_timestamp { - counters::inc_rejected_pos_count(counters::POS_EXPIRED_LABEL); - return; - } - let batch_key = BatchKey::from_info(proof.info()); - if self.batch_to_proof.contains_key(&batch_key) { - counters::inc_rejected_pos_count(counters::POS_DUPLICATE_LABEL); - return; - } - - let author = proof.author(); - let bucket = proof.gas_bucket_start(); - let num_txns = proof.num_txns(); - let expiration = proof.expiration(); - - let batch_sort_key = BatchSortKey::from_info(proof.info()); - let queue = self.author_to_batches.entry(author).or_default(); - queue.insert(batch_sort_key.clone(), proof.info().clone()); - self.expirations.add_item(batch_sort_key, expiration); - self.batch_to_proof - .insert(batch_key, Some((proof, Instant::now()))); - - if author == 
self.my_peer_id { - counters::inc_local_pos_count(bucket); - } else { - counters::inc_remote_pos_count(bucket); - } - - self.inc_remaining(&author, num_txns); - } - - // gets excluded and iterates over the vector returning non excluded or expired entries. - // return the vector of pulled PoS, and the size of the remaining PoS - // The flag in the second return argument is true iff the entire proof queue is fully utilized - // when pulling the proofs. If any proof from proof queue cannot be included due to size limits, - // this flag is set false. - pub(crate) fn pull_proofs( - &mut self, - excluded_batches: &HashSet, - max_txns: u64, - max_bytes: u64, - return_non_full: bool, - ) -> (Vec, bool) { - let mut ret = vec![]; - let mut cur_bytes = 0; - let mut cur_txns = 0; - let mut excluded_txns = 0; - let mut full = false; - - let mut iters = vec![]; - for (_, batches) in self.author_to_batches.iter() { - iters.push(batches.iter().rev()); - } - - while !iters.is_empty() { - iters.shuffle(&mut thread_rng()); - iters.retain_mut(|iter| { - if let Some((sort_key, batch)) = iter.next() { - if excluded_batches.contains(batch) { - excluded_txns += batch.num_txns(); - } else if let Some(Some((proof, insertion_time))) = - self.batch_to_proof.get(&sort_key.batch_key) - { - if cur_bytes + batch.num_bytes() > max_bytes - || cur_txns + batch.num_txns() > max_txns - { - // Exceeded the limit for requested bytes or number of transactions. - full = true; - return false; - } - cur_bytes += batch.num_bytes(); - cur_txns += batch.num_txns(); - let bucket = proof.gas_bucket_start(); - ret.push(proof.clone()); - counters::pos_to_pull(bucket, insertion_time.elapsed().as_secs_f64()); - if cur_bytes == max_bytes || cur_txns == max_txns { - // Exactly the limit for requested bytes or number of transactions. 
- full = true; - return false; - } - } - true - } else { - false - } - }) - } - info!( - // before non full check - byte_size = cur_bytes, - block_size = cur_txns, - batch_count = ret.len(), - full = full, - return_non_full = return_non_full, - "Pull payloads from QuorumStore: internal" - ); - - if full || return_non_full { - counters::BLOCK_SIZE_WHEN_PULL.observe(cur_txns as f64); - counters::BLOCK_BYTES_WHEN_PULL.observe(cur_bytes as f64); - counters::PROOF_SIZE_WHEN_PULL.observe(ret.len() as f64); - counters::EXCLUDED_TXNS_WHEN_PULL.observe(excluded_txns as f64); - // Stable sort, so the order of proofs within an author will not change. - ret.sort_by_key(|proof| Reverse(proof.gas_bucket_start())); - (ret, !full) - } else { - (Vec::new(), !full) - } - } - - pub(crate) fn handle_updated_block_timestamp(&mut self, block_timestamp: u64) { - assert!( - self.latest_block_timestamp <= block_timestamp, - "Decreasing block timestamp" - ); - self.latest_block_timestamp = block_timestamp; - - let expired = self.expirations.expire(block_timestamp); - let mut num_expired_but_not_committed = 0; - for key in &expired { - if let Some(mut queue) = self.author_to_batches.remove(&key.author()) { - if let Some(batch) = queue.remove(key) { - if self - .batch_to_proof - .get(&key.batch_key) - .expect("Entry for unexpired batch must exist") - .is_some() - { - // non-committed proof that is expired - num_expired_but_not_committed += 1; - counters::GAP_BETWEEN_BATCH_EXPIRATION_AND_CURRENT_TIME_WHEN_COMMIT - .observe((block_timestamp - batch.expiration()) as f64); - self.dec_remaining(&batch.author(), batch.num_txns()); - } - claims::assert_some!(self.batch_to_proof.remove(&key.batch_key)); - } - if !queue.is_empty() { - self.author_to_batches.insert(key.author(), queue); - } - } - } - counters::NUM_PROOFS_EXPIRED_WHEN_COMMIT.inc_by(num_expired_but_not_committed); - } - - pub(crate) fn remaining_txns_and_proofs(&self) -> (u64, u64) { - 
counters::NUM_TOTAL_TXNS_LEFT_ON_UPDATE.observe(self.remaining_txns as f64); - counters::NUM_TOTAL_PROOFS_LEFT_ON_UPDATE.observe(self.remaining_proofs as f64); - counters::NUM_LOCAL_TXNS_LEFT_ON_UPDATE.observe(self.remaining_local_txns as f64); - counters::NUM_LOCAL_PROOFS_LEFT_ON_UPDATE.observe(self.remaining_local_proofs as f64); - - (self.remaining_txns, self.remaining_proofs) - } - - // Mark in the hashmap committed PoS, but keep them until they expire - pub(crate) fn mark_committed(&mut self, batches: Vec) { - for batch in batches { - let batch_key = BatchKey::from_info(&batch); - if let Some(Some((proof, insertion_time))) = self.batch_to_proof.get(&batch_key) { - counters::pos_to_commit( - proof.gas_bucket_start(), - insertion_time.elapsed().as_secs_f64(), - ); - self.dec_remaining(&batch.author(), batch.num_txns()); - } - self.batch_to_proof.insert(batch_key, None); - } - } -} diff --git a/consensus/src/rand/rand_gen/block_queue.rs b/consensus/src/rand/rand_gen/block_queue.rs index 16522cfe5a9b2..c76d9b92bb292 100644 --- a/consensus/src/rand/rand_gen/block_queue.rs +++ b/consensus/src/rand/rand_gen/block_queue.rs @@ -40,12 +40,16 @@ impl QueueItem { self.blocks().len() } + #[allow(clippy::unwrap_used)] pub fn first_round(&self) -> u64 { self.blocks().first().unwrap().block().round() } pub fn offset(&self, round: Round) -> usize { - *self.offsets_by_round.get(&round).unwrap() + *self + .offsets_by_round + .get(&round) + .expect("Round should be in the queue") } pub fn num_undecided(&self) -> usize { @@ -106,6 +110,8 @@ impl BlockQueue { } /// Dequeue all ordered blocks prefix that have randomness + /// Unwrap is safe because the queue is not empty + #[allow(clippy::unwrap_used)] pub fn dequeue_rand_ready_prefix(&mut self) -> Vec { let mut rand_ready_prefix = vec![]; while let Some((_starting_round, item)) = self.queue.first_key_value() { diff --git a/consensus/src/rand/rand_gen/network_messages.rs b/consensus/src/rand/rand_gen/network_messages.rs index 
2e5adf01a3255..09a3edfa463af 100644 --- a/consensus/src/rand/rand_gen/network_messages.rs +++ b/consensus/src/rand/rand_gen/network_messages.rs @@ -82,6 +82,7 @@ impl TConsensusMsg for RandMessage { } } + #[allow(clippy::unwrap_used)] fn into_network_message(self) -> ConsensusMsg { ConsensusMsg::RandGenMessage(RandGenMessage { epoch: self.epoch(), diff --git a/consensus/src/rand/rand_gen/rand_manager.rs b/consensus/src/rand/rand_gen/rand_manager.rs index c6efe6724ae13..d112c1b140632 100644 --- a/consensus/src/rand/rand_gen/rand_manager.rs +++ b/consensus/src/rand/rand_gen/rand_manager.rs @@ -23,7 +23,7 @@ use aptos_channels::aptos_channel; use aptos_config::config::ReliableBroadcastConfig; use aptos_consensus_types::common::{Author, Round}; use aptos_infallible::Mutex; -use aptos_logger::{debug, error, info, spawn_named, warn}; +use aptos_logger::{error, info, spawn_named, trace, warn}; use aptos_network::{protocols::network::RpcError, ProtocolId}; use aptos_reliable_broadcast::{DropGuard, ReliableBroadcast}; use aptos_time_service::TimeService; @@ -212,7 +212,10 @@ impl RandManager { message: RandMessage, ) { let msg = message.into_network_message(); - let _ = sender.send(Ok(protocol.to_bytes(&msg).unwrap().into())); + let _ = sender.send(Ok(protocol + .to_bytes(&msg) + .expect("Message should be serializable into protocol") + .into())); } async fn verification_task( @@ -348,7 +351,7 @@ impl RandManager { incoming_rpc_request: aptos_channel::Receiver, mut reset_rx: Receiver, bounded_executor: BoundedExecutor, - highest_ordered_round: Round, + highest_known_round: Round, ) { info!("RandManager started"); let (verified_msg_tx, mut verified_msg_rx) = unbounded(); @@ -357,7 +360,7 @@ impl RandManager { let fast_rand_config = self.fast_config.clone(); self.rand_store .lock() - .update_highest_known_round(highest_ordered_round); + .update_highest_known_round(highest_known_round); spawn_named!( "rand manager verification", Self::verification_task( @@ -409,7 +412,7 @@ 
impl RandManager { } } RandMessage::Share(share) => { - debug!(LogSchema::new(LogEvent::ReceiveProactiveRandShare) + trace!(LogSchema::new(LogEvent::ReceiveProactiveRandShare) .author(self.author) .epoch(share.epoch()) .round(share.metadata().round) @@ -420,7 +423,7 @@ impl RandManager { } } RandMessage::FastShare(share) => { - debug!(LogSchema::new(LogEvent::ReceiveRandShareFastPath) + trace!(LogSchema::new(LogEvent::ReceiveRandShareFastPath) .author(self.author) .epoch(share.epoch()) .round(share.metadata().round) @@ -437,7 +440,13 @@ impl RandManager { .remote_peer(*aug_data.author())); match self.aug_data_store.add_aug_data(aug_data) { Ok(sig) => self.process_response(protocol, response_sender, RandMessage::AugDataSignature(sig)), - Err(e) => error!("[RandManager] Failed to add aug data: {}", e), + Err(e) => { + if e.to_string().contains("[AugDataStore] equivocate data") { + warn!("[RandManager] Failed to add aug data: {}", e); + } else { + error!("[RandManager] Failed to add aug data: {}", e); + } + }, } } RandMessage::CertifiedAugData(certified_aug_data) => { diff --git a/consensus/src/rand/rand_gen/types.rs b/consensus/src/rand/rand_gen/types.rs index e8d5670636604..2b6d30f8b5a94 100644 --- a/consensus/src/rand/rand_gen/types.rs +++ b/consensus/src/rand/rand_gen/types.rs @@ -65,7 +65,9 @@ impl TShare for Share { WVUF::verify_share( &rand_config.vuf_pp, apk, - bcs::to_bytes(&rand_metadata).unwrap().as_slice(), + bcs::to_bytes(&rand_metadata) + .map_err(|e| anyhow!("Serialization failed: {}", e))? 
+ .as_slice(), &self.share, )?; } else { @@ -78,6 +80,7 @@ impl TShare for Share { Ok(()) } + #[allow(clippy::unwrap_used)] fn generate(rand_config: &RandConfig, rand_metadata: RandMetadata) -> RandShare where Self: Sized, @@ -629,7 +632,7 @@ impl RandConfig { .validator .address_to_validator_index() .get(peer) - .unwrap() + .expect("Peer should be in the index!") } pub fn get_certified_apk(&self, peer: &Author) -> Option<&APK> { diff --git a/consensus/src/recovery_manager.rs b/consensus/src/recovery_manager.rs index 9f1f34a1e0877..57e308570f93d 100644 --- a/consensus/src/recovery_manager.rs +++ b/consensus/src/recovery_manager.rs @@ -7,7 +7,7 @@ use crate::{ error::error_kind, monitor, network::NetworkSender, - payload_manager::PayloadManager, + payload_manager::TPayloadManager, persistent_liveness_storage::{PersistentLivenessStorage, RecoveryData}, pipeline::execution_client::TExecutionClient, round_manager::VerifiedEvent, @@ -33,7 +33,7 @@ pub struct RecoveryManager { execution_client: Arc, last_committed_round: Round, max_blocks_to_request: u64, - payload_manager: Arc, + payload_manager: Arc, order_vote_enabled: bool, pending_blocks: Arc>, } @@ -46,7 +46,7 @@ impl RecoveryManager { execution_client: Arc, last_committed_round: Round, max_blocks_to_request: u64, - payload_manager: Arc, + payload_manager: Arc, order_vote_enabled: bool, pending_blocks: Arc>, ) -> Self { diff --git a/consensus/src/round_manager.rs b/consensus/src/round_manager.rs index 2a4dd84a4a96e..bd7be5172d775 100644 --- a/consensus/src/round_manager.rs +++ b/consensus/src/round_manager.rs @@ -5,12 +5,12 @@ use crate::{ block_storage::{ tracing::{observe_block, BlockStage}, - BlockReader, BlockRetriever, BlockStore, + BlockReader, BlockRetriever, BlockStore, NeedFetchResult, }, counters::{ self, ORDER_CERT_CREATED_WITHOUT_BLOCK_IN_BLOCK_STORE, ORDER_VOTE_ADDED, - ORDER_VOTE_BROADCASTED, ORDER_VOTE_OTHER_ERRORS, ORDER_VOTE_VERY_OLD, PROPOSAL_VOTE_ADDED, - PROPOSAL_VOTE_BROADCASTED, 
PROPOSED_VTXN_BYTES, PROPOSED_VTXN_COUNT, + ORDER_VOTE_BROADCASTED, ORDER_VOTE_NOT_IN_RANGE, ORDER_VOTE_OTHER_ERRORS, + PROPOSAL_VOTE_ADDED, PROPOSAL_VOTE_BROADCASTED, PROPOSED_VTXN_BYTES, PROPOSED_VTXN_COUNT, QC_AGGREGATED_FROM_VOTES, SYNC_INFO_RECEIVED_WITH_NEWER_CERT, }, error::{error_kind, VerifyError}, @@ -51,7 +51,7 @@ use aptos_consensus_types::{ vote_msg::VoteMsg, wrapped_ledger_info::WrappedLedgerInfo, }; -use aptos_crypto::HashValue; +use aptos_crypto::{hash::CryptoHash, HashValue}; use aptos_infallible::{checked, Mutex}; use aptos_logger::prelude::*; #[cfg(test)] @@ -69,11 +69,11 @@ use aptos_types::{ PeerId, }; use fail::fail_point; -use futures::{channel::oneshot, FutureExt, StreamExt}; +use futures::{channel::oneshot, stream::FuturesUnordered, Future, FutureExt, StreamExt}; use futures_channel::mpsc::UnboundedReceiver; use lru::LruCache; use serde::Serialize; -use std::{mem::Discriminant, sync::Arc, time::Duration}; +use std::{mem::Discriminant, pin::Pin, sync::Arc, time::Duration}; use tokio::{ sync::oneshot as TokioOneshot, time::{sleep, Instant}, @@ -126,7 +126,7 @@ impl UnverifiedEvent { }, UnverifiedEvent::OrderVoteMsg(v) => { if !self_message { - v.verify(validator)?; + v.verify_order_vote(validator)?; counters::VERIFY_MSG .with_label_values(&["order_vote"]) .observe(start_time.elapsed().as_secs_f64()); @@ -232,8 +232,8 @@ pub struct RoundManager { epoch_state: Arc, block_store: Arc, round_state: RoundState, - proposer_election: UnequivocalProposerElection, - proposal_generator: ProposalGenerator, + proposer_election: Arc, + proposal_generator: Arc, safety_rules: Arc>, network: Arc, storage: Arc, @@ -250,6 +250,9 @@ pub struct RoundManager { // To avoid duplicate broadcasts for the same block, we keep track of blocks for // which we recently broadcasted fast shares. 
blocks_with_broadcasted_fast_shares: LruCache, + futures: FuturesUnordered< + Pin, Block, Instant)> + Send>>, + >, } impl RoundManager { @@ -284,8 +287,8 @@ impl RoundManager { epoch_state, block_store, round_state, - proposer_election: UnequivocalProposerElection::new(proposer_election), - proposal_generator, + proposer_election: Arc::new(UnequivocalProposerElection::new(proposer_election)), + proposal_generator: Arc::new(proposal_generator), safety_rules, network, storage, @@ -298,6 +301,7 @@ impl RoundManager { fast_rand_config, pending_order_votes: PendingOrderVotes::new(), blocks_with_broadcasted_fast_shares: LruCache::new(5), + futures: FuturesUnordered::new(), } } @@ -353,23 +357,69 @@ impl RoundManager { .proposer_election .is_valid_proposer(self.proposal_generator.author(), new_round_event.round) { - self.log_collected_vote_stats(&new_round_event); - self.round_state.setup_leader_timeout(); - let proposal_msg = self.generate_proposal(new_round_event).await?; - #[cfg(feature = "failpoints")] - { - if self.check_whether_to_inject_reconfiguration_error() { - self.attempt_to_inject_reconfiguration_error(&proposal_msg) - .await?; + let epoch_state = self.epoch_state.clone(); + let network = self.network.clone(); + let sync_info = self.block_store.sync_info(); + let proposal_generator = self.proposal_generator.clone(); + let safety_rules = self.safety_rules.clone(); + let proposer_election = self.proposer_election.clone(); + tokio::spawn(async move { + if let Err(e) = Self::generate_and_send_proposal( + epoch_state, + new_round_event, + network, + sync_info, + proposal_generator, + safety_rules, + proposer_election, + ) + .await + { + warn!("Error generating and sending proposal: {}", e); } - } - self.network.broadcast_proposal(proposal_msg).await; - counters::PROPOSALS_COUNT.inc(); + }); } Ok(()) } - fn log_collected_vote_stats(&self, new_round_event: &NewRoundEvent) { + async fn generate_and_send_proposal( + epoch_state: Arc, + new_round_event: NewRoundEvent, 
+ network: Arc, + sync_info: SyncInfo, + proposal_generator: Arc, + safety_rules: Arc>, + proposer_election: Arc, + ) -> anyhow::Result<()> { + let epoch = epoch_state.epoch; + Self::log_collected_vote_stats(epoch_state.clone(), &new_round_event); + let proposal_msg = Self::generate_proposal( + epoch, + new_round_event, + sync_info, + network.clone(), + proposal_generator, + safety_rules, + proposer_election, + ) + .await?; + #[cfg(feature = "failpoints")] + { + if Self::check_whether_to_inject_reconfiguration_error() { + Self::attempt_to_inject_reconfiguration_error( + epoch_state, + network.clone(), + &proposal_msg, + ) + .await?; + } + }; + network.broadcast_proposal(proposal_msg).await; + counters::PROPOSALS_COUNT.inc(); + Ok(()) + } + + fn log_collected_vote_stats(epoch_state: Arc, new_round_event: &NewRoundEvent) { let prev_round_votes_for_li = new_round_event .prev_round_votes .iter() @@ -378,7 +428,7 @@ impl RoundManager { .signatures() .keys() .map(|author| { - self.epoch_state + epoch_state .verifier .get_voting_power(author) .map(|voting_power| (voting_power as u128, 1)) @@ -407,7 +457,7 @@ impl RoundManager { let (voting_power, votes): (Vec<_>, Vec<_>) = timeout_votes .signers() .map(|author| { - self.epoch_state + epoch_state .verifier .get_voting_power(author) .map(|voting_power| (voting_power as u128, 1)) @@ -425,9 +475,9 @@ impl RoundManager { counters::PROPOSER_COLLECTED_TIMEOUT_VOTING_POWER.inc_by(timeout_voting_power as f64); info!( - epoch = self.epoch_state.epoch, + epoch = epoch_state.epoch, round = new_round_event.round, - total_voting_power = ?self.epoch_state.verifier.total_voting_power(), + total_voting_power = ?epoch_state.verifier.total_voting_power(), max_voting_power = ?max_voting_power, max_num_votes = max_num_votes, conflicting_voting_power = ?conflicting_voting_power, @@ -438,31 +488,51 @@ impl RoundManager { ); } + #[cfg(feature = "fuzzing")] + async fn generate_proposal_for_test( + &self, + new_round_event: NewRoundEvent, + ) -> 
anyhow::Result { + Self::generate_proposal( + self.epoch_state().epoch, + new_round_event, + self.block_store.sync_info(), + self.network.clone(), + self.proposal_generator.clone(), + self.safety_rules.clone(), + self.proposer_election.clone(), + ) + .await + } + async fn generate_proposal( - &mut self, + epoch: u64, new_round_event: NewRoundEvent, + sync_info: SyncInfo, + network: Arc, + proposal_generator: Arc, + safety_rules: Arc>, + proposer_election: Arc, ) -> anyhow::Result { // Proposal generator will ensure that at most one proposal is generated per round - let sync_info = self.block_store.sync_info(); - let sender = self.network.clone(); + let callback_sync_info = sync_info.clone(); let callback = async move { - sender.broadcast_sync_info(sync_info).await; + network.broadcast_sync_info(callback_sync_info).await; } .boxed(); - let proposal = self - .proposal_generator - .generate_proposal(new_round_event.round, &mut self.proposer_election, callback) + let proposal = proposal_generator + .generate_proposal(new_round_event.round, proposer_election, callback) .await?; - let signature = self.safety_rules.lock().sign_proposal(&proposal)?; + let signature = safety_rules.lock().sign_proposal(&proposal)?; let signed_proposal = Block::new_proposal_from_block_data_and_signature(proposal, signature); observe_block(signed_proposal.timestamp_usecs(), BlockStage::SIGNED); - info!(self.new_log(LogEvent::Propose), "{}", signed_proposal); - Ok(ProposalMsg::new( - signed_proposal, - self.block_store.sync_info(), - )) + info!( + Self::new_log_with_round_epoch(LogEvent::Propose, new_round_event.round, epoch), + "{}", signed_proposal + ); + Ok(ProposalMsg::new(signed_proposal, sync_info)) } /// Process the proposal message: @@ -485,22 +555,27 @@ impl RoundManager { block_parent_hash = proposal_msg.proposal().quorum_cert().certified_block().id(), ); - if self + let in_correct_round = self .ensure_round_and_sync_up( proposal_msg.proposal().round(), proposal_msg.sync_info(), 
proposal_msg.proposer(), ) .await - .context("[RoundManager] Process proposal")? - { + .context("[RoundManager] Process proposal")?; + if in_correct_round { self.process_proposal(proposal_msg.take_proposal()).await } else { - bail!( - "Stale proposal {}, current round {}", - proposal_msg.proposal(), - self.round_state.current_round() + sample!( + SampleRate::Duration(Duration::from_secs(30)), + warn!( + "[sampled] Stale proposal {}, current round {}", + proposal_msg.proposal(), + self.round_state.current_round() + ) ); + counters::ERROR_COUNT.inc(); + Ok(()) } } @@ -526,8 +601,7 @@ impl RoundManager { let vote = msg.vote().clone(); let vote_reception_result = self .round_state - .process_delayed_qc_msg(&self.epoch_state.verifier, msg) - .await; + .process_delayed_qc_msg(&self.epoch_state.verifier, msg); trace!( "Received delayed QC message and vote reception result is {:?}", vote_reception_result @@ -657,13 +731,13 @@ impl RoundManager { // Didn't vote in this round yet, generate a backup vote let nil_block = self .proposal_generator - .generate_nil_block(round, &mut self.proposer_election)?; + .generate_nil_block(round, self.proposer_election.clone())?; info!( self.new_log(LogEvent::VoteNIL), "Planning to vote for a NIL block {}", nil_block ); counters::VOTE_NIL_COUNT.inc(); - let nil_vote = self.execute_and_vote(nil_block).await?; + let nil_vote = self.vote_block(nil_block).await?; (true, nil_vote) }, }; @@ -802,7 +876,7 @@ impl RoundManager { proposal.round(), proposal.quorum_cert().certified_block().round(), false, - &mut self.proposer_election, + self.proposer_election.clone(), ); ensure!( proposal.block_data().failed_authors().map_or(false, |failed_authors| *failed_authors == expected_failed_authors), @@ -823,6 +897,41 @@ impl RoundManager { ); observe_block(proposal.timestamp_usecs(), BlockStage::SYNCED); + + let block_store = self.block_store.clone(); + if !block_store.check_payload(&proposal) { + debug!("Payload not available locally for block: {}", 
proposal.id()); + counters::CONSENSUS_PROPOSAL_PAYLOAD_AVAILABILITY + .with_label_values(&["missing"]) + .inc(); + let start_time = Instant::now(); + let future = async move { + ( + block_store.wait_for_payload(&proposal).await, + proposal, + start_time, + ) + } + .boxed(); + self.futures.push(future); + return Ok(()); + } + + counters::CONSENSUS_PROPOSAL_PAYLOAD_AVAILABILITY + .with_label_values(&["available"]) + .inc(); + + self.check_backpressure_and_process_proposal(proposal).await + } + + async fn check_backpressure_and_process_proposal( + &mut self, + proposal: Block, + ) -> anyhow::Result<()> { + let author = proposal + .author() + .expect("Proposal should be verified having an author"); + if self.block_store.vote_back_pressure() { counters::CONSENSUS_WITHOLD_VOTE_BACKPRESSURE_TRIGGERED.observe(1.0); // In case of back pressure, we delay processing proposal. This is done by resending the @@ -838,30 +947,31 @@ impl RoundManager { .insert_block(proposal.clone()) .await .context("[RoundManager] Failed to execute_and_insert the block")?; - self.resend_verified_proposal_to_self( + Self::resend_verified_proposal_to_self( + self.block_store.clone(), + self.buffered_proposal_tx.clone(), proposal, author, BACK_PRESSURE_POLLING_INTERVAL_MS, self.local_config.round_initial_timeout_ms, ) .await; - Ok(()) - } else { - counters::CONSENSUS_WITHOLD_VOTE_BACKPRESSURE_TRIGGERED.observe(0.0); - self.process_verified_proposal(proposal).await + return Ok(()); } + + counters::CONSENSUS_WITHOLD_VOTE_BACKPRESSURE_TRIGGERED.observe(0.0); + self.process_verified_proposal(proposal).await } async fn resend_verified_proposal_to_self( - &self, + block_store: Arc, + self_sender: aptos_channel::Sender, proposal: Block, author: Author, polling_interval_ms: u64, timeout_ms: u64, ) { let start = Instant::now(); - let block_store = self.block_store.clone(); - let self_sender = self.buffered_proposal_tx.clone(); let event = VerifiedEvent::VerifiedProposalMsg(Box::new(proposal)); 
tokio::spawn(async move { while start.elapsed() < Duration::from_millis(timeout_ms) { @@ -903,7 +1013,7 @@ impl RoundManager { pub async fn process_verified_proposal(&mut self, proposal: Block) -> anyhow::Result<()> { let proposal_round = proposal.round(); let vote = self - .execute_and_vote(proposal) + .vote_block(proposal) .await .context("[RoundManager] Process proposal")?; self.round_state.record_vote(vote.clone()); @@ -930,12 +1040,12 @@ impl RoundManager { } /// The function generates a VoteMsg for a given proposed_block: - /// * first execute the block and add it to the block store + /// * add the block to the block store /// * then verify the voting rules /// * save the updated state to consensus DB /// * return a VoteMsg with the LedgerInfo to be committed in case the vote gathers QC. - async fn execute_and_vote(&mut self, proposed_block: Block) -> anyhow::Result { - let executed_block = self + async fn vote_block(&mut self, proposed_block: Block) -> anyhow::Result { + let block_arc = self .block_store .insert_block(proposed_block) .await @@ -953,17 +1063,17 @@ impl RoundManager { "[RoundManager] sync_only flag is set, stop voting" ); - let vote_proposal = executed_block.vote_proposal(); + let vote_proposal = block_arc.vote_proposal(); let vote_result = self.safety_rules.lock().construct_and_sign_vote_two_chain( &vote_proposal, self.block_store.highest_2chain_timeout_cert().as_deref(), ); let vote = vote_result.context(format!( "[RoundManager] SafetyRules Rejected {}", - executed_block.block() + block_arc.block() ))?; - if !executed_block.block().is_nil_block() { - observe_block(executed_block.block().timestamp_usecs(), BlockStage::VOTED); + if !block_arc.block().is_nil_block() { + observe_block(block_arc.block().timestamp_usecs(), BlockStage::VOTED); } self.storage @@ -978,9 +1088,14 @@ impl RoundManager { fail_point!("consensus::process_order_vote_msg", |_| { Err(anyhow::anyhow!("Injected error in process_order_vote_msg")) }); - info!( - 
self.new_log(LogEvent::ReceiveOrderVote), - "{}", order_vote_msg + + let order_vote = order_vote_msg.order_vote(); + debug!( + self.new_log(LogEvent::ReceiveOrderVote) + .remote_peer(order_vote.author()), + epoch = order_vote.ledger_info().epoch(), + round = order_vote.ledger_info().round(), + id = order_vote.ledger_info().consensus_block_id(), ); if self @@ -990,21 +1105,56 @@ impl RoundManager { return Ok(()); } - if order_vote_msg.order_vote().ledger_info().round() - > self.block_store.sync_info().highest_ordered_round() + let highest_ordered_round = self.block_store.sync_info().highest_ordered_round(); + let order_vote_round = order_vote_msg.order_vote().ledger_info().round(); + let li_digest = order_vote_msg.order_vote().ledger_info().hash(); + if order_vote_round > highest_ordered_round + && order_vote_round < highest_ordered_round + 100 { - let vote_reception_result = self - .pending_order_votes - .insert_order_vote(order_vote_msg.order_vote(), &self.epoch_state.verifier); - self.process_order_vote_reception_result(&order_vote_msg, vote_reception_result) - .await?; + // If it is the first order vote received for the block, verify the QC and insert along with QC. + // For the subsequent order votes for the same block, we don't have to verify the QC. Just inserting the + // order vote is enough. 
+ let vote_reception_result = if !self.pending_order_votes.exists(&li_digest) { + let start = Instant::now(); + order_vote_msg + .quorum_cert() + .verify(&self.epoch_state().verifier) + .context("[OrderVoteMsg QuorumCert verification failed")?; + counters::VERIFY_MSG + .with_label_values(&["order_vote_qc"]) + .observe(start.elapsed().as_secs_f64()); + self.pending_order_votes.insert_order_vote( + order_vote_msg.order_vote(), + &self.epoch_state.verifier, + Some(order_vote_msg.quorum_cert().clone()), + ) + } else { + self.pending_order_votes.insert_order_vote( + order_vote_msg.order_vote(), + &self.epoch_state.verifier, + None, + ) + }; + self.process_order_vote_reception_result( + vote_reception_result, + order_vote_msg.order_vote().author(), + ) + .await?; } else { - ORDER_VOTE_VERY_OLD.inc(); - info!( - "Received old order vote. Order vote round: {:?}, Highest ordered round: {:?}", + ORDER_VOTE_NOT_IN_RANGE.inc(); + sample!( + SampleRate::Duration(Duration::from_secs(1)), + info!( + "[sampled] Received an order vote not in the 100 rounds. Order vote round: {:?}, Highest ordered round: {:?}", + order_vote_msg.order_vote().ledger_info().round(), + self.block_store.sync_info().highest_ordered_round() + ) + ); + debug!( + "Received an order vote not in the next 100 rounds. 
Order vote round: {:?}, Highest ordered round: {:?}", order_vote_msg.order_vote().ledger_info().round(), self.block_store.sync_info().highest_ordered_round() - ); + ) } } Ok(()) @@ -1077,16 +1227,26 @@ impl RoundManager { async fn process_vote(&mut self, vote: &Vote) -> anyhow::Result<()> { let round = vote.vote_data().proposed().round(); - info!( - self.new_log(LogEvent::ReceiveVote) - .remote_peer(vote.author()), - vote = %vote, - vote_epoch = vote.vote_data().proposed().epoch(), - vote_round = vote.vote_data().proposed().round(), - vote_id = vote.vote_data().proposed().id(), - vote_state = vote.vote_data().proposed().executed_state_id(), - is_timeout = vote.is_timeout(), - ); + if vote.is_timeout() { + info!( + self.new_log(LogEvent::ReceiveVote) + .remote_peer(vote.author()), + vote = %vote, + epoch = vote.vote_data().proposed().epoch(), + round = vote.vote_data().proposed().round(), + id = vote.vote_data().proposed().id(), + state = vote.vote_data().proposed().executed_state_id(), + is_timeout = vote.is_timeout(), + ); + } else { + debug!( + self.new_log(LogEvent::ReceiveVote) + .remote_peer(vote.author()), + epoch = vote.vote_data().proposed().epoch(), + round = vote.vote_data().proposed().round(), + id = vote.vote_data().proposed().id(), + ); + } if !self.local_config.broadcast_vote && !vote.is_timeout() { // Unlike timeout votes regular votes are sent to the leaders of the next round only. @@ -1138,6 +1298,15 @@ impl RoundManager { qc ))?; if self.onchain_config.order_vote_enabled() { + // This check is already done in safety rules. As printing the "failed to broadcast order vote" + // in humio logs could sometimes look scary, we are doing the same check again here. 
+ if let Some(last_sent_vote) = self.round_state.vote_sent() { + if let Some((two_chain_timeout, _)) = last_sent_vote.two_chain_timeout() { + if round <= two_chain_timeout.round() { + return Ok(()); + } + } + } // Broadcast order vote if the QC is successfully aggregated // Even if broadcast order vote fails, the function will return Ok if let Err(e) = self.broadcast_order_vote(vote, qc.clone()).await { @@ -1170,15 +1339,18 @@ impl RoundManager { async fn process_order_vote_reception_result( &mut self, - order_vote_msg: &OrderVoteMsg, result: OrderVoteReceptionResult, + preferred_peer: Author, ) -> anyhow::Result<()> { match result { - OrderVoteReceptionResult::NewLedgerInfoWithSignatures(ledger_info_with_signatures) => { + OrderVoteReceptionResult::NewLedgerInfoWithSignatures(( + verified_qc, + ledger_info_with_signatures, + )) => { self.new_ordered_cert( WrappedLedgerInfo::new(VoteData::dummy(), ledger_info_with_signatures), - order_vote_msg.quorum_cert(), - order_vote_msg.order_vote().author(), + verified_qc, + preferred_peer, ) .await }, @@ -1207,31 +1379,63 @@ impl RoundManager { result } - // Insert ordered certificate formed by aggregating order votes - async fn new_ordered_cert( + async fn new_qc_from_order_vote_msg( &mut self, - ordered_cert: WrappedLedgerInfo, - quorum_cert: &QuorumCert, + verified_qc: Arc, preferred_peer: Author, ) -> anyhow::Result<()> { - ensure!( - ordered_cert.commit_info().id() == quorum_cert.certified_block().id(), - "QuorumCert attached to order votes doesn't match" - ); - if self + match self .block_store - .get_block(ordered_cert.commit_info().id()) - .is_none() + .need_fetch_for_quorum_cert(verified_qc.as_ref()) { - ORDER_CERT_CREATED_WITHOUT_BLOCK_IN_BLOCK_STORE.inc(); + NeedFetchResult::QCAlreadyExist => Ok(()), + NeedFetchResult::QCBlockExist => { + // If the block is already in the block store, but QC isn't available in the block store, insert QC. 
+ let result = self + .block_store + .insert_quorum_cert( + verified_qc.as_ref(), + &mut self.create_block_retriever(preferred_peer), + ) + .await + .context("[RoundManager] Failed to process the QC from order vote msg"); + self.process_certificates().await?; + result + }, + NeedFetchResult::NeedFetch => { + // If the block doesn't exist, we could ideally do sync up based on the qc. + // But this could trigger fetching a lot of past blocks in case the node is lagging behind. + // So, we just log a warning here to avoid a long sequence of block fetchs. + // One of the subsequence syncinfo messages will trigger the block fetch or state sync if required. + ORDER_CERT_CREATED_WITHOUT_BLOCK_IN_BLOCK_STORE.inc(); + sample!( + SampleRate::Duration(Duration::from_millis(200)), + info!( + "Ordered certificate created without block in block store: {:?}", + verified_qc.certified_block() + ); + ); + Err(anyhow::anyhow!( + "Ordered certificate created without block in block store" + )) + }, + NeedFetchResult::QCRoundBeforeRoot => { + Err(anyhow::anyhow!("Ordered certificate is old")) + }, } - self.block_store - .insert_quorum_cert( - quorum_cert, - &mut self.create_block_retriever(preferred_peer), - ) - .await - .context("RoundManager] Failed to process QC in order Cert")?; + } + + // Insert ordered certificate formed by aggregating order votes + async fn new_ordered_cert( + &mut self, + ordered_cert: WrappedLedgerInfo, + verified_qc: Arc, + preferred_peer: Author, + ) -> anyhow::Result<()> { + self.new_qc_from_order_vote_msg(verified_qc, preferred_peer) + .await?; + + // If the block and qc now exist in the quorum store, insert the ordered cert let result = self .block_store .insert_ordered_cert(&ordered_cert) @@ -1287,12 +1491,19 @@ impl RoundManager { } fn new_log(&self, event: LogEvent) -> LogSchema { - LogSchema::new(event) - .round(self.round_state.current_round()) - .epoch(self.epoch_state.epoch) + Self::new_log_with_round_epoch( + event, + 
self.round_state().current_round(), + self.epoch_state().epoch, + ) + } + + fn new_log_with_round_epoch(event: LogEvent, round: Round, epoch: u64) -> LogSchema { + LogSchema::new(event).round(round).epoch(epoch) } /// Mainloop of processing messages. + #[allow(clippy::unwrap_used)] pub async fn start( mut self, mut event_rx: aptos_channel::Receiver< @@ -1366,11 +1577,27 @@ impl RoundManager { Ok(_) => trace!(RoundStateLogSchema::new(round_state)), Err(e) => { counters::ERROR_COUNT.inc(); - warn!(error = ?e, kind = error_kind(&e), RoundStateLogSchema::new(round_state)); + warn!(kind = error_kind(&e), RoundStateLogSchema::new(round_state), "Error: {:#}", e); } } } }, + Some((result, block, start_time)) = self.futures.next() => { + let elapsed = start_time.elapsed().as_secs_f64(); + let id = block.id(); + match result { + Ok(()) => { + counters::CONSENSUS_PROPOSAL_PAYLOAD_FETCH_DURATION.with_label_values(&["success"]).observe(elapsed); + if let Err(e) = monitor!("payload_fetch_proposal_process", self.check_backpressure_and_process_proposal(block)).await { + warn!("failed process proposal after payload fetch for block {}: {}", id, e); + } + }, + Err(err) => { + counters::CONSENSUS_PROPOSAL_PAYLOAD_FETCH_DURATION.with_label_values(&["error"]).observe(elapsed); + warn!("unable to fetch payload for block {}: {}", id, err); + }, + }; + }, (peer_id, event) = event_rx.select_next_some() => { let result = match event { VerifiedEvent::VoteMsg(vote_msg) => { @@ -1398,17 +1625,17 @@ impl RoundManager { Ok(_) => trace!(RoundStateLogSchema::new(round_state)), Err(e) => { counters::ERROR_COUNT.inc(); - warn!(error = ?e, kind = error_kind(&e), RoundStateLogSchema::new(round_state)); + warn!(kind = error_kind(&e), RoundStateLogSchema::new(round_state), "Error: {:#}", e); } } - } + }, } } info!(epoch = self.epoch_state().epoch, "RoundManager stopped"); } #[cfg(feature = "failpoints")] - fn check_whether_to_inject_reconfiguration_error(&self) -> bool { + fn 
check_whether_to_inject_reconfiguration_error() -> bool { fail_point!("consensus::inject_reconfiguration_error", |_| true); false } @@ -1420,7 +1647,8 @@ impl RoundManager { /// It's only enabled with fault injection (failpoints feature). #[cfg(feature = "failpoints")] async fn attempt_to_inject_reconfiguration_error( - &self, + epoch_state: Arc, + network: Arc, proposal_msg: &ProposalMsg, ) -> anyhow::Result<()> { let block_data = proposal_msg.proposal().block_data(); @@ -1433,13 +1661,12 @@ impl RoundManager { block_data.round() == block_data.quorum_cert().certified_block().round() + 1; let should_inject = direct_suffix && continuous_round; if should_inject { - let mut half_peers: Vec<_> = self - .epoch_state + let mut half_peers: Vec<_> = epoch_state .verifier .get_ordered_account_addresses_iter() .collect(); half_peers.truncate(half_peers.len() / 2); - self.network + network .send_proposal(proposal_msg.clone(), half_peers) .await; Err(anyhow::anyhow!("Injected error in reconfiguration suffix")) diff --git a/consensus/src/round_manager_fuzzing.rs b/consensus/src/round_manager_fuzzing.rs index 8c58935500ffc..ab7a14740624f 100644 --- a/consensus/src/round_manager_fuzzing.rs +++ b/consensus/src/round_manager_fuzzing.rs @@ -2,6 +2,8 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. 
// SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::unwrap_used)] + use crate::{ block_storage::{pending_blocks::PendingBlocks, BlockStore}, liveness::{ @@ -14,7 +16,7 @@ use crate::{ metrics_safety_rules::MetricsSafetyRules, network::NetworkSender, network_interface::{ConsensusNetworkClient, DIRECT_SEND, RPC}, - payload_manager::PayloadManager, + payload_manager::DirectMempoolPayloadManager, persistent_liveness_storage::{PersistentLivenessStorage, RecoveryData}, pipeline::execution_client::DummyExecutionClient, round_manager::RoundManager, @@ -26,7 +28,7 @@ use aptos_config::{ config::{ConsensusConfig, QcAggregatorType}, network_id::NetworkId, }; -use aptos_consensus_types::proposal_msg::ProposalMsg; +use aptos_consensus_types::{proposal_msg::ProposalMsg, utils::PayloadTxnsSize}; use aptos_infallible::Mutex; use aptos_network::{ application::{interface::NetworkClient, storage::PeersAndMetadata}, @@ -56,10 +58,10 @@ use tokio::runtime::Runtime; // This generates a proposal for round 1 pub fn generate_corpus_proposal() -> Vec { - let mut round_manager = create_node_for_fuzzing(); + let round_manager = create_node_for_fuzzing(); block_on(async { let proposal = round_manager - .generate_proposal(NewRoundEvent { + .generate_proposal_for_test(NewRoundEvent { round: 1, reason: NewRoundReason::QCReady, timeout: std::time::Duration::new(5, 0), @@ -90,7 +92,7 @@ fn build_empty_store( 10, // max pruned blocks in mem Arc::new(SimulatedTimeService::new()), 10, - Arc::from(PayloadManager::DirectMempool), + Arc::from(DirectMempoolPayloadManager::new()), false, Arc::new(Mutex::new(PendingBlocks::new())), )) @@ -182,11 +184,11 @@ fn create_node_for_fuzzing() -> RoundManager { Arc::new(MockPayloadManager::new(None)), time_service, Duration::ZERO, + PayloadTxnsSize::new(1, 1024), 1, - 1024, - 1, - 1024, + PayloadTxnsSize::new(1, 1024), 10, + 1, PipelineBackpressureConfig::new_no_backoff(), ChainHealthBackoffConfig::new_no_backoff(), false, diff --git 
a/consensus/src/round_manager_test.rs b/consensus/src/round_manager_test.rs index c5a4d3d1e23dd..a01fef7b06bab 100644 --- a/consensus/src/round_manager_test.rs +++ b/consensus/src/round_manager_test.rs @@ -4,6 +4,7 @@ use crate::{ block_storage::{pending_blocks::PendingBlocks, BlockReader, BlockStore}, + counters, liveness::{ proposal_generator::{ ChainHealthBackoffConfig, PipelineBackpressureConfig, ProposalGenerator, @@ -16,7 +17,7 @@ use crate::{ network::{IncomingBlockRetrievalRequest, NetworkSender}, network_interface::{CommitMessage, ConsensusMsg, ConsensusNetworkClient, DIRECT_SEND, RPC}, network_tests::{NetworkPlayground, TwinId}, - payload_manager::PayloadManager, + payload_manager::DirectMempoolPayloadManager, persistent_liveness_storage::RecoveryData, pipeline::buffer_manager::OrderedBlocks, round_manager::RoundManager, @@ -43,6 +44,7 @@ use aptos_consensus_types::{ proposal_msg::ProposalMsg, sync_info::SyncInfo, timeout_2chain::{TwoChainTimeout, TwoChainTimeoutWithPartialSignatures}, + utils::PayloadTxnsSize, vote_msg::VoteMsg, }; use aptos_crypto::HashValue; @@ -291,7 +293,7 @@ impl NodeSetup { 10, // max pruned blocks in mem time_service.clone(), 10, - Arc::from(PayloadManager::DirectMempool), + Arc::from(DirectMempoolPayloadManager::new()), false, Arc::new(Mutex::new(PendingBlocks::new())), )); @@ -303,11 +305,11 @@ impl NodeSetup { Arc::new(MockPayloadManager::new(None)), time_service.clone(), Duration::ZERO, + PayloadTxnsSize::new(20, 1000), 10, - 1000, - 5, - 500, + PayloadTxnsSize::new(5, 500), 10, + 1, PipelineBackpressureConfig::new_no_backoff(), ChainHealthBackoffConfig::new_no_backoff(), false, @@ -1146,11 +1148,13 @@ fn new_round_on_timeout_certificate() { None, ), ); + let before = counters::ERROR_COUNT.get(); assert!(node .round_manager .process_proposal_msg(old_good_proposal) .await - .is_err()); + .is_ok()); // we eat the error + assert_eq!(counters::ERROR_COUNT.get(), before + 1); // but increase the counter }); } diff --git 
a/consensus/src/state_computer.rs b/consensus/src/state_computer.rs index 2b635c06f94af..0038929bbfac7 100644 --- a/consensus/src/state_computer.rs +++ b/consensus/src/state_computer.rs @@ -9,7 +9,8 @@ use crate::{ error::StateSyncError, execution_pipeline::ExecutionPipeline, monitor, - payload_manager::PayloadManager, + payload_manager::TPayloadManager, + pipeline::pipeline_phase::CountedRequest, state_replication::{StateComputer, StateComputerCommitCallBackType}, transaction_deduper::TransactionDeduper, transaction_filter::TransactionFilter, @@ -18,39 +19,26 @@ use crate::{ }; use anyhow::Result; use aptos_consensus_notifications::ConsensusNotificationSender; -use aptos_consensus_types::{block::Block, common::Round, pipelined_block::PipelinedBlock}; +use aptos_consensus_types::{ + block::Block, common::Round, pipeline_execution_result::PipelineExecutionResult, + pipelined_block::PipelinedBlock, +}; use aptos_crypto::HashValue; -use aptos_executor_types::{BlockExecutorTrait, ExecutorResult, StateComputeResult}; +use aptos_executor_types::{BlockExecutorTrait, ExecutorResult}; use aptos_infallible::RwLock; use aptos_logger::prelude::*; use aptos_types::{ - account_address::AccountAddress, - block_executor::config::BlockExecutorConfigFromOnchain, - contract_event::ContractEvent, - epoch_state::EpochState, - ledger_info::LedgerInfoWithSignatures, - randomness::Randomness, - transaction::{SignedTransaction, Transaction}, + account_address::AccountAddress, block_executor::config::BlockExecutorConfigFromOnchain, + contract_event::ContractEvent, epoch_state::EpochState, ledger_info::LedgerInfoWithSignatures, + randomness::Randomness, transaction::Transaction, }; use fail::fail_point; use futures::{future::BoxFuture, SinkExt, StreamExt}; -use std::{boxed::Box, sync::Arc}; +use std::{boxed::Box, sync::Arc, time::Instant}; use tokio::sync::Mutex as AsyncMutex; pub type StateComputeResultFut = BoxFuture<'static, ExecutorResult>; -#[derive(Debug, PartialEq, Eq, Clone)] -pub 
struct PipelineExecutionResult { - pub input_txns: Vec, - pub result: StateComputeResult, -} - -impl PipelineExecutionResult { - pub fn new(input_txns: Vec, result: StateComputeResult) -> Self { - Self { input_txns, result } - } -} - type NotificationType = ( Box, Vec, @@ -72,7 +60,7 @@ impl LogicalTime { #[derive(Clone)] struct MutableState { validators: Arc<[AccountAddress]>, - payload_manager: Arc, + payload_manager: Arc, transaction_shuffler: Arc, block_executor_onchain_config: BlockExecutorConfigFromOnchain, transaction_deduper: Arc, @@ -99,6 +87,7 @@ impl ExecutionProxy { state_sync_notifier: Arc, handle: &tokio::runtime::Handle, txn_filter: TransactionFilter, + enable_pre_commit: bool, ) -> Self { let (tx, mut rx) = aptos_channels::new::(10, &counters::PENDING_STATE_SYNC_NOTIFICATION); @@ -115,7 +104,8 @@ impl ExecutionProxy { callback(); } }); - let execution_pipeline = ExecutionPipeline::spawn(executor.clone(), handle); + let execution_pipeline = + ExecutionPipeline::spawn(executor.clone(), handle, enable_pre_commit); Self { executor, txn_notifier, @@ -167,6 +157,7 @@ impl StateComputer for ExecutionProxy { // The parent block id. 
parent_block_id: HashValue, randomness: Option, + lifetime_guard: CountedRequest<()>, ) -> StateComputeResultFut { let block_id = block.id(); debug!( @@ -205,6 +196,7 @@ impl StateComputer for ExecutionProxy { block.new_block_metadata(&validators).into() }; + let pipeline_entry_time = Instant::now(); let fut = self .execution_pipeline .queue( @@ -213,8 +205,12 @@ impl StateComputer for ExecutionProxy { parent_block_id, transaction_generator, block_executor_onchain_config, + lifetime_guard, ) .await; + observe_block(timestamp, BlockStage::EXECUTION_PIPELINE_INSERTED); + counters::PIPELINE_ENTRY_TO_INSERTED_TIME.observe_duration(pipeline_entry_time.elapsed()); + let pipeline_inserted_timestamp = Instant::now(); Box::pin(async move { let pipeline_execution_result = fut.await?; @@ -226,6 +222,8 @@ impl StateComputer for ExecutionProxy { let result = &pipeline_execution_result.result; observe_block(timestamp, BlockStage::EXECUTED); + counters::PIPELINE_INSERTION_TO_EXECUTED_TIME + .observe_duration(pipeline_inserted_timestamp.elapsed()); let compute_status = result.compute_status_for_input_txns(); // the length of compute_status is user_txns.len() + num_vtxns + 1 due to having blockmetadata @@ -266,7 +264,6 @@ impl StateComputer for ExecutionProxy { callback: StateComputerCommitCallBackType, ) -> ExecutorResult<()> { let mut latest_logical_time = self.write_mutex.lock().await; - let mut block_ids = Vec::new(); let mut txns = Vec::new(); let mut subscribable_txn_events = Vec::new(); let mut payloads = Vec::new(); @@ -287,15 +284,20 @@ impl StateComputer for ExecutionProxy { .as_ref() .cloned() .expect("must be set within an epoch"); + let mut pre_commit_futs = Vec::with_capacity(blocks.len()); for block in blocks { - block_ids.push(block.id()); - if let Some(payload) = block.block().payload() { payloads.push(payload.clone()); } txns.extend(self.transactions_to_commit(block, &validators, is_randomness_enabled)); subscribable_txn_events.extend(block.subscribable_events()); 
+ pre_commit_futs.push(block.take_pre_commit_fut()); + } + + // wait until all blocks are committed + for pre_commit_fut in pre_commit_futs { + pre_commit_fut.await? } let executor = self.executor.clone(); @@ -304,7 +306,7 @@ impl StateComputer for ExecutionProxy { "commit_block", tokio::task::spawn_blocking(move || { executor - .commit_blocks_ext(block_ids, proof, false) + .commit_ledger(proof) .expect("Failed to commit blocks"); }) .await @@ -382,7 +384,7 @@ impl StateComputer for ExecutionProxy { fn new_epoch( &self, epoch_state: &EpochState, - payload_manager: Arc, + payload_manager: Arc, transaction_shuffler: Arc, block_executor_onchain_config: BlockExecutorConfigFromOnchain, transaction_deduper: Arc, @@ -412,12 +414,15 @@ impl StateComputer for ExecutionProxy { #[tokio::test] async fn test_commit_sync_race() { use crate::{ - error::MempoolError, transaction_deduper::create_transaction_deduper, + error::MempoolError, payload_manager::DirectMempoolPayloadManager, + transaction_deduper::create_transaction_deduper, transaction_shuffler::create_transaction_shuffler, }; use aptos_config::config::transaction_filter_type::Filter; use aptos_consensus_notifications::Error; - use aptos_executor_types::state_checkpoint_output::StateCheckpointOutput; + use aptos_executor_types::{ + state_checkpoint_output::StateCheckpointOutput, StateComputeResult, + }; use aptos_infallible::Mutex; use aptos_types::{ aggregate_signature::AggregateSignature, @@ -468,11 +473,17 @@ async fn test_commit_sync_race() { todo!() } - fn commit_blocks_ext( + fn pre_commit_block( + &self, + _block_id: HashValue, + _parent_block_id: HashValue, + ) -> ExecutorResult<()> { + todo!() + } + + fn commit_ledger( &self, - _block_ids: Vec, ledger_info_with_sigs: LedgerInfoWithSignatures, - _save_state_snapshots: bool, ) -> ExecutorResult<()> { *self.time.lock() = LogicalTime::new( ledger_info_with_sigs.ledger_info().epoch(), @@ -540,11 +551,12 @@ async fn test_commit_sync_race() { recorded_commit.clone(), 
&tokio::runtime::Handle::current(), TransactionFilter::new(Filter::empty()), + true, ); executor.new_epoch( &EpochState::empty(), - Arc::new(PayloadManager::DirectMempool), + Arc::new(DirectMempoolPayloadManager {}), create_transaction_shuffler(TransactionShufflerType::NoShuffling), BlockExecutorConfigFromOnchain::new_no_block_limit(), create_transaction_deduper(TransactionDeduperType::NoDedup), diff --git a/consensus/src/state_computer_tests.rs b/consensus/src/state_computer_tests.rs index 308eeeaea8729..b93d33a792ec4 100644 --- a/consensus/src/state_computer_tests.rs +++ b/consensus/src/state_computer_tests.rs @@ -2,7 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - error::MempoolError, payload_manager::PayloadManager, state_computer::ExecutionProxy, + error::MempoolError, payload_manager::DirectMempoolPayloadManager, + pipeline::pipeline_phase::CountedRequest, state_computer::ExecutionProxy, state_replication::StateComputer, transaction_deduper::NoOpDeduper, transaction_filter::TransactionFilter, transaction_shuffler::NoOpShuffler, txn_notifier::TxnNotifier, @@ -26,7 +27,7 @@ use aptos_types::{ validator_txn::ValidatorTransaction, }; use futures_channel::oneshot; -use std::sync::Arc; +use std::sync::{atomic::AtomicU64, Arc}; use tokio::runtime::Handle; struct DummyStateSyncNotifier { @@ -121,11 +122,17 @@ impl BlockExecutorTrait for DummyBlockExecutor { Ok(StateComputeResult::new_dummy()) } - fn commit_blocks_ext( + fn pre_commit_block( + &self, + _block_id: HashValue, + _parent_block_id: HashValue, + ) -> ExecutorResult<()> { + Ok(()) + } + + fn commit_ledger( &self, - _block_ids: Vec, _ledger_info_with_sigs: LedgerInfoWithSignatures, - _save_state_snapshots: bool, ) -> ExecutorResult<()> { Ok(()) } @@ -136,6 +143,8 @@ impl BlockExecutorTrait for DummyBlockExecutor { #[tokio::test] #[cfg(test)] async fn schedule_compute_should_discover_validator_txns() { + use crate::payload_manager::DirectMempoolPayloadManager; + let executor = 
Arc::new(DummyBlockExecutor::new()); let execution_policy = ExecutionProxy::new( @@ -144,6 +153,7 @@ async fn schedule_compute_should_discover_validator_txns() { Arc::new(DummyStateSyncNotifier::new()), &Handle::current(), TransactionFilter::new(Filter::empty()), + true, ); let validator_txn_0 = ValidatorTransaction::dummy(vec![0xFF; 99]); @@ -162,7 +172,7 @@ async fn schedule_compute_should_discover_validator_txns() { execution_policy.new_epoch( &epoch_state, - Arc::new(PayloadManager::DirectMempool), + Arc::new(DirectMempoolPayloadManager::new()), Arc::new(NoOpShuffler {}), BlockExecutorConfigFromOnchain::new_no_block_limit(), Arc::new(NoOpDeduper {}), @@ -171,7 +181,7 @@ async fn schedule_compute_should_discover_validator_txns() { // Ensure the dummy executor has received the txns. let _ = execution_policy - .schedule_compute(&block, HashValue::zero(), None) + .schedule_compute(&block, HashValue::zero(), None, dummy_guard()) .await .await; @@ -195,8 +205,9 @@ async fn commit_should_discover_validator_txns() { Arc::new(DummyBlockExecutor::new()), Arc::new(DummyTxnNotifier {}), state_sync_notifier.clone(), - &tokio::runtime::Handle::current(), + &Handle::current(), TransactionFilter::new(Filter::empty()), + true, ); let validator_txn_0 = ValidatorTransaction::dummy(vec![0xFF; 99]); @@ -224,11 +235,12 @@ async fn commit_should_discover_validator_txns() { vec![], state_compute_result, ))]; + blocks[0].mark_successful_pre_commit_for_test(); let epoch_state = EpochState::empty(); execution_policy.new_epoch( &epoch_state, - Arc::new(PayloadManager::DirectMempool), + Arc::new(DirectMempoolPayloadManager::new()), Arc::new(NoOpShuffler {}), BlockExecutorConfigFromOnchain::new_no_block_limit(), Arc::new(NoOpDeduper {}), @@ -262,3 +274,7 @@ async fn commit_should_discover_validator_txns() { assert_eq!(&validator_txn_0, supposed_validator_txn_0); assert_eq!(&validator_txn_1, supposed_validator_txn_1); } + +fn dummy_guard() -> CountedRequest<()> { + CountedRequest::new((), 
Arc::new(AtomicU64::new(0))) +} diff --git a/consensus/src/state_replication.rs b/consensus/src/state_replication.rs index 26da5fa80d163..df02ca23194c3 100644 --- a/consensus/src/state_replication.rs +++ b/consensus/src/state_replication.rs @@ -3,11 +3,9 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - error::StateSyncError, - payload_manager::PayloadManager, - state_computer::{PipelineExecutionResult, StateComputeResultFut}, - transaction_deduper::TransactionDeduper, - transaction_shuffler::TransactionShuffler, + error::StateSyncError, payload_manager::TPayloadManager, + pipeline::pipeline_phase::CountedRequest, state_computer::StateComputeResultFut, + transaction_deduper::TransactionDeduper, transaction_shuffler::TransactionShuffler, }; use anyhow::Result; use aptos_consensus_types::{block::Block, pipelined_block::PipelinedBlock}; @@ -27,22 +25,6 @@ pub type StateComputerCommitCallBackType = /// StateComputer is using proposed block ids for identifying the transactions. #[async_trait::async_trait] pub trait StateComputer: Send + Sync { - /// How to execute a sequence of transactions and obtain the next state. While some of the - /// transactions succeed, some of them can fail. - /// In case all the transactions are failed, new_state_id is equal to the previous state id. - async fn compute( - &self, - // The block that will be computed. - block: &Block, - // The parent block root hash. - parent_block_id: HashValue, - randomness: Option, - ) -> ExecutorResult { - self.schedule_compute(block, parent_block_id, randomness) - .await - .await - } - async fn schedule_compute( &self, // The block that will be computed. @@ -50,8 +32,9 @@ pub trait StateComputer: Send + Sync { // The parent block root hash. _parent_block_id: HashValue, _randomness: Option, + _lifetime_guard: CountedRequest<()>, ) -> StateComputeResultFut { - unimplemented!("This state computer does not support scheduling"); + unimplemented!(); } /// Send a successful commit. 
A future is fulfilled when the state is finalized. @@ -72,7 +55,7 @@ pub trait StateComputer: Send + Sync { fn new_epoch( &self, epoch_state: &EpochState, - payload_manager: Arc, + payload_manager: Arc, transaction_shuffler: Arc, block_executor_onchain_config: BlockExecutorConfigFromOnchain, transaction_deduper: Arc, diff --git a/consensus/src/test_utils/mock_execution_client.rs b/consensus/src/test_utils/mock_execution_client.rs index 02a20aba62acb..2649af9fd3b31 100644 --- a/consensus/src/test_utils/mock_execution_client.rs +++ b/consensus/src/test_utils/mock_execution_client.rs @@ -5,7 +5,7 @@ use crate::{ error::StateSyncError, network::{IncomingCommitRequest, IncomingRandGenRequest}, - payload_manager::PayloadManager, + payload_manager::{DirectMempoolPayloadManager, TPayloadManager}, pipeline::{ buffer_manager::OrderedBlocks, execution_client::TExecutionClient, signing_phase::CommitSignerProvider, @@ -20,7 +20,7 @@ use aptos_consensus_types::{ common::{Payload, Round}, pipelined_block::PipelinedBlock, }; -use aptos_crypto::HashValue; +use aptos_crypto::{bls12381::PrivateKey, HashValue}; use aptos_executor_types::ExecutorResult; use aptos_infallible::Mutex; use aptos_logger::prelude::*; @@ -40,7 +40,7 @@ pub struct MockExecutionClient { executor_channel: UnboundedSender, consensus_db: Arc, block_cache: Mutex>, - payload_manager: Arc, + payload_manager: Arc, } impl MockExecutionClient { @@ -54,7 +54,7 @@ impl MockExecutionClient { executor_channel, consensus_db, block_cache: Mutex::new(HashMap::new()), - payload_manager: Arc::from(PayloadManager::DirectMempool), + payload_manager: Arc::from(DirectMempoolPayloadManager::new()), } } @@ -94,16 +94,17 @@ impl MockExecutionClient { impl TExecutionClient for MockExecutionClient { async fn start_epoch( &self, + _maybe_consensus_key: Option>, _epoch_state: Arc, _commit_signer_provider: Arc, - _payload_manager: Arc, + _payload_manager: Arc, _onchain_consensus_config: &OnChainConsensusConfig, _onchain_execution_config: 
&OnChainExecutionConfig, _onchain_randomness_config: &OnChainRandomnessConfig, _rand_config: Option, _fast_rand_config: Option, _rand_msg_rx: aptos_channel::Receiver, - _highest_ordered_round: Round, + _highest_committed_round: Round, ) { } @@ -171,5 +172,9 @@ impl TExecutionClient for MockExecutionClient { Ok(()) } + async fn reset(&self, _target: &LedgerInfoWithSignatures) -> Result<()> { + Ok(()) + } + async fn end_epoch(&self) {} } diff --git a/consensus/src/test_utils/mock_payload_manager.rs b/consensus/src/test_utils/mock_payload_manager.rs index d28337e51ebfe..e62ec85b1ea9a 100644 --- a/consensus/src/test_utils/mock_payload_manager.rs +++ b/consensus/src/test_utils/mock_payload_manager.rs @@ -3,13 +3,13 @@ use crate::{ error::QuorumStoreError, - payload_client::{user::quorum_store_client::QuorumStoreClient, PayloadClient}, + payload_client::{ + user::quorum_store_client::QuorumStoreClient, PayloadClient, PayloadPullParameters, + }, }; use anyhow::Result; use aptos_consensus_types::{ - block::block_test_utils::random_payload, - common::{Payload, PayloadFilter}, - request_response::GetPayloadCommand, + block::block_test_utils::random_payload, common::Payload, request_response::GetPayloadCommand, }; use aptos_types::{ transaction::{ExecutionStatus, TransactionStatus}, @@ -19,7 +19,6 @@ use aptos_types::{ use aptos_validator_transaction_pool as vtxn_pool; use futures::{channel::mpsc, future::BoxFuture}; use rand::Rng; -use std::time::Duration; #[allow(dead_code)] pub struct MockPayloadManager { @@ -56,17 +55,9 @@ impl PayloadClient for MockPayloadManager { /// The returned future is fulfilled with the vector of SignedTransactions async fn pull_payload( &self, - _max_poll_time: Duration, - _max_size: u64, - _max_bytes: u64, - _max_inline_size: u64, - _max_inline_bytes: u64, + _params: PayloadPullParameters, _validator_txn_filter: vtxn_pool::TransactionFilter, - _user_txn_filter: PayloadFilter, _wait_callback: BoxFuture<'static, ()>, - _pending_ordering: bool, - 
_pending_uncommitted_blocks: usize, - _recent_fill_fraction: f32, ) -> Result<(Vec, Payload), QuorumStoreError> { // generate 1k txn is too slow with coverage instrumentation Ok(( diff --git a/consensus/src/test_utils/mock_quorum_store_sender.rs b/consensus/src/test_utils/mock_quorum_store_sender.rs index affff1103b49e..bd962d348b51f 100644 --- a/consensus/src/test_utils/mock_quorum_store_sender.rs +++ b/consensus/src/test_utils/mock_quorum_store_sender.rs @@ -26,13 +26,6 @@ impl MockQuorumStoreSender { #[async_trait::async_trait] impl QuorumStoreSender for MockQuorumStoreSender { - async fn send_batch_request(&self, request: BatchRequest, recipients: Vec) { - self.tx - .send((ConsensusMsg::BatchRequestMsg(Box::new(request)), recipients)) - .await - .expect("could not send"); - } - async fn request_batch( &self, _request: BatchRequest, @@ -42,13 +35,6 @@ impl QuorumStoreSender for MockQuorumStoreSender { unimplemented!(); } - async fn send_batch(&self, batch: Batch, recipients: Vec) { - self.tx - .send((ConsensusMsg::BatchResponse(Box::new(batch)), recipients)) - .await - .expect("could not send"); - } - async fn send_signed_batch_info_msg( &self, signed_batch_infos: Vec, @@ -76,7 +62,7 @@ impl QuorumStoreSender for MockQuorumStoreSender { vec![], )) .await - .unwrap(); + .expect("We should be able to send the proof of store message"); } async fn send_proof_of_store_msg_to_self(&mut self, _proof_of_stores: Vec) { diff --git a/consensus/src/test_utils/mock_state_computer.rs b/consensus/src/test_utils/mock_state_computer.rs index aeef2a0ef6e97..ad602f93f4f75 100644 --- a/consensus/src/test_utils/mock_state_computer.rs +++ b/consensus/src/test_utils/mock_state_computer.rs @@ -4,15 +4,18 @@ use crate::{ error::StateSyncError, - payload_manager::PayloadManager, - pipeline::buffer_manager::OrderedBlocks, - state_computer::{PipelineExecutionResult, StateComputeResultFut}, + payload_manager::TPayloadManager, + pipeline::{buffer_manager::OrderedBlocks, 
pipeline_phase::CountedRequest}, + state_computer::StateComputeResultFut, state_replication::{StateComputer, StateComputerCommitCallBackType}, transaction_deduper::TransactionDeduper, transaction_shuffler::TransactionShuffler, }; use anyhow::Result; -use aptos_consensus_types::{block::Block, pipelined_block::PipelinedBlock}; +use aptos_consensus_types::{ + block::Block, pipeline_execution_result::PipelineExecutionResult, + pipelined_block::PipelinedBlock, +}; use aptos_crypto::HashValue; use aptos_executor_types::{ExecutorError, ExecutorResult, StateComputeResult}; use aptos_logger::debug; @@ -22,7 +25,7 @@ use aptos_types::{ }; use futures::SinkExt; use futures_channel::mpsc::UnboundedSender; -use std::sync::Arc; +use std::{sync::Arc, time::Duration}; pub struct EmptyStateComputer { executor_channel: UnboundedSender, @@ -36,18 +39,6 @@ impl EmptyStateComputer { #[async_trait::async_trait] impl StateComputer for EmptyStateComputer { - async fn compute( - &self, - _block: &Block, - _parent_block_id: HashValue, - _randomness: Option, - ) -> ExecutorResult { - Ok(PipelineExecutionResult::new( - vec![], - StateComputeResult::new_dummy(), - )) - } - async fn commit( &self, blocks: &[Arc], @@ -83,7 +74,7 @@ impl StateComputer for EmptyStateComputer { fn new_epoch( &self, _: &EpochState, - _: Arc, + _: Arc, _: Arc, _: BlockExecutorConfigFromOnchain, _: Arc, @@ -120,6 +111,7 @@ impl StateComputer for RandomComputeResultStateComputer { _block: &Block, parent_block_id: HashValue, _randomness: Option, + _lifetime_guard: CountedRequest<()>, ) -> StateComputeResultFut { // trapdoor for Execution Error let res = if parent_block_id == self.random_compute_result_root_hash { @@ -129,7 +121,14 @@ impl StateComputer for RandomComputeResultStateComputer { self.random_compute_result_root_hash, )) }; - let pipeline_execution_res = res.map(|res| PipelineExecutionResult::new(vec![], res)); + let pipeline_execution_res = res.map(|res| { + PipelineExecutionResult::new( + vec![], + res, + 
Duration::from_secs(0), + Box::pin(async { Ok(()) }), + ) + }); Box::pin(async move { pipeline_execution_res }) } @@ -149,7 +148,7 @@ impl StateComputer for RandomComputeResultStateComputer { fn new_epoch( &self, _: &EpochState, - _: Arc, + _: Arc, _: Arc, _: BlockExecutorConfigFromOnchain, _: Arc, diff --git a/consensus/src/test_utils/mod.rs b/consensus/src/test_utils/mod.rs index 406c06ca19023..b556d9bfc7ed8 100644 --- a/consensus/src/test_utils/mod.rs +++ b/consensus/src/test_utils/mod.rs @@ -2,7 +2,11 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::block_storage::{BlockReader, BlockStore}; +#![allow(clippy::unwrap_used)] +use crate::{ + block_storage::{BlockReader, BlockStore}, + payload_manager::DirectMempoolPayloadManager, +}; use aptos_consensus_types::{ block::{block_test_utils::certificate_for_genesis, Block}, common::{Author, Round}, @@ -25,8 +29,7 @@ mod mock_state_computer; mod mock_storage; use crate::{ - block_storage::pending_blocks::PendingBlocks, payload_manager::PayloadManager, - pipeline::execution_client::DummyExecutionClient, + block_storage::pending_blocks::PendingBlocks, pipeline::execution_client::DummyExecutionClient, util::mock_time_service::SimulatedTimeService, }; use aptos_consensus_types::{block::block_test_utils::gen_test_certificate, common::Payload}; @@ -90,7 +93,7 @@ pub fn build_empty_tree() -> Arc { 10, // max pruned blocks in mem Arc::new(SimulatedTimeService::new()), 10, - Arc::from(PayloadManager::DirectMempool), + Arc::from(DirectMempoolPayloadManager::new()), false, Arc::new(Mutex::new(PendingBlocks::new())), )) diff --git a/consensus/src/transaction_shuffler/fairness/conflict_key/entry_fun.rs b/consensus/src/transaction_shuffler/deprecated_fairness/conflict_key/entry_fun.rs similarity index 94% rename from consensus/src/transaction_shuffler/fairness/conflict_key/entry_fun.rs rename to 
consensus/src/transaction_shuffler/deprecated_fairness/conflict_key/entry_fun.rs index 140191b1d523f..7a3363dd9e959 100644 --- a/consensus/src/transaction_shuffler/fairness/conflict_key/entry_fun.rs +++ b/consensus/src/transaction_shuffler/deprecated_fairness/conflict_key/entry_fun.rs @@ -1,7 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::transaction_shuffler::fairness::conflict_key::ConflictKey; +use crate::transaction_shuffler::deprecated_fairness::conflict_key::ConflictKey; use aptos_types::transaction::{SignedTransaction, TransactionPayload}; use move_core_types::{identifier::Identifier, language_storage::ModuleId}; diff --git a/consensus/src/transaction_shuffler/fairness/conflict_key/entry_fun_module.rs b/consensus/src/transaction_shuffler/deprecated_fairness/conflict_key/entry_fun_module.rs similarity index 93% rename from consensus/src/transaction_shuffler/fairness/conflict_key/entry_fun_module.rs rename to consensus/src/transaction_shuffler/deprecated_fairness/conflict_key/entry_fun_module.rs index 56979f98d6d29..948d2e8baa330 100644 --- a/consensus/src/transaction_shuffler/fairness/conflict_key/entry_fun_module.rs +++ b/consensus/src/transaction_shuffler/deprecated_fairness/conflict_key/entry_fun_module.rs @@ -1,7 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::transaction_shuffler::fairness::conflict_key::ConflictKey; +use crate::transaction_shuffler::deprecated_fairness::conflict_key::ConflictKey; use aptos_types::transaction::{SignedTransaction, TransactionPayload}; use move_core_types::language_storage::ModuleId; diff --git a/consensus/src/transaction_shuffler/fairness/conflict_key/mod.rs b/consensus/src/transaction_shuffler/deprecated_fairness/conflict_key/mod.rs similarity index 98% rename from consensus/src/transaction_shuffler/fairness/conflict_key/mod.rs rename to consensus/src/transaction_shuffler/deprecated_fairness/conflict_key/mod.rs index 
caaa2d75a37ad..1e233dbdc29fc 100644 --- a/consensus/src/transaction_shuffler/fairness/conflict_key/mod.rs +++ b/consensus/src/transaction_shuffler/deprecated_fairness/conflict_key/mod.rs @@ -1,7 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::transaction_shuffler::fairness::TxnIdx; +use crate::transaction_shuffler::deprecated_fairness::TxnIdx; use std::{collections::HashMap, hash::Hash}; pub(crate) mod entry_fun; diff --git a/consensus/src/transaction_shuffler/fairness/conflict_key/test_utils.rs b/consensus/src/transaction_shuffler/deprecated_fairness/conflict_key/test_utils.rs similarity index 98% rename from consensus/src/transaction_shuffler/fairness/conflict_key/test_utils.rs rename to consensus/src/transaction_shuffler/deprecated_fairness/conflict_key/test_utils.rs index f9b9e9e72fd22..fec79e2c33617 100644 --- a/consensus/src/transaction_shuffler/fairness/conflict_key/test_utils.rs +++ b/consensus/src/transaction_shuffler/deprecated_fairness/conflict_key/test_utils.rs @@ -1,7 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::transaction_shuffler::fairness::conflict_key::{ +use crate::transaction_shuffler::deprecated_fairness::conflict_key::{ ConflictKey, ConflictKeyId, ConflictKeyRegistry, }; use proptest::prelude::*; diff --git a/consensus/src/transaction_shuffler/fairness/conflict_key/txn_sender.rs b/consensus/src/transaction_shuffler/deprecated_fairness/conflict_key/txn_sender.rs similarity index 85% rename from consensus/src/transaction_shuffler/fairness/conflict_key/txn_sender.rs rename to consensus/src/transaction_shuffler/deprecated_fairness/conflict_key/txn_sender.rs index 3ec1905d869cb..a742e7c240573 100644 --- a/consensus/src/transaction_shuffler/fairness/conflict_key/txn_sender.rs +++ b/consensus/src/transaction_shuffler/deprecated_fairness/conflict_key/txn_sender.rs @@ -1,7 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use 
crate::transaction_shuffler::fairness::conflict_key::ConflictKey; +use crate::transaction_shuffler::deprecated_fairness::conflict_key::ConflictKey; use aptos_types::transaction::SignedTransaction; use move_core_types::account_address::AccountAddress; diff --git a/consensus/src/transaction_shuffler/fairness/conflict_zone.rs b/consensus/src/transaction_shuffler/deprecated_fairness/conflict_zone.rs similarity index 85% rename from consensus/src/transaction_shuffler/fairness/conflict_zone.rs rename to consensus/src/transaction_shuffler/deprecated_fairness/conflict_zone.rs index 983baaadedfb9..a685cf701ac07 100644 --- a/consensus/src/transaction_shuffler/fairness/conflict_zone.rs +++ b/consensus/src/transaction_shuffler/deprecated_fairness/conflict_zone.rs @@ -1,7 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::transaction_shuffler::fairness::{ +use crate::transaction_shuffler::deprecated_fairness::{ conflict_key::{ConflictKeyId, ConflictKeyRegistry, MapByKeyId}, TxnIdx, }; @@ -57,11 +57,12 @@ impl<'a> ConflictZone<'a> { *self.counts_by_id.get_mut(key_id) += 1; self.sliding_window.push_back(key_id); if self.sliding_window.len() > self.sliding_window_size { - let removed_key_id = self.sliding_window.pop_front().unwrap(); - let count = self.counts_by_id.get_mut(removed_key_id); - *count -= 1; - if *count == 0 && !self.key_registry.is_conflict_exempt(removed_key_id) { - return Some(removed_key_id); + if let Some(removed_key_id) = self.sliding_window.pop_front() { + let count = self.counts_by_id.get_mut(removed_key_id); + *count -= 1; + if *count == 0 && !self.key_registry.is_conflict_exempt(removed_key_id) { + return Some(removed_key_id); + } } } None diff --git a/consensus/src/transaction_shuffler/fairness/mod.rs b/consensus/src/transaction_shuffler/deprecated_fairness/mod.rs similarity index 98% rename from consensus/src/transaction_shuffler/fairness/mod.rs rename to consensus/src/transaction_shuffler/deprecated_fairness/mod.rs 
index 5c08ee920ce1a..dd6c3a5aeb72e 100644 --- a/consensus/src/transaction_shuffler/fairness/mod.rs +++ b/consensus/src/transaction_shuffler/deprecated_fairness/mod.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::transaction_shuffler::{ - fairness::{ + deprecated_fairness::{ conflict_key::{ entry_fun::EntryFunKey, entry_fun_module::EntryFunModuleKey, txn_sender::TxnSenderKey, ConflictKeyRegistry, @@ -178,7 +178,7 @@ impl<'a, const NUM_CONFLICT_ZONES: usize> FairnessShufflerImpl<'a, NUM_CONFLICT_ #[cfg(test)] mod test_utils { - use crate::transaction_shuffler::fairness::FairnessShuffler; + use crate::transaction_shuffler::deprecated_fairness::FairnessShuffler; use proptest::prelude::*; impl FairnessShuffler { diff --git a/consensus/src/transaction_shuffler/fairness/pending_zone.rs b/consensus/src/transaction_shuffler/deprecated_fairness/pending_zone.rs similarity index 97% rename from consensus/src/transaction_shuffler/fairness/pending_zone.rs rename to consensus/src/transaction_shuffler/deprecated_fairness/pending_zone.rs index 8de932b65e601..eb9c2af18455f 100644 --- a/consensus/src/transaction_shuffler/fairness/pending_zone.rs +++ b/consensus/src/transaction_shuffler/deprecated_fairness/pending_zone.rs @@ -1,7 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::transaction_shuffler::fairness::{ +use crate::transaction_shuffler::deprecated_fairness::{ conflict_key::{ConflictKeyId, ConflictKeyRegistry, MapByKeyId}, TxnIdx, }; diff --git a/consensus/src/transaction_shuffler/fairness/selection_tracker.rs b/consensus/src/transaction_shuffler/deprecated_fairness/selection_tracker.rs similarity index 93% rename from consensus/src/transaction_shuffler/fairness/selection_tracker.rs rename to consensus/src/transaction_shuffler/deprecated_fairness/selection_tracker.rs index 571f817656ab8..3e10d4368642f 100644 --- a/consensus/src/transaction_shuffler/fairness/selection_tracker.rs +++ 
b/consensus/src/transaction_shuffler/deprecated_fairness/selection_tracker.rs @@ -1,7 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::transaction_shuffler::fairness::TxnIdx; +use crate::transaction_shuffler::deprecated_fairness::TxnIdx; pub struct SelectionTracker { selected_markers: Vec, diff --git a/consensus/src/transaction_shuffler/fairness/tests/manual.rs b/consensus/src/transaction_shuffler/deprecated_fairness/tests/manual.rs similarity index 98% rename from consensus/src/transaction_shuffler/fairness/tests/manual.rs rename to consensus/src/transaction_shuffler/deprecated_fairness/tests/manual.rs index 69cd94c141216..753ca542a8d3f 100644 --- a/consensus/src/transaction_shuffler/fairness/tests/manual.rs +++ b/consensus/src/transaction_shuffler/deprecated_fairness/tests/manual.rs @@ -1,7 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::transaction_shuffler::fairness::{ +use crate::transaction_shuffler::deprecated_fairness::{ conflict_key::ConflictKeyRegistry, FairnessShuffler, FairnessShufflerImpl, }; diff --git a/consensus/src/transaction_shuffler/fairness/tests/mod.rs b/consensus/src/transaction_shuffler/deprecated_fairness/tests/mod.rs similarity index 100% rename from consensus/src/transaction_shuffler/fairness/tests/mod.rs rename to consensus/src/transaction_shuffler/deprecated_fairness/tests/mod.rs diff --git a/consensus/src/transaction_shuffler/fairness/tests/proptests.rs b/consensus/src/transaction_shuffler/deprecated_fairness/tests/proptests.rs similarity index 98% rename from consensus/src/transaction_shuffler/fairness/tests/proptests.rs rename to consensus/src/transaction_shuffler/deprecated_fairness/tests/proptests.rs index 0195997fbf768..6d111b03d28e6 100644 --- a/consensus/src/transaction_shuffler/fairness/tests/proptests.rs +++ b/consensus/src/transaction_shuffler/deprecated_fairness/tests/proptests.rs @@ -1,7 +1,7 @@ // Copyright © Aptos Foundation // 
SPDX-License-Identifier: Apache-2.0 -use crate::transaction_shuffler::fairness::{ +use crate::transaction_shuffler::deprecated_fairness::{ conflict_key::{ test_utils::{FakeEntryFunKey, FakeEntryFunModuleKey, FakeSenderKey, FakeTxn}, ConflictKeyRegistry, MapByKeyId, diff --git a/consensus/src/transaction_shuffler/mod.rs b/consensus/src/transaction_shuffler/mod.rs index 5cc43f2a2e8da..a75ce60e0cdd6 100644 --- a/consensus/src/transaction_shuffler/mod.rs +++ b/consensus/src/transaction_shuffler/mod.rs @@ -2,18 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 use aptos_logger::info; -use aptos_types::{ - on_chain_config::{ - TransactionShufflerType, - TransactionShufflerType::{DeprecatedSenderAwareV1, NoShuffling, SenderAwareV2}, - }, - transaction::SignedTransaction, -}; +use aptos_types::{on_chain_config::TransactionShufflerType, transaction::SignedTransaction}; use sender_aware::SenderAwareShuffler; use std::sync::Arc; -mod fairness; +mod deprecated_fairness; mod sender_aware; +mod use_case_aware; /// Interface to shuffle transactions pub trait TransactionShuffler: Send + Sync { @@ -32,6 +27,8 @@ impl TransactionShuffler for NoOpShuffler { pub fn create_transaction_shuffler( shuffler_type: TransactionShufflerType, ) -> Arc { + use TransactionShufflerType::*; + match shuffler_type { NoShuffling => { info!("Using no-op transaction shuffling"); @@ -48,7 +45,7 @@ pub fn create_transaction_shuffler( ); Arc::new(SenderAwareShuffler::new(conflict_window_size as usize)) }, - TransactionShufflerType::Fairness { + DeprecatedFairness { sender_conflict_window_size, module_conflict_window_size, entry_fun_conflict_window_size, @@ -59,11 +56,27 @@ pub fn create_transaction_shuffler( module_conflict_window_size, entry_fun_conflict_window_size ); - Arc::new(fairness::FairnessShuffler { + Arc::new(deprecated_fairness::FairnessShuffler { sender_conflict_window_size: sender_conflict_window_size as usize, module_conflict_window_size: module_conflict_window_size as usize, 
entry_fun_conflict_window_size: entry_fun_conflict_window_size as usize, }) }, + UseCaseAware { + sender_spread_factor, + platform_use_case_spread_factor, + user_use_case_spread_factor, + } => { + let config = use_case_aware::Config { + sender_spread_factor, + platform_use_case_spread_factor, + user_use_case_spread_factor, + }; + info!( + config = ?config, + "Using use case aware transaction shuffling." + ); + Arc::new(use_case_aware::UseCaseAwareShuffler { config }) + }, } } diff --git a/consensus/src/transaction_shuffler/sender_aware.rs b/consensus/src/transaction_shuffler/sender_aware.rs index 936a9f7e9d0a5..1f1a4ffeabc8d 100644 --- a/consensus/src/transaction_shuffler/sender_aware.rs +++ b/consensus/src/transaction_shuffler/sender_aware.rs @@ -70,7 +70,9 @@ impl TransactionShuffler for SenderAwareShuffler { // If we can't find any candidate in above steps, then lastly // add pending transactions in the order if we can't find any other candidate - pending_txns.remove_first_pending().unwrap() + pending_txns + .remove_first_pending() + .expect("Pending should return a transaction") }; while sliding_window.num_txns() < num_transactions { let txn = next_to_add(&mut sliding_window); @@ -131,11 +133,10 @@ impl PendingTransactions { pub fn remove_first_pending(&mut self) -> Option { while let Some(txn) = self.ordered_txns.pop_front() { let sender = txn.sender(); - // We don't remove the txns from ordered_txns when remove_pending_from_sender is called. - // So it is possible that the ordered_txns has some transactions that are not pending - // anymore. 
- if Some(txn).as_ref() == self.txns_by_senders.get(&sender).unwrap().front() { - return self.remove_pending_from_sender(sender); + if let Some(sender_queue) = self.txns_by_senders.get(&sender) { + if Some(txn).as_ref() == sender_queue.front() { + return self.remove_pending_from_sender(sender); + } } } None @@ -197,11 +198,18 @@ impl SlidingWindowState { /// Returns the sender which was dropped off of the conflict window in previous iteration. pub fn last_dropped_sender(&self) -> Option { - let prev_start_index = self.start_index - 1; - if prev_start_index >= 0 { - let last_sender = self.txns.get(prev_start_index as usize).unwrap().sender(); - if *self.senders_in_window.get(&last_sender).unwrap() == 0 { - return Some(last_sender); + if self.start_index > 0 { + let prev_start_index = self.start_index - 1; + if let Some(last_sender) = self + .txns + .get(prev_start_index as usize) + .map(|txn| txn.sender()) + { + if let Some(&count) = self.senders_in_window.get(&last_sender) { + if count == 0 { + return Some(last_sender); + } + } } } None diff --git a/consensus/src/transaction_shuffler/use_case_aware/delayed_queue.rs b/consensus/src/transaction_shuffler/use_case_aware/delayed_queue.rs new file mode 100644 index 0000000000000..f79cc03b51246 --- /dev/null +++ b/consensus/src/transaction_shuffler/use_case_aware/delayed_queue.rs @@ -0,0 +1,540 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::transaction_shuffler::use_case_aware::{ + types::{InputIdx, OutputIdx}, + utils::StrictMap, + Config, +}; +use aptos_types::transaction::use_case::{UseCaseAwareTransaction, UseCaseKey}; +use move_core_types::account_address::AccountAddress; +use std::{ + collections::{hash_map, BTreeMap, HashMap, VecDeque}, + fmt::Debug, +}; + +/// Key used in priority queues. +/// Part of the key is a txn's input index which guarantees in any priority queue, of use cases or +/// accounts, there are not two entries that share the same delay key. 
Also, when `try_delay_till` +/// is identical, an entry relating to an earlier txn is prioritized. +#[derive(Clone, Copy, Eq, Ord, PartialEq, PartialOrd)] +struct DelayKey { + try_delay_till: OutputIdx, + input_idx: InputIdx, +} + +impl DelayKey { + fn new(try_delay_till: OutputIdx, input_idx: InputIdx) -> Self { + Self { + try_delay_till, + input_idx, + } + } +} + +impl Debug for DelayKey { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "DelayKey({}, {})", self.try_delay_till, self.input_idx) + } +} + +struct TxnWithInputIdx { + input_idx: InputIdx, + txn: Txn, +} + +impl Debug for TxnWithInputIdx +where + Txn: Debug, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Txn({}: {:?})", self.input_idx, self.txn) + } +} + +#[derive(Debug)] +struct Account { + try_delay_till: OutputIdx, + /// Head txn input_idx, tracked for use when the txns queue is empty, in which case + /// it keeps the value before the last txn was dequeued. 
+ input_idx: InputIdx, + txns: VecDeque>, +} + +impl Account +where + Txn: UseCaseAwareTransaction, +{ + fn new_with_txn(try_delay_till: OutputIdx, input_idx: InputIdx, txn: Txn) -> Self { + let txns = vec![TxnWithInputIdx { input_idx, txn }].into(); + Self { + try_delay_till, + input_idx, + txns, + } + } + + fn new_empty(try_delay_till: OutputIdx, input_idx: InputIdx) -> Self { + Self { + try_delay_till, + input_idx, + txns: VecDeque::new(), + } + } + + fn is_empty(&self) -> bool { + self.txns.is_empty() + } + + fn delay_key(&self) -> DelayKey { + DelayKey { + try_delay_till: self.try_delay_till, + input_idx: self.input_idx, + } + } + + fn expect_first_txn(&self) -> &TxnWithInputIdx { + self.txns.front().expect("Must exist.") + } + + fn expect_use_case_key(&self) -> UseCaseKey { + self.expect_first_txn().txn.parse_use_case() + } + + fn queue_txn(&mut self, input_idx: InputIdx, txn: Txn) { + if let Some(last_txn) = self.txns.back() { + assert!(last_txn.input_idx < input_idx); + } else { + self.input_idx = input_idx; + } + self.txns.push_back(TxnWithInputIdx { input_idx, txn }); + } + + fn expect_dequeue_txn(&mut self) -> TxnWithInputIdx { + let txn = self.txns.pop_front().expect("Must exist."); + if let Some(next_txn) = self.txns.front() { + self.input_idx = next_txn.input_idx; + } + txn + } + + fn update_try_delay_till(&mut self, try_delay_till: OutputIdx) { + self.try_delay_till = try_delay_till; + } +} + +#[derive(Debug)] +struct UseCase { + try_delay_till: OutputIdx, + /// Head account input_idx, tracked for use when the accounts queue is empty, in which case + /// it keeps the value before the last account was removed. 
+ input_idx: InputIdx, + account_by_delay: BTreeMap, +} + +impl UseCase { + fn new_empty(try_delay_till: OutputIdx, input_idx: InputIdx) -> Self { + Self { + try_delay_till, + input_idx, + account_by_delay: BTreeMap::new(), + } + } + + fn new_with_account( + try_delay_till: OutputIdx, + address: AccountAddress, + account: &Account, + ) -> Self + where + Txn: UseCaseAwareTransaction, + { + let mut account_by_delay = BTreeMap::new(); + account_by_delay.strict_insert(account.delay_key(), address); + Self { + try_delay_till, + input_idx: account.input_idx, + account_by_delay, + } + } + + fn is_empty(&self) -> bool { + self.account_by_delay.is_empty() + } + + fn delay_key(&self) -> DelayKey { + // If head account will be ready later than the use case itself, respect that. + let try_delay_till = std::cmp::max( + self.try_delay_till, + self.account_by_delay + .first_key_value() + .map_or(0, |(k, _)| k.try_delay_till), + ); + + DelayKey { + try_delay_till, + input_idx: self.input_idx, + } + } + + /// Expects head account to exist (otherwise panic) and return both the DelayKey and the + /// account address for the entry. 
+ fn expect_pop_head_account(&mut self) -> (DelayKey, AccountAddress) { + let (account_delay_key, address) = self.account_by_delay.pop_first().expect("Must exist."); + if let Some((next_account_delay_key, _)) = self.account_by_delay.first_key_value() { + self.input_idx = next_account_delay_key.input_idx; + } + (account_delay_key, address) + } + + fn update_try_delay_till(&mut self, try_delay_till: OutputIdx) { + self.try_delay_till = try_delay_till; + } + + fn add_account(&mut self, address: AccountAddress, account: &Account) + where + Txn: UseCaseAwareTransaction, + { + let account_delay_key = account.delay_key(); + self.account_by_delay + .strict_insert(account_delay_key, address); + let (_, head_address) = self + .account_by_delay + .first_key_value() + .expect("Must exist."); + if head_address == &address { + self.input_idx = account_delay_key.input_idx; + } + } +} + +/// Structure to track: +/// 1. all use cases and accounts that are subject to delaying, no matter they have pending txns +/// associated or not. +/// 2. all txns that are examined and delayed previously. +/// +/// Note: +/// A delayed txn is attached to an account and the account is attached to a priority queue in a use +/// case, which has an entry in the main priority queue. +/// Empty accounts and use cases are still tracked for the delay so that a next txn in the +/// input stream is properly delayed if associated with such an account or use case. +#[derive(Debug, Default)] +pub(crate) struct DelayedQueue { + /// Registry of all accounts, each of which includes the expected output_idx to delay until and + /// a queue (might be empty) of txns by that sender. + /// + /// An empty account address is tracked in `account_placeholders_by_delay` while a non-empty + /// account address is tracked under `use_cases`. 
+ accounts: HashMap>, + /// Registry of all use cases, each of which includes the expected output_idx to delay until and + /// a priority queue (might be empty) of non-empty accounts whose head txn belongs to that use case. + /// + /// An empty use case is tracked in `use_case_placeholders_by_delay` while a non-empty use case + /// is tracked in the top level `use_cases_by_delay`. + use_cases: HashMap, + + /// Main delay queue of txns. All use cases are non-empty of non-empty accounts. + /// All pending txns are reachable from this nested structure. + /// + /// The DelayKey is derived from the head account's DelayKey combined with the use case's own + /// DelayKey. + /// + /// The head txn of the head account of the head use case in this nested structure is the + /// next txn to be possibly ready. + use_cases_by_delay: BTreeMap, + /// Empty account addresses by the DelayKey (those w/o known delayed txns), kept to track the delay. + account_placeholders_by_delay: BTreeMap, + /// Empty UseCaseKeys by the DelayKey (those w/o known delayed txns), kept to track the delay. + use_case_placeholders_by_delay: BTreeMap, + + /// Externally set output index; when an item has try_delay_till <= output_idx, it's deemed ready + output_idx: OutputIdx, + + config: Config, +} + +impl DelayedQueue +where + Txn: UseCaseAwareTransaction, +{ + pub fn new(config: Config) -> Self { + Self { + accounts: HashMap::new(), + use_cases: HashMap::new(), + + account_placeholders_by_delay: BTreeMap::new(), + use_case_placeholders_by_delay: BTreeMap::new(), + + use_cases_by_delay: BTreeMap::new(), + + output_idx: 0, + + config, + } + } + + /// Remove stale (empty use cases and accounts with try_delay_till <= self.output_idx) placeholders. 
+ fn drain_placeholders(&mut self) { + let least_to_keep = DelayKey::new(self.output_idx + 1, 0); + + let remaining_use_case_placeholders = self + .use_case_placeholders_by_delay + .split_off(&least_to_keep); + let remaining_account_placeholders = + self.account_placeholders_by_delay.split_off(&least_to_keep); + + self.use_case_placeholders_by_delay + .iter() + .for_each(|(_delay_key, use_case_key)| self.use_cases.strict_remove(use_case_key)); + self.account_placeholders_by_delay + .iter() + .for_each(|(_delay_key, address)| self.accounts.strict_remove(address)); + + self.use_case_placeholders_by_delay = remaining_use_case_placeholders; + self.account_placeholders_by_delay = remaining_account_placeholders; + } + + pub fn bump_output_idx(&mut self, output_idx: OutputIdx) { + assert!(output_idx >= self.output_idx); + // It's possible that the queue returned nothing last round hence the output idx didn't move. + if output_idx > self.output_idx { + self.output_idx = output_idx; + self.drain_placeholders(); + } + } + + pub fn pop_head(&mut self, only_if_ready: bool) -> Option { + // See if any delayed txn exists. If not, return None. + let use_case_entry = match self.use_cases_by_delay.first_entry() { + None => { + return None; + }, + Some(occupied_entry) => occupied_entry, + }; + let use_case_delay_key = use_case_entry.key(); + + // Check readiness. + if only_if_ready && use_case_delay_key.try_delay_till > self.output_idx { + return None; + } + + // Gonna return the front txn of the front account of the front use case. + + // First, both the use case and account need to be removed from the priority queues. 
+ let use_case_delay_key = *use_case_delay_key; + let use_case_key = use_case_entry.remove(); + let use_case = self.use_cases.expect_mut(&use_case_key); + let (account_delay_key, address) = use_case.expect_pop_head_account(); + assert!(account_delay_key.try_delay_till <= use_case_delay_key.try_delay_till); + assert_eq!(account_delay_key.input_idx, use_case_delay_key.input_idx); + + // Pop first txn from account (for returning it later). + let account = self.accounts.expect_mut(&address); + let txn = account.expect_dequeue_txn(); + + // Update priorities. + account.update_try_delay_till(self.output_idx + 1 + self.config.sender_spread_factor()); + use_case.update_try_delay_till( + self.output_idx + 1 + self.config.use_case_spread_factor(&use_case_key), + ); + + // Add account and original use case back to delay queues. + + if account.is_empty() { + self.account_placeholders_by_delay + .strict_insert(account.delay_key(), address); + if use_case.is_empty() { + self.use_case_placeholders_by_delay + .strict_insert(use_case.delay_key(), use_case_key.clone()); + } else { + self.use_cases_by_delay + .strict_insert(use_case.delay_key(), use_case_key.clone()); + } + } else { + // See if account now belongs to a different use case. + let new_use_case_key = account.expect_use_case_key(); + if new_use_case_key == use_case_key { + use_case.add_account(address, account); + self.use_cases_by_delay + .strict_insert(use_case.delay_key(), use_case_key.clone()); + } else { + // Account now belongs to a different use case. + + // Add original use case back to delay queue. + if use_case.is_empty() { + self.use_case_placeholders_by_delay + .strict_insert(use_case.delay_key(), use_case_key.clone()); + } else { + self.use_cases_by_delay + .strict_insert(use_case.delay_key(), use_case_key.clone()); + } + + // Add the account to the new use case. 
+ match self.use_cases.entry(new_use_case_key.clone()) { + hash_map::Entry::Occupied(mut occupied_entry) => { + // Existing use case, remove from priority queues. + let new_use_case = occupied_entry.get_mut(); + if new_use_case.is_empty() { + self.use_case_placeholders_by_delay + .strict_remove(&new_use_case.delay_key()); + } else { + self.use_cases_by_delay + .strict_remove(&new_use_case.delay_key()); + } + // Add account to use case. + new_use_case.add_account(address, account); + // Add new use case back to delay queue. + self.use_cases_by_delay + .strict_insert(new_use_case.delay_key(), new_use_case_key.clone()); + }, + hash_map::Entry::Vacant(entry) => { + // Use case not tracked previously, try_delay_till = output_idx + 1 + let new_use_case = entry.insert(UseCase::new_with_account( + self.output_idx + 1, + address, + account, + )); + self.use_cases_by_delay + .strict_insert(new_use_case.delay_key(), new_use_case_key.clone()); + }, + } + } + } + + Some(txn.txn) + } + + /// Txn has to be delayed, attach it to respective account and use case. + fn queue_txn( + &mut self, + input_idx: InputIdx, + address: AccountAddress, + use_case_key: UseCaseKey, + txn: Txn, + ) { + match self.accounts.get_mut(&address) { + Some(account) => { + if account.is_empty() { + // Account placeholder exists, move it from the placeholder queue to the main queue. 
+ self.account_placeholders_by_delay + .remove(&account.delay_key()); + account.queue_txn(input_idx, txn); + match self.use_cases.entry(use_case_key.clone()) { + hash_map::Entry::Occupied(occupied) => { + let use_case = occupied.into_mut(); + if use_case.is_empty() { + self.use_case_placeholders_by_delay + .strict_remove(&use_case.delay_key()); + } else { + self.use_cases_by_delay.strict_remove(&use_case.delay_key()); + } + use_case.add_account(address, account); + self.use_cases_by_delay + .strict_insert(use_case.delay_key(), use_case_key.clone()); + }, + hash_map::Entry::Vacant(vacant) => { + // Use case not tracked previously, the use case is ready at the current + // output_idx, instead of output_idx +1 -- it makes a difference if + // a txn later in the input queue that's of the same use case but not + // blocked by account delay is tested for readiness. + let use_case = + UseCase::new_with_account(self.output_idx, address, account); + self.use_cases_by_delay + .strict_insert(use_case.delay_key(), use_case_key.clone()); + vacant.insert(use_case); + }, + } + } else { + // Account tracked and not empty, so appending a new txn to it won't affect positions + // in delay queues + account.queue_txn(input_idx, txn); + } + }, + None => { + // Account not previously tracked. + let account = Account::new_with_txn(self.output_idx + 1, input_idx, txn); + // Account didn't exist before, so use case must have been tracked, otherwise the + // txn would've been selected for output, bypassing the queue. 
+ let use_case = self.use_cases.expect_mut(&use_case_key); + if use_case.is_empty() { + self.use_case_placeholders_by_delay + .strict_remove(&use_case.delay_key()); + } else { + self.use_cases_by_delay.strict_remove(&use_case.delay_key()); + } + use_case.add_account(address, &account); + + self.accounts.strict_insert(address, account); + self.use_cases_by_delay + .strict_insert(use_case.delay_key(), use_case_key.clone()); + }, + } + } + + /// Txn from input queue directly selected for output, needs to bump delays for relevant + /// account and use case. + fn update_delays_for_selected_txn( + &mut self, + input_idx: InputIdx, + address: AccountAddress, + use_case_key: UseCaseKey, + ) { + let account_try_delay_till = self.output_idx + 1 + self.config.sender_spread_factor(); + let use_case_try_delay_till = + self.output_idx + 1 + self.config.use_case_spread_factor(&use_case_key); + + match self.use_cases.entry(use_case_key.clone()) { + hash_map::Entry::Occupied(occupied) => { + let use_case = occupied.into_mut(); + // Txn wouldn't have been selected for output if the use case is empty (tracking + // for a try_delay_till > self.output_idx) + assert!(!use_case.is_empty()); + + self.use_cases_by_delay.strict_remove(&use_case.delay_key()); + use_case.update_try_delay_till(use_case_try_delay_till); + self.use_cases_by_delay + .strict_insert(use_case.delay_key(), use_case_key); + }, + hash_map::Entry::Vacant(vacant) => { + let use_case = UseCase::new_empty(use_case_try_delay_till, input_idx); + self.use_case_placeholders_by_delay + .strict_insert(use_case.delay_key(), use_case_key); + vacant.insert(use_case); + }, + } + + // Notice this function is called after the txn is selected for output due to no delaying + // needed, so the account must not have been tracked before, otherwise it wouldn't have been + // selected for output. 
+ let new_account = Account::new_empty(account_try_delay_till, input_idx); + let new_account_delay_key = new_account.delay_key(); + self.accounts.strict_insert(address, new_account); + self.account_placeholders_by_delay + .strict_insert(new_account_delay_key, address); + } + + /// Return the txn back if relevant use case and sender are not subject to delaying. Otherwise, + /// Queue it up. + pub fn queue_or_return(&mut self, input_idx: InputIdx, txn: Txn) -> Option { + let address = txn.parse_sender(); + let account_opt = self.accounts.get_mut(&address); + let use_case_key = txn.parse_use_case(); + let use_case_opt = self.use_cases.get_mut(&use_case_key); + + let account_should_delay = account_opt.as_ref().map_or(false, |account| { + !account.is_empty() // needs delaying due to queued txns under the same account + || account.try_delay_till > self.output_idx + }); + let use_case_should_delay = use_case_opt + .as_ref() + .map_or(false, |use_case| use_case.try_delay_till > self.output_idx); + + if account_should_delay || use_case_should_delay { + self.queue_txn(input_idx, address, use_case_key, txn); + None + } else { + self.update_delays_for_selected_txn(input_idx, address, use_case_key); + Some(txn) + } + } +} diff --git a/consensus/src/transaction_shuffler/use_case_aware/iterator.rs b/consensus/src/transaction_shuffler/use_case_aware/iterator.rs new file mode 100644 index 0000000000000..b7cfa0fd1412b --- /dev/null +++ b/consensus/src/transaction_shuffler/use_case_aware/iterator.rs @@ -0,0 +1,79 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::transaction_shuffler::use_case_aware::{ + delayed_queue::DelayedQueue, + types::{InputIdx, OutputIdx}, + Config, +}; +use aptos_types::transaction::use_case::UseCaseAwareTransaction; +use std::{collections::VecDeque, fmt::Debug}; + +#[derive(Debug)] +pub(super) struct ShuffledTransactionIterator { + input_queue: VecDeque, + delayed_queue: DelayedQueue, + input_idx: InputIdx, + 
output_idx: OutputIdx, +} + +impl ShuffledTransactionIterator +where + Txn: UseCaseAwareTransaction + Debug, +{ + pub(super) fn new(config: Config) -> Self { + Self { + input_queue: VecDeque::new(), + delayed_queue: DelayedQueue::new(config), + input_idx: 0, + output_idx: 0, + } + } + + pub(super) fn extended_with(mut self, txns: impl IntoIterator) -> Self { + self.input_queue.extend(txns); + self + } + + pub(super) fn select_next_txn(&mut self) -> Option { + let ret = self.select_next_txn_inner(); + if ret.is_some() { + self.output_idx += 1; + } + ret + } + + pub(super) fn select_next_txn_inner(&mut self) -> Option { + self.delayed_queue.bump_output_idx(self.output_idx); + + // 1. if anything delayed became ready, return it + if let Some(txn) = self.delayed_queue.pop_head(true) { + return Some(txn); + } + + // 2. Otherwise, seek in the input queue for something that shouldn't be delayed due to either + // the sender or the use case. + while let Some(txn) = self.input_queue.pop_front() { + let input_idx = self.input_idx; + self.input_idx += 1; + + if let Some(txn) = self.delayed_queue.queue_or_return(input_idx, txn) { + return Some(txn); + } + } + + // 3. 
If nothing is ready, return the next eligible from the delay queue + self.delayed_queue.pop_head(false) + } +} + +impl Iterator for ShuffledTransactionIterator +where + Txn: UseCaseAwareTransaction + Debug, +{ + type Item = Txn; + + fn next(&mut self) -> Option { + self.select_next_txn() + } +} diff --git a/consensus/src/transaction_shuffler/use_case_aware/mod.rs b/consensus/src/transaction_shuffler/use_case_aware/mod.rs new file mode 100644 index 0000000000000..2c268e7e8a90c --- /dev/null +++ b/consensus/src/transaction_shuffler/use_case_aware/mod.rs @@ -0,0 +1,48 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::transaction_shuffler::TransactionShuffler; +use aptos_types::transaction::{use_case::UseCaseKey, SignedTransaction}; +use iterator::ShuffledTransactionIterator; + +pub(crate) mod iterator; +pub(crate) mod types; +pub(crate) mod utils; + +pub(crate) mod delayed_queue; +#[cfg(test)] +mod tests; + +#[derive(Clone, Debug, Default)] +pub(crate) struct Config { + pub sender_spread_factor: usize, + pub platform_use_case_spread_factor: usize, + pub user_use_case_spread_factor: usize, +} + +impl Config { + pub(crate) fn sender_spread_factor(&self) -> usize { + self.sender_spread_factor + } + + pub(crate) fn use_case_spread_factor(&self, use_case_key: &UseCaseKey) -> usize { + use UseCaseKey::*; + + match use_case_key { + Platform => self.platform_use_case_spread_factor, + ContractAddress(..) 
| Others => self.user_use_case_spread_factor, + } + } +} + +pub struct UseCaseAwareShuffler { + pub config: Config, +} + +impl TransactionShuffler for UseCaseAwareShuffler { + fn shuffle(&self, txns: Vec) -> Vec { + ShuffledTransactionIterator::new(self.config.clone()) + .extended_with(txns) + .collect() + } +} diff --git a/consensus/src/transaction_shuffler/use_case_aware/tests/manual.rs b/consensus/src/transaction_shuffler/use_case_aware/tests/manual.rs new file mode 100644 index 0000000000000..5b2a7d1a25c83 --- /dev/null +++ b/consensus/src/transaction_shuffler/use_case_aware/tests/manual.rs @@ -0,0 +1,203 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::transaction_shuffler::use_case_aware::{ + iterator::ShuffledTransactionIterator, + tests, + tests::{Account, Contract}, + Config, +}; +use itertools::Itertools; + +const PP: Contract = Contract::Platform; +const OO: Contract = Contract::Others; +const C1: Contract = Contract::User(0xF1); +const C2: Contract = Contract::User(0xF2); +const C3: Contract = Contract::User(0xF3); +const A1: Account = Account(1); +const A2: Account = Account(2); +const A3: Account = Account(3); +const A4: Account = Account(4); + +fn assert_shuffle_result( + config: Config, + txns: impl IntoIterator, + expected_order: impl IntoIterator, +) { + let txns = tests::into_txns(txns); + let actual_order = ShuffledTransactionIterator::new(config) + .extended_with(txns) + .map(|txn| txn.original_idx) + .collect_vec(); + let expected_order = expected_order.into_iter().collect_vec(); + assert_eq!(actual_order, expected_order, "actual != expected"); +} + +fn three_senders_txns() -> [(Contract, Account); 10] { + [ + // 5 txns from A1 + (PP, A1), + (OO, A1), + (C1, A1), + (C2, A1), + (C3, A1), + // 3 txns from A2 + (PP, A2), + (PP, A2), + (PP, A2), + // 2 txns from A3 + (C1, A3), + (C1, A3), + ] +} + +#[test] +fn test_no_spreading() { + let config = Config { + sender_spread_factor: 0, + 
platform_use_case_spread_factor: 0, + user_use_case_spread_factor: 0, + }; + let txns = three_senders_txns(); + + assert_shuffle_result(config, txns, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); +} + +#[test] +fn test_spread_by_sender_1() { + let config = Config { + sender_spread_factor: 1, + // ignore use case conflicts + platform_use_case_spread_factor: 0, + // ignore use case conflicts + user_use_case_spread_factor: 0, + }; + let txns = three_senders_txns(); + + assert_shuffle_result(config, txns, [0, 5, 1, 6, 2, 7, 3, 8, 4, 9]); +} + +#[test] +fn test_spread_by_sender_by_large_factor() { + for sender_spread_factor in [2, 3, 4] { + let config = Config { + sender_spread_factor, + platform_use_case_spread_factor: 0, + user_use_case_spread_factor: 0, + }; + let txns = three_senders_txns(); + + assert_shuffle_result(config, txns, [0, 5, 8, 1, 6, 9, 2, 7, 3, 4]); + } +} + +fn three_contracts_txns() -> [(Contract, Account); 10] { + [ + // 5 txns from C1 + (C1, A1), + (C1, A1), + (C1, A1), + (C1, A1), + (C1, A1), + // 3 txns from C2 + (C2, A2), + (C2, A2), + (C2, A2), + // 2 txns from C3 + (C3, A3), + (C3, A3), + ] +} + +#[test] +fn test_spread_by_use_case_1() { + let config = Config { + sender_spread_factor: 0, + platform_use_case_spread_factor: 0, + user_use_case_spread_factor: 1, + }; + let txns = three_contracts_txns(); + + assert_shuffle_result(config, txns, [0, 5, 1, 6, 2, 7, 3, 8, 4, 9]); +} + +#[test] +fn test_spread_by_use_case_by_large_factor() { + for user_use_case_spread_factor in [2, 3, 4] { + let config = Config { + sender_spread_factor: 0, + platform_use_case_spread_factor: 0, + user_use_case_spread_factor, + }; + let txns = three_contracts_txns(); + + assert_shuffle_result(config, txns, [0, 5, 8, 1, 6, 9, 2, 7, 3, 4]); + } +} + +fn user_and_platform_use_cases() -> [(Contract, Account); 10] { + [ + // 5 txns from C1 + (C1, A1), + (C1, A1), + (C1, A1), + (C1, A1), + (C1, A1), + // 3 txns from C2 + (PP, A2), + (PP, A2), + (PP, A2), + // 2 txns from C3 + (PP, A3), + 
(PP, A3), + ] +} + +#[test] +fn test_platform_txn_priority_0() { + let config = Config { + sender_spread_factor: 0, + platform_use_case_spread_factor: 0, + user_use_case_spread_factor: 3, + }; + let txns = user_and_platform_use_cases(); + + assert_shuffle_result(config, txns, [0, 5, 6, 7, 1, 8, 9, 2, 3, 4]); +} + +#[test] +fn test_platform_txn_priority_1() { + let config = Config { + sender_spread_factor: 0, + platform_use_case_spread_factor: 1, + user_use_case_spread_factor: 3, + }; + let txns = user_and_platform_use_cases(); + + assert_shuffle_result(config, txns, [0, 5, 6, 1, 7, 8, 2, 9, 3, 4]); +} + +#[test] +fn test_spread_sender_within_use_case() { + let config = Config { + sender_spread_factor: 2, + platform_use_case_spread_factor: 0, + user_use_case_spread_factor: 1, + }; + let txns = [ + // 5 txns from C1 + (C1, A1), + (C1, A1), + (C1, A2), + (C1, A2), + (C1, A2), + // 3 txns from C2 + (C2, A3), + (C2, A3), + (C2, A3), + (C2, A4), + (C2, A4), + ]; + + assert_shuffle_result(config, txns, [0, 5, 2, 8, 1, 6, 3, 9, 4, 7]); +} diff --git a/consensus/src/transaction_shuffler/use_case_aware/tests/mod.rs b/consensus/src/transaction_shuffler/use_case_aware/tests/mod.rs new file mode 100644 index 0000000000000..e409ae9772ce9 --- /dev/null +++ b/consensus/src/transaction_shuffler/use_case_aware/tests/mod.rs @@ -0,0 +1,94 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use aptos_types::transaction::use_case::{UseCaseAwareTransaction, UseCaseKey}; +use move_core_types::account_address::AccountAddress; +use proptest_derive::Arbitrary; +use std::fmt::Debug; + +mod manual; +mod proptests; + +#[derive(Arbitrary)] +enum Contract { + Platform, + Others, + User(u8), +} + +impl Debug for Contract { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + use Contract::*; + + write!(f, "c{}", match self { + Platform => "PP".to_string(), + Others => "OO".to_string(), + User(addr) => hex::encode_upper(addr.to_be_bytes()), + }) + } 
+} + +#[derive(Arbitrary)] +struct Account(u8); + +impl Debug for Account { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "a{}", hex::encode_upper(self.0.to_be_bytes())) + } +} + +impl Account { + fn as_account_address(&self) -> AccountAddress { + let mut addr = [0u8; 32]; + addr[31..].copy_from_slice(&self.0.to_be_bytes()); + AccountAddress::new(addr) + } +} + +struct Transaction { + contract: Contract, + sender: Account, + original_idx: usize, +} + +impl Debug for Transaction { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "t{}:{:?}{:?}", + self.original_idx, self.contract, self.sender + ) + } +} + +impl UseCaseAwareTransaction for Transaction { + fn parse_sender(&self) -> AccountAddress { + self.sender.as_account_address() + } + + fn parse_use_case(&self) -> UseCaseKey { + use UseCaseKey::*; + + match self.contract { + Contract::Platform => Platform, + Contract::Others => Others, + Contract::User(c) => ContractAddress(Account(c).as_account_address()), + } + } +} + +fn into_txns(txns: impl IntoIterator) -> Vec { + let mut original_idx = 0; + txns.into_iter() + .map(|(contract, sender)| { + let txn = Transaction { + contract, + sender, + original_idx, + }; + + original_idx += 1; + txn + }) + .collect() +} diff --git a/consensus/src/transaction_shuffler/use_case_aware/tests/proptests.rs b/consensus/src/transaction_shuffler/use_case_aware/tests/proptests.rs new file mode 100644 index 0000000000000..ef84499776d34 --- /dev/null +++ b/consensus/src/transaction_shuffler/use_case_aware/tests/proptests.rs @@ -0,0 +1,49 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::transaction_shuffler::use_case_aware::{ + iterator::ShuffledTransactionIterator, + tests::{into_txns, Account, Contract, Transaction}, + Config, +}; +use itertools::Itertools; +use proptest::{collection::vec, prelude::*}; +use std::collections::HashMap; + +fn txn_indices_by_account(txns: 
&[Transaction]) -> HashMap> { + txns.iter() + .map(|txn| (txn.sender.0, txn.original_idx)) + .into_group_map() +} + +proptest! { + #[test] + fn test_no_panic( + txns in vec(any::<(Contract, Account)>(), 0..100) + .prop_map(into_txns), + sender_factor in 0..100usize, + platform_factor in 0..100usize, + user_contract_factor in 0..100usize, + ) { + let num_txns = txns.len(); + let txns_by_account = txn_indices_by_account(&txns); + + let config = Config { + sender_spread_factor: sender_factor, + platform_use_case_spread_factor: platform_factor, + user_use_case_spread_factor: user_contract_factor, + }; + + let shuffled_txns = ShuffledTransactionIterator::new(config) + .extended_with(txns) + .collect_vec(); + + prop_assert_eq!( + txn_indices_by_account(&shuffled_txns), + txns_by_account + ); + + let txn_indices = shuffled_txns.into_iter().map(|txn| txn.original_idx).sorted().collect_vec(); + prop_assert_eq!(txn_indices, (0..num_txns).collect_vec()); + } +} diff --git a/consensus/src/transaction_shuffler/use_case_aware/types.rs b/consensus/src/transaction_shuffler/use_case_aware/types.rs new file mode 100644 index 0000000000000..d68b67d8ffaa1 --- /dev/null +++ b/consensus/src/transaction_shuffler/use_case_aware/types.rs @@ -0,0 +1,5 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +pub(crate) type InputIdx = usize; +pub(crate) type OutputIdx = usize; diff --git a/consensus/src/transaction_shuffler/use_case_aware/utils.rs b/consensus/src/transaction_shuffler/use_case_aware/utils.rs new file mode 100644 index 0000000000000..a0e75d075766e --- /dev/null +++ b/consensus/src/transaction_shuffler/use_case_aware/utils.rs @@ -0,0 +1,43 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use std::{ + collections::{BTreeMap, HashMap}, + hash::Hash, +}; + +pub(crate) trait StrictMap { + fn strict_insert(&mut self, key: K, value: V); + + fn strict_remove(&mut self, key: &K); + + fn expect_mut(&mut self, key: &K) -> &mut V; +} 
+ +impl StrictMap for HashMap { + fn strict_insert(&mut self, key: K, value: V) { + assert!(self.insert(key, value).is_none()) + } + + fn strict_remove(&mut self, key: &K) { + assert!(self.remove(key).is_some()) + } + + fn expect_mut(&mut self, key: &K) -> &mut V { + self.get_mut(key).expect("Known to exist.") + } +} + +impl StrictMap for BTreeMap { + fn strict_insert(&mut self, key: K, value: V) { + assert!(self.insert(key, value).is_none()) + } + + fn strict_remove(&mut self, key: &K) { + assert!(self.remove(key).is_some()) + } + + fn expect_mut(&mut self, key: &K) -> &mut V { + self.get_mut(key).expect("Known to exist.") + } +} diff --git a/consensus/src/twins/twins_node.rs b/consensus/src/twins/twins_node.rs index 4d347012544db..a1b511cb8c915 100644 --- a/consensus/src/twins/twins_node.rs +++ b/consensus/src/twins/twins_node.rs @@ -8,7 +8,7 @@ use crate::{ network::NetworkTask, network_interface::{ConsensusNetworkClient, DIRECT_SEND, RPC}, network_tests::{NetworkPlayground, TwinId}, - payload_manager::PayloadManager, + payload_manager::DirectMempoolPayloadManager, pipeline::buffer_manager::OrderedBlocks, quorum_store::quorum_store_db::MockQuorumStoreDB, rand::rand_gen::storage::in_memory::InMemRandDb, @@ -118,7 +118,7 @@ impl SMRNode { let reconfig_listener = ReconfigNotificationListener { notification_receiver: reconfig_events, }; - let _commit_notifier = Arc::from(PayloadManager::DirectMempool); + let _commit_notifier = Arc::from(DirectMempoolPayloadManager::new()); let mut configs = HashMap::new(); configs.insert( ValidatorSet::CONFIG_ID, diff --git a/consensus/src/util/db_tool.rs b/consensus/src/util/db_tool.rs index 5809747c4bcb7..6ae051b251997 100644 --- a/consensus/src/util/db_tool.rs +++ b/consensus/src/util/db_tool.rs @@ -1,6 +1,8 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::unwrap_used)] + use crate::{ consensusdb::ConsensusDB, quorum_store::{ @@ -9,7 +11,7 @@ use crate::{ }, }; use anyhow::{bail, 
Result}; -use aptos_consensus_types::{block::Block, common::Payload, proof_of_store::ProofOfStore}; +use aptos_consensus_types::{block::Block, common::Payload}; use aptos_crypto::HashValue; use aptos_types::transaction::{SignedTransaction, Transaction}; use clap::Parser; @@ -44,6 +46,7 @@ impl Command { let mut txns = Vec::new(); for block in blocks { let id = block.id(); + #[allow(clippy::unwrap_in_result)] if self.block_id.is_none() || id == self.block_id.unwrap() { txns.extend( extract_txns_from_block(&block, &all_batches)? @@ -58,49 +61,78 @@ impl Command { } } +fn extract_txns_from_quorum_store( + digests: impl Iterator, + all_batches: &HashMap, +) -> anyhow::Result> { + let mut block_txns = Vec::new(); + for digest in digests { + if let Some(batch) = all_batches.get(&digest) { + if let Some(txns) = batch.payload() { + block_txns.extend(txns); + } else { + bail!("Payload is not found for batch ({digest})."); + } + } else { + bail!("Batch ({digest}) is not found."); + } + } + Ok(block_txns) +} + pub fn extract_txns_from_block<'a>( block: &'a Block, all_batches: &'a HashMap, ) -> anyhow::Result> { match block.payload().as_ref() { - Some(payload) => { - let mut block_txns = Vec::new(); - - let extract_txns_from_proof_stores = move |proofs: &Vec| { - for proof in proofs { - let digest = proof.digest(); - if let Some(batch) = all_batches.get(digest) { - if let Some(txns) = batch.payload() { - block_txns.extend(txns); - } else { - bail!("Payload is not found for batch ({digest})."); - } - } else { - bail!("Batch ({digest}) is not found."); - } + Some(payload) => match payload { + Payload::DirectMempool(_) => { + bail!("DirectMempool is not supported."); + }, + Payload::InQuorumStore(proof_with_data) => extract_txns_from_quorum_store( + proof_with_data.proofs.iter().map(|proof| *proof.digest()), + all_batches, + ), + Payload::InQuorumStoreWithLimit(proof_with_data) => extract_txns_from_quorum_store( + proof_with_data + .proof_with_data + .proofs + .iter() + 
.map(|proof| *proof.digest()), + all_batches, + ), + Payload::QuorumStoreInlineHybrid(inline_batches, proof_with_data, _) => { + let mut all_txns = extract_txns_from_quorum_store( + proof_with_data.proofs.iter().map(|proof| *proof.digest()), + all_batches, + ) + .unwrap(); + for (_, txns) in inline_batches { + all_txns.extend(txns); } - Ok(block_txns) - }; - - match payload { - Payload::DirectMempool(_) => { - bail!("DirectMempool is not supported."); - }, - Payload::InQuorumStore(proof_with_data) => { - extract_txns_from_proof_stores(&proof_with_data.proofs) - }, - Payload::InQuorumStoreWithLimit(proof_with_data) => { - extract_txns_from_proof_stores(&proof_with_data.proof_with_data.proofs) - }, - Payload::QuorumStoreInlineHybrid(inline_batches, proof_with_data, _) => { - let mut all_txns = - extract_txns_from_proof_stores(&proof_with_data.proofs).unwrap(); - for (_, txns) in inline_batches { - all_txns.extend(txns); - } - Ok(all_txns) - }, - } + Ok(all_txns) + }, + Payload::OptQuorumStore(opt_qs_payload) => { + let mut all_txns = extract_txns_from_quorum_store( + opt_qs_payload + .proof_with_data() + .iter() + .map(|proof| *proof.digest()), + all_batches, + ) + .unwrap(); + all_txns.extend( + extract_txns_from_quorum_store( + opt_qs_payload + .opt_batches() + .iter() + .map(|info| *info.digest()), + all_batches, + ) + .unwrap(), + ); + Ok(all_txns) + }, }, None => Ok(vec![]), } diff --git a/consensus/src/util/time_service.rs b/consensus/src/util/time_service.rs index d90fd6a54c008..2114f479dc04d 100644 --- a/consensus/src/util/time_service.rs +++ b/consensus/src/util/time_service.rs @@ -80,8 +80,14 @@ where T: Send + 'static, { fn run(&mut self) -> Pin + Send>> { - let mut sender = self.sender.take().unwrap(); - let message = self.message.take().unwrap(); + let mut sender = self + .sender + .take() + .expect("Expect to be able to take sender"); + let message = self + .message + .take() + .expect("Expect to be able to take message"); let r = async move { if let 
Err(e) = sender.send(message).await { error!("Error on send: {:?}", e); diff --git a/crates/aptos-build-info/build.rs b/crates/aptos-build-info/build.rs index b3339b0eec60f..222f8098919ea 100644 --- a/crates/aptos-build-info/build.rs +++ b/crates/aptos-build-info/build.rs @@ -9,6 +9,11 @@ fn main() -> shadow_rs::SdResult<()> { std::env::var("CARGO_CFG_TOKIO_UNSTABLE").is_ok() ); println!("cargo:rerun-if-changed=build.rs"); - println!("cargo:rerun-if-changed=../../.git/HEAD"); + // Check for this path first, otherwise it will force a rebuild every time + // https://github.com/rust-lang/cargo/issues/4213 + let git_head = std::path::Path::new("../../.git/HEAD"); + if git_head.exists() { + println!("cargo:rerun-if-changed=../../.git/HEAD"); + } shadow_rs::new() } diff --git a/crates/aptos-crypto/src/hash.rs b/crates/aptos-crypto/src/hash.rs index 74096aaa4fbaf..c484b41ca6c43 100644 --- a/crates/aptos-crypto/src/hash.rs +++ b/crates/aptos-crypto/src/hash.rs @@ -105,7 +105,7 @@ use more_asserts::debug_assert_lt; use once_cell::sync::{Lazy, OnceCell}; #[cfg(any(test, feature = "fuzzing"))] use proptest_derive::Arbitrary; -use rand::{rngs::OsRng, Rng}; +use rand::{distributions::Standard, prelude::Distribution, rngs::OsRng, Rng}; use serde::{de, ser, Deserialize, Serialize}; use std::{ self, @@ -159,15 +159,12 @@ impl HashValue { /// Create a cryptographically random instance. pub fn random() -> Self { - let mut rng = OsRng; - let hash: [u8; HashValue::LENGTH] = rng.gen(); - HashValue { hash } + Self::random_with_rng(&mut OsRng) } /// Creates a random instance with given rng. Useful in unit tests. 
pub fn random_with_rng(rng: &mut R) -> Self { - let hash: [u8; HashValue::LENGTH] = rng.gen(); - HashValue { hash } + rng.gen() } /// Convenience function that computes a `HashValue` internally equal to @@ -413,6 +410,12 @@ impl FromStr for HashValue { } } +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> HashValue { + HashValue { hash: rng.gen() } + } +} + /// Parse error when attempting to construct a HashValue #[derive(Clone, Copy, Debug)] pub struct HashValueParseError; @@ -636,6 +639,16 @@ define_hasher! { ) } +define_hasher! { + /// The hasher used to compute the hash of an internal node in the Sparse Merkle Tree. + ( + HexyHasher, + HEXY_HASHER, + HEXY_SEED, + b"Hexy" + ) +} + define_hasher! { /// The hasher used as a placeholder. ( @@ -666,6 +679,10 @@ pub static ACCUMULATOR_PLACEHOLDER_HASH: Lazy = pub static SPARSE_MERKLE_PLACEHOLDER_HASH: Lazy = Lazy::new(|| create_literal_hash("SPARSE_MERKLE_PLACEHOLDER_HASH")); +/// Placeholder hash of hot state tier Merkle Tree. +pub static HOT_STATE_PLACE_HOLDER_HASH: Lazy = + Lazy::new(|| create_literal_hash("HOT_STATE_PLACEHOLDER_HASH")); + /// Block id reserved as the id of parent block of the genesis block. 
pub static PRE_GENESIS_BLOCK_ID: Lazy = Lazy::new(|| create_literal_hash("PRE_GENESIS_BLOCK_ID")); diff --git a/crates/aptos-crypto/src/unit_tests/hash_test.rs b/crates/aptos-crypto/src/unit_tests/hash_test.rs index 7be5d926fef4b..47253749760f1 100644 --- a/crates/aptos-crypto/src/unit_tests/hash_test.rs +++ b/crates/aptos-crypto/src/unit_tests/hash_test.rs @@ -71,6 +71,7 @@ fn test_random_with_rng() { seed[..4].copy_from_slice(&[1, 2, 3, 4]); let hash1; let hash2; + let hash3; { let mut rng: StdRng = SeedableRng::from_seed(seed); hash1 = HashValue::random_with_rng(&mut rng); @@ -79,7 +80,12 @@ fn test_random_with_rng() { let mut rng: StdRng = SeedableRng::from_seed(seed); hash2 = HashValue::random_with_rng(&mut rng); } + { + let mut rng: StdRng = SeedableRng::from_seed(seed); + hash3 = rng.gen(); + } assert_eq!(hash1, hash2); + assert_eq!(hash1, hash3); } #[test] diff --git a/crates/aptos-dkg/Cargo.toml b/crates/aptos-dkg/Cargo.toml index 67f8660a49e3c..e6463336d1805 100644 --- a/crates/aptos-dkg/Cargo.toml +++ b/crates/aptos-dkg/Cargo.toml @@ -2,7 +2,7 @@ name = "aptos-dkg" version = "0.1.0" edition = "2021" -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +license = { workspace = true } [dependencies] anyhow = { workspace = true } diff --git a/crates/aptos-dkg/benches/crypto.rs b/crates/aptos-dkg/benches/crypto.rs index f465d1d0940bf..c9f73c629e05e 100644 --- a/crates/aptos-dkg/benches/crypto.rs +++ b/crates/aptos-dkg/benches/crypto.rs @@ -45,7 +45,11 @@ pub fn crypto_group(c: &mut Criterion) { fft_assign_bench(thresh, &mut group); gt_multiexp_naive(thresh, &mut group); + g1_multiexp_naive(thresh, &mut group); + g2_multiexp_naive(thresh, &mut group); + g1_multiexp(thresh, &mut group); + n_g1_double_exp(thresh, &mut group); g2_multiexp(thresh, &mut group); accumulator_poly(thresh, &mut group); @@ -54,7 +58,7 @@ pub fn crypto_group(c: &mut Criterion) { // Derived from `print_best_worst_avg_case_subsets` in 
`tests/secret_sharing_config.rs`. const AVG_CASE: usize = 74; - for n in [1, 2, 4, 8, 16, 32, 64, AVG_CASE, 128] { + for n in [1, 2, 3, 4, 8, 16, 32, 64, AVG_CASE, 128] { multipairing(n, &mut group); for num_threads in [1, 2, 4, 8, 16, 32] { parallel_multipairing(n, &mut group, num_threads); @@ -460,6 +464,31 @@ fn g1_multiexp(n: usize, g: &mut BenchmarkGroup) { }); } +fn n_g1_double_exp(n: usize, g: &mut BenchmarkGroup) { + let mut rng = thread_rng(); + + g.throughput(Throughput::Elements(n as u64)); + + g.bench_function(BenchmarkId::new("n_g1_double_exp", n), move |b| { + b.iter_with_setup( + || { + let mut points_and_scalars = Vec::with_capacity(n); + for _ in 0..n { + points_and_scalars + .push((random_g1_points(2, &mut rng), random_scalars(2, &mut rng))); + } + + points_and_scalars + }, + |points_and_scalars| { + for (points, scalars) in points_and_scalars { + g1_multi_exp(points.as_slice(), scalars.as_ref()); + } + }, + ) + }); +} + fn g2_multiexp(n: usize, g: &mut BenchmarkGroup) { let mut rng = thread_rng(); @@ -506,6 +535,56 @@ fn gt_multiexp_naive(n: usize, g: &mut BenchmarkGroup) { }); } +fn g1_multiexp_naive(n: usize, g: &mut BenchmarkGroup) { + let mut rng = thread_rng(); + + g.throughput(Throughput::Elements(n as u64)); + + g.bench_function(BenchmarkId::new("g1_multiexp_naive", n), move |b| { + b.iter_with_setup( + || { + let points = random_g1_points(n, &mut rng); + + let scalars = random_scalars(n, &mut rng); + + (points, scalars) + }, + |(points, scalars)| { + points + .into_iter() + .zip(scalars.into_iter()) + .map(|(p, s)| p.mul(s)) + .sum::() + }, + ) + }); +} + +fn g2_multiexp_naive(n: usize, g: &mut BenchmarkGroup) { + let mut rng = thread_rng(); + + g.throughput(Throughput::Elements(n as u64)); + + g.bench_function(BenchmarkId::new("g2_multiexp_naive", n), move |b| { + b.iter_with_setup( + || { + let points = random_g2_points(n, &mut rng); + + let scalars = random_scalars(n, &mut rng); + + (points, scalars) + }, + |(points, scalars)| { + 
points + .into_iter() + .zip(scalars.into_iter()) + .map(|(p, s)| p.mul(s)) + .sum::() + }, + ) + }); +} + criterion_group!( name = benches; config = Criterion::default().sample_size(10); diff --git a/crates/aptos-dkg/benches/pvss.rs b/crates/aptos-dkg/benches/pvss.rs index bcf76ca05b648..1bca3a728d919 100644 --- a/crates/aptos-dkg/benches/pvss.rs +++ b/crates/aptos-dkg/benches/pvss.rs @@ -6,6 +6,7 @@ use aptos_crypto::Uniform; use aptos_dkg::{ + algebra::evaluation_domain::BatchEvaluationDomain, pvss, pvss::{ test_utils, @@ -17,7 +18,7 @@ use aptos_dkg::{ transcript::{MalleableTranscript, Transcript}, SecretSharingConfig, }, - WeightedConfig, + LowDegreeTest, WeightedConfig, }, }; use criterion::{ @@ -43,6 +44,33 @@ pub fn all_groups(c: &mut Criterion) { // let d = pvss_group::>(&wc, c); // weighted_pvss_group(&wc, d, c); } + + // LDT + ldt_group(c); +} + +pub fn ldt_group(c: &mut Criterion) { + let mut rng = thread_rng(); + + for sc in get_threshold_configs_for_benchmarking() { + let mut group = c.benchmark_group("ldt"); + + group.bench_function(format!("dual_code_word/{}", sc), move |b| { + b.iter_with_setup( + || { + let n = sc.get_total_num_players(); + let t = sc.get_threshold(); + let batch_dom = BatchEvaluationDomain::new(n); + + (n, t, batch_dom) + }, + |(n, t, batch_dom)| { + let ldt = LowDegreeTest::random(&mut rng, t, n, true, &batch_dom); + ldt.dual_code_word(); + }, + ) + }); + } } pub fn pvss_group( diff --git a/crates/aptos-dkg/benches/weighted_vuf.rs b/crates/aptos-dkg/benches/weighted_vuf.rs index d838f910222bd..81bf126e7e8ed 100644 --- a/crates/aptos-dkg/benches/weighted_vuf.rs +++ b/crates/aptos-dkg/benches/weighted_vuf.rs @@ -26,17 +26,16 @@ use aptos_runtimes::spawn_rayon_thread_pool; use blstrs::{G1Projective, G2Projective, Scalar}; use core::iter::zip; use criterion::{ - criterion_group, criterion_main, - measurement::{Measurement, WallTime}, - BenchmarkGroup, Criterion, + black_box, criterion_group, criterion_main, measurement::WallTime, 
BenchmarkGroup, Criterion, }; use rand::{rngs::ThreadRng, thread_rng}; +use std::time::Instant; const BENCH_MSG: &[u8; 36] = b"some dummy message for the benchmark"; pub fn all_groups(c: &mut Criterion) { let mut group = c.benchmark_group("wvuf/das-pinkas-sk-in-g1"); - let bench_cases = wvuf_benches::(&mut group); + let bench_cases = wvuf_benches::(&mut group); group.finish(); let mut group = c.benchmark_group("wvuf/das-pinkas-sk-in-g1"); @@ -44,7 +43,7 @@ pub fn all_groups(c: &mut Criterion) { group.finish(); let mut group = c.benchmark_group("wvuf/insecure-field-bls"); - wvuf_benches::, bls::BlsWUF, WallTime>(&mut group); + wvuf_benches::, bls::BlsWUF>(&mut group); group.finish(); } @@ -125,9 +124,8 @@ pub fn wvuf_benches< PubKeyShare = WT::DealtPubKeyShare, SecretKeyShare = WT::DealtSecretKeyShare, >, - M: Measurement, >( - group: &mut BenchmarkGroup, + group: &mut BenchmarkGroup, ) -> Vec<( WeightedConfig, ::PublicParameters, @@ -198,20 +196,63 @@ where } for (wc, vuf_pp, sk, pk, sks, pks, asks, apks, deltas) in &bench_cases { - wvuf_augment_random_keypair::( + wvuf_augment_random_keypair::( &wc, &vuf_pp, &sks, &pks, group, &mut rng, ); - wvuf_augment_all_pubkeys::(&wc, &vuf_pp, &pks, &deltas, group); + wvuf_augment_all_pubkeys::(&wc, &vuf_pp, &pks, &deltas, group); - wvuf_augment_random_pubkey::( + wvuf_augment_random_pubkey::( &wc, &vuf_pp, &pks, &deltas, group, &mut rng, ); - wvuf_create_share::(&wc, &asks, group, &mut rng); + wvuf_create_share_random::(&wc, &asks, group, &mut rng); + wvuf_create_share_average::(&wc, &asks, group); - wvuf_verify_share::(&wc, &vuf_pp, &asks, &apks, group, &mut rng); + let min_weight_player = wc.get_min_weight_player(); + wvuf_create_share_specific::( + &wc, + &asks, + group, + &min_weight_player, + "min-weight", + ); + let max_weight_player = wc.get_max_weight_player(); + wvuf_create_share_specific::( + &wc, + &asks, + group, + &max_weight_player, + "max-weight", + ); + // TODO: should change WVUF trait to support some kind 
of multi-threaded share verification, + // since in practice that's what we would do on the validators + // i.e., https://github.com/aptos-labs/aptos-core/blob/8ff40c8dd6505dea5e4b2a28cbbe7b97723b0ec2/consensus/src/rand/rand_gen/rand_manager.rs#L221 + wvuf_verify_share_random::( + &wc, &vuf_pp, &asks, &apks, group, &mut rng, + ); + wvuf_verify_share_average::(&wc, &vuf_pp, &asks, &apks, group); + wvuf_verify_share_specific::( + &wc, + &vuf_pp, + &asks, + &apks, + &min_weight_player, + "min-weight", + group, + ); + wvuf_verify_share_specific::( + &wc, + &vuf_pp, + &asks, + &apks, + &max_weight_player, + "max-weight", + group, + ); + + // benchmarks the sequence of WVUF::verify_share calls on shares from a specific subset of players let bc: Vec<(fn(&WeightedConfig, &mut ThreadRng) -> Vec, String)> = vec![ ( WeightedConfig::get_random_eligible_subset_of_players, @@ -228,8 +269,7 @@ where ]; for (pick_subset_fn, subset_type) in bc { - // best-case aggregation times (pick players with largest weights) - wvuf_aggregate_shares::( + wvuf_aggregate_shares::( &wc, &asks, &apks, @@ -239,7 +279,18 @@ where &subset_type, ); - wvuf_verify_proof::( + wvuf_many_verify_shares::( + &wc, + &vuf_pp, + &asks, + &apks, + pick_subset_fn, + &subset_type, + group, + &mut rng, + ); + + wvuf_verify_proof::( &wc, &vuf_pp, &pk, @@ -252,7 +303,7 @@ where ); for num_threads in [1, 2, 4, 8, 16, 32] { - wvuf_derive_eval::( + wvuf_derive_eval::( &wc, &vuf_pp, &asks, @@ -266,7 +317,7 @@ where } } - wvuf_eval::(&wc, &sk, group); + wvuf_eval::(&wc, &sk, group); } bench_cases @@ -280,14 +331,13 @@ fn wvuf_augment_random_keypair< SecretKeyShare = WT::DealtSecretKeyShare, >, R: rand_core::RngCore + rand_core::CryptoRng, - M: Measurement, >( // For efficiency, we re-use the PVSS transcript wc: &WeightedConfig, vuf_pp: &WVUF::PublicParameters, sks: &Vec, pks: &Vec, - group: &mut BenchmarkGroup, + group: &mut BenchmarkGroup, rng: &mut R, ) where WVUF::PublicParameters: for<'a> From<&'a 
WT::PublicParameters>, @@ -312,14 +362,13 @@ fn wvuf_augment_all_pubkeys< SecretKeyShare = WT::DealtSecretKeyShare, >, R: rand_core::RngCore + rand_core::CryptoRng, - M: Measurement, >( // For efficiency, we re-use the PVSS transcript wc: &WeightedConfig, vuf_pp: &WVUF::PublicParameters, pks: &Vec, deltas: &Vec, - group: &mut BenchmarkGroup, + group: &mut BenchmarkGroup, ) where WVUF::PublicParameters: for<'a> From<&'a WT::PublicParameters>, { @@ -343,14 +392,13 @@ fn wvuf_augment_random_pubkey< SecretKeyShare = WT::DealtSecretKeyShare, >, R: rand_core::RngCore + rand_core::CryptoRng, - M: Measurement, >( // For efficiency, we re-use the PVSS transcript wc: &WeightedConfig, vuf_pp: &WVUF::PublicParameters, pks: &Vec, deltas: &Vec, - group: &mut BenchmarkGroup, + group: &mut BenchmarkGroup, rng: &mut R, ) where WVUF::PublicParameters: for<'a> From<&'a WT::PublicParameters>, @@ -370,7 +418,7 @@ fn wvuf_augment_random_pubkey< }); } -fn wvuf_create_share< +fn wvuf_create_share_random< WT: Transcript, WVUF: WeightedVUF< SecretKey = WT::DealtSecretKey, @@ -378,16 +426,15 @@ fn wvuf_create_share< SecretKeyShare = WT::DealtSecretKeyShare, >, R: rand_core::RngCore + rand_core::CryptoRng, - M: Measurement, >( wc: &WeightedConfig, asks: &Vec, - group: &mut BenchmarkGroup, + group: &mut BenchmarkGroup, rng: &mut R, ) where WVUF::PublicParameters: for<'a> From<&'a WT::PublicParameters>, { - group.bench_function(format!("create_share/{}", wc), move |b| { + group.bench_function(format!("create_share_random/{}", wc), move |b| { b.iter_with_setup( || { let player = wc.get_random_player(rng); @@ -398,7 +445,74 @@ fn wvuf_create_share< }); } -fn wvuf_verify_share< +fn wvuf_create_share_specific< + WT: Transcript, + WVUF: WeightedVUF< + SecretKey = WT::DealtSecretKey, + PubKeyShare = WT::DealtPubKeyShare, + SecretKeyShare = WT::DealtSecretKeyShare, + >, + R: rand_core::RngCore + rand_core::CryptoRng, +>( + wc: &WeightedConfig, + asks: &Vec, + group: &mut BenchmarkGroup, + player: 
&Player, + name: &str, +) where + WVUF::PublicParameters: for<'a> From<&'a WT::PublicParameters>, +{ + group.bench_function(format!("create_share_specific/{}/{}", name, wc), move |b| { + b.iter_with_setup( + || &asks[player.id], + |ask| WVUF::create_share(ask, BENCH_MSG), + ) + }); +} + +fn wvuf_create_share_average< + WT: Transcript, + WVUF: WeightedVUF< + SecretKey = WT::DealtSecretKey, + PubKeyShare = WT::DealtPubKeyShare, + SecretKeyShare = WT::DealtSecretKeyShare, + >, + R: rand_core::RngCore + rand_core::CryptoRng, +>( + wc: &WeightedConfig, + asks: &Vec, + group: &mut BenchmarkGroup, +) where + WVUF::PublicParameters: for<'a> From<&'a WT::PublicParameters>, +{ + group.bench_function(format!("create_share_average/{}", wc), move |b| { + let n = wc.get_total_num_players(); + b.iter_custom(|iters| { + let shares: Vec<_> = (0..n) + .map(|i| { + let player = wc.get_player(i); + &asks[player.id] + }) + .collect(); + + let start = Instant::now(); + for _i in 0..iters { + black_box( + shares + .iter() + .map(|ask| { + WVUF::create_share(ask, BENCH_MSG); + }) + .collect::>(), + ); + } + let total_duration = start.elapsed(); + total_duration / (n as u32) + }) + }); +} + +fn wvuf_verify_share_random< WT: Transcript, WVUF: WeightedVUF< SecretKey = WT::DealtSecretKey, @@ -406,18 +520,17 @@ fn wvuf_verify_share< SecretKeyShare = WT::DealtSecretKeyShare, >, R: rand_core::RngCore + rand_core::CryptoRng, - M: Measurement, >( wc: &WeightedConfig, vuf_pp: &WVUF::PublicParameters, asks: &Vec, apks: &Vec, - group: &mut BenchmarkGroup, + group: &mut BenchmarkGroup, rng: &mut R, ) where WVUF::PublicParameters: for<'a> From<&'a WT::PublicParameters>, { - group.bench_function(format!("verify_share/{}", wc), move |b| { + group.bench_function(format!("verify_share_random/{}", wc), move |b| { b.iter_with_setup( || { let player = wc.get_random_player(rng); @@ -430,6 +543,112 @@ fn wvuf_verify_share< }); } +fn wvuf_verify_share_average< + WT: Transcript, + WVUF: WeightedVUF< + SecretKey = 
WT::DealtSecretKey, + PubKeyShare = WT::DealtPubKeyShare, + SecretKeyShare = WT::DealtSecretKeyShare, + >, + R: rand_core::RngCore + rand_core::CryptoRng, +>( + wc: &WeightedConfig, + vuf_pp: &WVUF::PublicParameters, + asks: &Vec, + apks: &Vec, + group: &mut BenchmarkGroup, +) where + WVUF::PublicParameters: for<'a> From<&'a WT::PublicParameters>, +{ + group.bench_function(format!("verify_share_average/{}", wc), move |b| { + let n = wc.get_total_num_players(); + b.iter_custom(|iters| { + let shares: Vec<_> = (0..n) + .map(|i| { + let player = wc.get_player(i); + let ask = &asks[player.id]; + (WVUF::create_share(ask, BENCH_MSG), &apks[player.id]) + }) + .collect(); + + let start = Instant::now(); + for _i in 0..iters { + black_box( + shares + .iter() + .map(|(proof, apk)| WVUF::verify_share(vuf_pp, apk, BENCH_MSG, &proof)) + .collect::>>(), + ); + } + let total_duration = start.elapsed(); + total_duration / (n as u32) + }) + }); +} + +fn wvuf_many_verify_shares< + WT: Transcript, + WVUF: WeightedVUF< + SecretKey = WT::DealtSecretKey, + PubKeyShare = WT::DealtPubKeyShare, + SecretKeyShare = WT::DealtSecretKeyShare, + PubKey = WT::DealtPubKey, + >, + R: rand_core::RngCore + rand_core::CryptoRng, +>( + wc: &WeightedConfig, + vuf_pp: &WVUF::PublicParameters, + asks: &Vec, + apks: &Vec, + pick_subset_fn: fn(&WeightedConfig, &mut R) -> Vec, + name: &str, + group: &mut BenchmarkGroup, + rng: &mut R, +) where + WVUF::PublicParameters: for<'a> From<&'a WT::PublicParameters>, +{ + group.bench_function(format!("many_verify_shares/{}/{}", name, wc), move |b| { + b.iter_with_setup( + || get_apks_and_proofs::(&wc, &asks, apks, rng, pick_subset_fn), + |apks_and_proofs| { + for (_, apk, proof) in apks_and_proofs { + WVUF::verify_share(vuf_pp, &apk, BENCH_MSG, &proof).unwrap(); + } + }, + ) + }); +} + +fn wvuf_verify_share_specific< + WT: Transcript, + WVUF: WeightedVUF< + SecretKey = WT::DealtSecretKey, + PubKeyShare = WT::DealtPubKeyShare, + SecretKeyShare = 
WT::DealtSecretKeyShare, + >, + R: rand_core::RngCore + rand_core::CryptoRng, +>( + wc: &WeightedConfig, + vuf_pp: &WVUF::PublicParameters, + asks: &Vec, + apks: &Vec, + player: &Player, + name: &str, + group: &mut BenchmarkGroup, +) where + WVUF::PublicParameters: for<'a> From<&'a WT::PublicParameters>, +{ + println!("Player weight: {:?}", wc.get_player_weight(player)); + let ask = &asks[player.id]; + let apk = &apks[player.id]; + group.bench_function(format!("verify_share_specific/{}/{}", name, wc), move |b| { + b.iter_with_setup( + || WVUF::create_share(ask, BENCH_MSG), + |proof| WVUF::verify_share(vuf_pp, apk, BENCH_MSG, &proof), + ) + }); +} + fn wvuf_aggregate_shares< WT: Transcript, WVUF: WeightedVUF< @@ -439,13 +658,12 @@ fn wvuf_aggregate_shares< SecretKeyShare = WT::DealtSecretKeyShare, >, R: rand_core::RngCore + rand_core::CryptoRng, - M: Measurement, >( // For efficiency, we re-use the PVSS transcript wc: &WeightedConfig, asks: &Vec, apks: &Vec, - group: &mut BenchmarkGroup, + group: &mut BenchmarkGroup, rng: &mut R, pick_subset_fn: fn(&WeightedConfig, &mut R) -> Vec, subset_type: &String, @@ -474,7 +692,6 @@ fn wvuf_verify_proof< SecretKeyShare = WT::DealtSecretKeyShare, >, R: rand_core::RngCore + rand_core::CryptoRng, - M: Measurement, >( // For efficiency, we re-use the PVSS transcript wc: &WeightedConfig, @@ -482,7 +699,7 @@ fn wvuf_verify_proof< pk: &WVUF::PubKey, asks: &Vec, apks: &Vec, - group: &mut BenchmarkGroup, + group: &mut BenchmarkGroup, rng: &mut R, pick_subset_fn: fn(&WeightedConfig, &mut R) -> Vec, subset_type: &String, @@ -519,14 +736,13 @@ fn wvuf_derive_eval< SecretKeyShare = WT::DealtSecretKeyShare, >, R: rand_core::RngCore + rand_core::CryptoRng, - M: Measurement, >( // For efficiency, we re-use the PVSS transcript wc: &WeightedConfig, pp: &WVUF::PublicParameters, asks: &Vec, apks: &Vec, - group: &mut BenchmarkGroup, + group: &mut BenchmarkGroup, rng: &mut R, pick_subset_fn: fn(&WeightedConfig, &mut R) -> Vec, subset_type: &String, 
@@ -565,13 +781,12 @@ fn wvuf_derive_eval< fn pinkas_wvuf_derive_eval_collect_lagrange_shares_and_rks< R: rand_core::RngCore + rand_core::CryptoRng, - M: Measurement, >( // For efficiency, we re-use the PVSS transcript wc: &WeightedConfig, asks: &Vec<::AugmentedSecretKeyShare>, apks: &Vec<::AugmentedPubKeyShare>, - group: &mut BenchmarkGroup, + group: &mut BenchmarkGroup, rng: &mut R, pick_subset_fn: fn(&WeightedConfig, &mut R) -> Vec, subset_type: &str, @@ -606,15 +821,12 @@ fn pinkas_wvuf_derive_eval_collect_lagrange_shares_and_rks< ); } -fn pinkas_wvuf_derive_eval_rks_multiexps< - R: rand_core::RngCore + rand_core::CryptoRng, - M: Measurement, ->( +fn pinkas_wvuf_derive_eval_rks_multiexps( // For efficiency, we re-use the PVSS transcript wc: &WeightedConfig, asks: &Vec<::AugmentedSecretKeyShare>, apks: &Vec<::AugmentedPubKeyShare>, - group: &mut BenchmarkGroup, + group: &mut BenchmarkGroup, rng: &mut R, pick_subset_fn: fn(&WeightedConfig, &mut R) -> Vec, subset_type: &str, @@ -667,13 +879,10 @@ fn pinkas_wvuf_derive_eval_rks_multiexps< ); } -fn pinkas_wvuf_derive_eval_multipairing< - R: rand_core::RngCore + rand_core::CryptoRng, - M: Measurement, ->( +fn pinkas_wvuf_derive_eval_multipairing( // For efficiency, we re-use the PVSS transcript wc: &WeightedConfig, - group: &mut BenchmarkGroup, + group: &mut BenchmarkGroup, rng: &mut R, pick_subset_fn: fn(&WeightedConfig, &mut R) -> Vec, subset_type: &str, @@ -742,11 +951,10 @@ fn wvuf_eval< PubKeyShare = WT::DealtPubKeyShare, SecretKeyShare = WT::DealtSecretKeyShare, >, - M: Measurement, >( wc: &WeightedConfig, sk: &WVUF::SecretKey, - group: &mut BenchmarkGroup, + group: &mut BenchmarkGroup, ) where WVUF::PublicParameters: for<'a> From<&'a WT::PublicParameters>, { diff --git a/crates/aptos-dkg/src/pvss/das/unweighted_protocol.rs b/crates/aptos-dkg/src/pvss/das/unweighted_protocol.rs index c19a7430b6c56..8b90a45b6a7fe 100644 --- a/crates/aptos-dkg/src/pvss/das/unweighted_protocol.rs +++ 
b/crates/aptos-dkg/src/pvss/das/unweighted_protocol.rs @@ -164,6 +164,7 @@ impl traits::Transcript for Transcript { } // Derive challenges deterministically via Fiat-Shamir; easier to debug for distributed systems + // TODO: benchmark this let (f, extra) = fiat_shamir::fiat_shamir( self, sc, @@ -199,6 +200,7 @@ impl traits::Transcript for Transcript { // TODO(Performance): Change the Fiat-Shamir transform to use 128-bit random exponents. // r_i = \tau^i, \forall i \in [n] + // TODO: benchmark this let taus = get_nonzero_powers_of_tau(&extra[1], sc.n); // Compute the multiexps from above. diff --git a/crates/aptos-dkg/src/pvss/low_degree_test.rs b/crates/aptos-dkg/src/pvss/low_degree_test.rs index feabcd43f58b3..930820213a600 100644 --- a/crates/aptos-dkg/src/pvss/low_degree_test.rs +++ b/crates/aptos-dkg/src/pvss/low_degree_test.rs @@ -192,7 +192,7 @@ impl<'a> LowDegreeTest<'a> { /// [CD17e] SCRAPE: Scalable Randomness Attested by Public Entities; by Ignacio Cascudo and /// Bernardo David; in Cryptology ePrint Archive, Report 2017/216; 2017; /// https://eprint.iacr.org/2017/216 - fn dual_code_word(self) -> Vec { + pub fn dual_code_word(self) -> Vec { // println!("dual_code_word > t: {t}, n: {n}, includes_zero: {includes_zero}"); // Accounts for the size of `f` being the `n` evaluations of f(X) at the roots-of-unity and f(0) diff --git a/crates/aptos-dkg/src/pvss/test_utils.rs b/crates/aptos-dkg/src/pvss/test_utils.rs index 5b3c871622b5c..0a8a87b168af4 100644 --- a/crates/aptos-dkg/src/pvss/test_utils.rs +++ b/crates/aptos-dkg/src/pvss/test_utils.rs @@ -172,7 +172,11 @@ pub fn get_weighted_configs_for_testing() -> Vec { } pub fn get_threshold_configs_for_benchmarking() -> Vec { + // [XDL+24] The Latency Price of Threshold Cryptosystem in Blockchains; by Zhuolun Xiang et al; 2024 vec![ + ThresholdConfig::new(143, 254).unwrap(), // from XDL+24 + ThresholdConfig::new(184, 254).unwrap(), // from XDL+24 + ThresholdConfig::new(548, 821).unwrap(), // from initial 
deployment ThresholdConfig::new(333, 1_000).unwrap(), ThresholdConfig::new(666, 1_000).unwrap(), ThresholdConfig::new(3_333, 10_000).unwrap(), diff --git a/crates/aptos-dkg/src/pvss/weighted/weighted_config.rs b/crates/aptos-dkg/src/pvss/weighted/weighted_config.rs index 77a0fd00e760a..f5adc881040e2 100644 --- a/crates/aptos-dkg/src/pvss/weighted/weighted_config.rs +++ b/crates/aptos-dkg/src/pvss/weighted/weighted_config.rs @@ -29,7 +29,9 @@ pub struct WeightedConfig { /// `W[a, a + weight[player])`. Useful during weighted secret reconstruction. starting_index: Vec, /// The maximum weight of any player. - max_player_weight: usize, + max_weight: usize, + /// The minimum weight of any player. + min_weight: usize, } impl WeightedConfig { @@ -46,7 +48,8 @@ impl WeightedConfig { if weights.is_empty() { return Err(anyhow!("expected a non-empty vector of player weights")); } - let max_player_weight = *weights.iter().max().unwrap(); + let max_weight = *weights.iter().max().unwrap(); + let min_weight = *weights.iter().min().unwrap(); let n = weights.len(); let W = weights.iter().sum(); @@ -70,12 +73,47 @@ impl WeightedConfig { num_players: n, weight: weights, starting_index, - max_player_weight, + max_weight, + min_weight, }) } - pub fn get_max_player_weight(&self) -> usize { - self.max_player_weight + pub fn get_min_weight(&self) -> usize { + self.min_weight + } + + /// Returns _a_ player who has the smallest weight. + pub fn get_min_weight_player(&self) -> Player { + if let Some((i, _weight)) = self + .weight + .iter() + .enumerate() + .min_by_key(|&(_, &weight)| weight) + { + // println!("Player {} has the smallest weight: {}", i, _weight); + self.get_player(i) + } else { + panic!("Weights vector should not be empty"); + } + } + + /// Returns _a_ player who has the largest weight. 
+ pub fn get_max_weight_player(&self) -> Player { + if let Some((i, _weight)) = self + .weight + .iter() + .enumerate() + .max_by_key(|&(_, &weight)| weight) + { + // println!("Player {} has the largest weight: {}", i, _weight); + self.get_player(i) + } else { + panic!("Weights vector should not be empty"); + } + } + + pub fn get_max_weight(&self) -> usize { + self.max_weight } pub fn get_threshold_config(&self) -> &ThresholdConfig { diff --git a/crates/aptos-dkg/src/weighted_vuf/bls/mod.rs b/crates/aptos-dkg/src/weighted_vuf/bls/mod.rs index c085640fb4d9a..11235ceafadd9 100644 --- a/crates/aptos-dkg/src/weighted_vuf/bls/mod.rs +++ b/crates/aptos-dkg/src/weighted_vuf/bls/mod.rs @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - algebra::lagrange::lagrange_coefficients, + algebra::{lagrange::lagrange_coefficients, polynomials::get_powers_of_tau}, pvss, pvss::{Player, WeightedConfig}, - utils::{g1_multi_exp, multi_pairing, random::random_scalars, HasMultiExp}, + utils::{g1_multi_exp, multi_pairing, random::random_scalar, HasMultiExp}, weighted_vuf::traits::WeightedVUF, }; use anyhow::bail; @@ -82,13 +82,14 @@ impl WeightedVUF for BlsWUF { proof: &Self::ProofShare, ) -> anyhow::Result<()> { let hash = Self::hash_to_curve(msg); - // TODO: Use Fiat-Shamir - let coeffs = random_scalars(apk.len(), &mut thread_rng()); + // TODO: Use Fiat-Shamir instead of random_scalar + let coeffs = get_powers_of_tau(&random_scalar(&mut thread_rng()), apk.len()); let pks = apk .iter() .map(|pk| *pk.as_group_element()) .collect::>(); + // TODO: Calling multi-exp seems to decrease performance by 100+ microseconds even when |coeffs| = 1 and the coefficient is 1. Not sure what's going on here. 
let agg_pk = G2Projective::multi_exp_slice(pks.as_slice(), coeffs.as_slice()); let agg_sig = G1Projective::multi_exp_slice(proof.to_vec().as_slice(), coeffs.as_slice()); diff --git a/crates/aptos-dkg/src/weighted_vuf/pinkas/mod.rs b/crates/aptos-dkg/src/weighted_vuf/pinkas/mod.rs index fb83f422f0d7a..dd3795830f9ea 100644 --- a/crates/aptos-dkg/src/weighted_vuf/pinkas/mod.rs +++ b/crates/aptos-dkg/src/weighted_vuf/pinkas/mod.rs @@ -138,7 +138,7 @@ impl WeightedVUF for PinkasWUF { [&pks_combined, &pp.g_hat.neg()].into_iter(), ) != Gt::identity() { - bail!("RPKs were not correctly randomized."); + panic!("RPKs were not correctly randomized."); } Ok((delta, pk)) diff --git a/crates/aptos-faucet/core/src/endpoints/captcha.rs b/crates/aptos-faucet/core/src/endpoints/captcha.rs index 168222b3d787f..0975ec93e7915 100644 --- a/crates/aptos-faucet/core/src/endpoints/captcha.rs +++ b/crates/aptos-faucet/core/src/endpoints/captcha.rs @@ -34,7 +34,7 @@ impl CaptchaApi { path = "/request_captcha", method = "get", operation_id = "request_captcha", - response_header(name = "CAPTCHA_KEY", type = "u32", description = "Captcha key"), + response_header(name = "CAPTCHA_KEY", ty = "u32", description = "Captcha key"), tag = "ApiTags::Captcha" )] async fn request_captcha(&self) -> Result>>, AptosTapErrorResponse> { diff --git a/crates/aptos-faucet/core/src/funder/transfer.rs b/crates/aptos-faucet/core/src/funder/transfer.rs index 05da85b597825..d5b40d4c42332 100644 --- a/crates/aptos-faucet/core/src/funder/transfer.rs +++ b/crates/aptos-faucet/core/src/funder/transfer.rs @@ -22,7 +22,7 @@ use aptos_sdk::{ account_address::AccountAddress, chain_id::ChainId, transaction::{authenticator::AuthenticationKey, SignedTransaction, TransactionPayload}, - LocalAccount, + SupraCoinType, LocalAccount, }, }; use async_trait::async_trait; @@ -314,7 +314,7 @@ impl FunderTrait for TransferFunder { let account_address = self.faucet_account.read().await.address(); let funder_balance = match self 
.get_api_client() - .get_account_balance_bcs(account_address, "0x1::supra_coin::SupraCoin") + .get_account_balance_bcs::(account_address) .await { Ok(response) => response.into_inner(), diff --git a/crates/aptos-genesis/src/builder.rs b/crates/aptos-genesis/src/builder.rs index 21b86b4c4bfb7..c05de35bdd39d 100644 --- a/crates/aptos-genesis/src/builder.rs +++ b/crates/aptos-genesis/src/builder.rs @@ -166,7 +166,11 @@ impl ValidatorNodeConfig { // Init safety rules let validator_identity_file = self.dir.join(VALIDATOR_IDENTITY); config.consensus.safety_rules.initial_safety_rules_config = - InitialSafetyRulesConfig::from_file(validator_identity_file, waypoint_config.clone()); + InitialSafetyRulesConfig::from_file( + validator_identity_file, + vec![], + waypoint_config.clone(), + ); config.base.waypoint = waypoint_config; } diff --git a/crates/aptos-genesis/src/lib.rs b/crates/aptos-genesis/src/lib.rs index 784abf0aae33b..f80dc805b14fc 100644 --- a/crates/aptos-genesis/src/lib.rs +++ b/crates/aptos-genesis/src/lib.rs @@ -181,6 +181,7 @@ impl GenesisInfo { false, /* indexer */ BUFFERED_STATE_TARGET_ITEMS, DEFAULT_MAX_NUM_NODES_PER_LRU_CACHE_SHARD, + None, )?; let db_rw = DbReaderWriter::new(aptosdb); aptos_executor::db_bootstrapper::generate_waypoint::(&db_rw, genesis) diff --git a/crates/aptos-genesis/src/mainnet.rs b/crates/aptos-genesis/src/mainnet.rs index 23cb7e3d90c88..81b824b0f98e0 100644 --- a/crates/aptos-genesis/src/mainnet.rs +++ b/crates/aptos-genesis/src/mainnet.rs @@ -170,6 +170,7 @@ impl MainnetGenesisInfo { false, /* indexer */ BUFFERED_STATE_TARGET_ITEMS, DEFAULT_MAX_NUM_NODES_PER_LRU_CACHE_SHARD, + None, )?; let db_rw = DbReaderWriter::new(aptosdb); aptos_executor::db_bootstrapper::generate_waypoint::(&db_rw, genesis) diff --git a/crates/aptos-jwk-consensus/Cargo.toml b/crates/aptos-jwk-consensus/Cargo.toml index b5bf11ad1948d..62f56a242aa74 100644 --- a/crates/aptos-jwk-consensus/Cargo.toml +++ b/crates/aptos-jwk-consensus/Cargo.toml @@ -29,6 +29,7 @@ 
aptos-metrics-core = { workspace = true } aptos-network = { workspace = true } aptos-reliable-broadcast = { workspace = true } aptos-runtimes = { workspace = true } +aptos-safety-rules = { workspace = true } aptos-time-service = { workspace = true } aptos-types = { workspace = true } aptos-validator-transaction-pool = { workspace = true } diff --git a/crates/aptos-jwk-consensus/src/epoch_manager.rs b/crates/aptos-jwk-consensus/src/epoch_manager.rs index bd349d78c80c2..590435f65be77 100644 --- a/crates/aptos-jwk-consensus/src/epoch_manager.rs +++ b/crates/aptos-jwk-consensus/src/epoch_manager.rs @@ -8,11 +8,11 @@ use crate::{ types::JWKConsensusMsg, update_certifier::UpdateCertifier, }; -use anyhow::Result; +use anyhow::{anyhow, Result}; use aptos_bounded_executor::BoundedExecutor; use aptos_channels::{aptos_channel, message_queues::QueueStyle}; +use aptos_config::config::SafetyRulesConfig; use aptos_consensus_types::common::Author; -use aptos_crypto::ed25519::PrivateKey; use aptos_event_notifications::{ EventNotification, EventNotificationListener, ReconfigNotification, ReconfigNotificationListener, @@ -20,6 +20,7 @@ use aptos_event_notifications::{ use aptos_logger::{error, info}; use aptos_network::{application::interface::NetworkClient, protocols::network::Event}; use aptos_reliable_broadcast::ReliableBroadcast; +use aptos_safety_rules::{safety_rules_manager::storage, PersistentSafetyStorage}; use aptos_types::{ account_address::AccountAddress, epoch_state::EpochState, @@ -42,7 +43,7 @@ pub struct EpochManager { epoch_state: Option>, // credential - consensus_key: Arc, + key_storage: PersistentSafetyStorage, // events we subscribe reconfig_events: ReconfigNotificationListener

, @@ -65,7 +66,7 @@ pub struct EpochManager { impl EpochManager

{ pub fn new( my_addr: AccountAddress, - consensus_key: PrivateKey, + safety_rules_config: &SafetyRulesConfig, reconfig_events: ReconfigNotificationListener

, jwk_updated_events: EventNotificationListener, self_sender: aptos_channels::Sender>, @@ -74,7 +75,7 @@ impl EpochManager

{ ) -> Self { Self { my_addr, - consensus_key: Arc::new(consensus_key), + key_storage: storage(safety_rules_config), epoch_state: None, reconfig_events, jwk_updated_events, @@ -144,10 +145,11 @@ impl EpochManager

{ .await .expect("Reconfig sender dropped, unable to start new epoch"); self.start_new_epoch(reconfig_notification.on_chain_configs) - .await; + .await + .unwrap(); } - async fn start_new_epoch(&mut self, payload: OnChainConfigPayload

) { + async fn start_new_epoch(&mut self, payload: OnChainConfigPayload

) -> Result<()> { let validator_set: ValidatorSet = payload .get() .expect("failed to get ValidatorSet from payload"); @@ -210,9 +212,15 @@ impl EpochManager

{ BoundedExecutor::new(8, tokio::runtime::Handle::current()), ); let update_certifier = UpdateCertifier::new(rb); - + let my_pk = epoch_state + .verifier + .get_public_key(&self.my_addr) + .ok_or_else(|| anyhow!("my pk not found in validator set"))?; + let my_sk = self.key_storage.consensus_sk_by_pk(my_pk).map_err(|e| { + anyhow!("jwk-consensus new epoch handling failed with consensus sk lookup err: {e}") + })?; let jwk_consensus_manager = JWKManager::new( - self.consensus_key.clone(), + Arc::new(my_sk), self.my_addr, epoch_state.clone(), Arc::new(update_certifier), @@ -236,12 +244,13 @@ impl EpochManager

{ )); info!(epoch = epoch_state.epoch, "JWKManager spawned.",); } + Ok(()) } async fn on_new_epoch(&mut self, reconfig_notification: ReconfigNotification

) -> Result<()> { self.shutdown_current_processor().await; self.start_new_epoch(reconfig_notification.on_chain_configs) - .await; + .await?; Ok(()) } diff --git a/crates/aptos-jwk-consensus/src/lib.rs b/crates/aptos-jwk-consensus/src/lib.rs index 46a736c9b473d..f2139ed5acbcc 100644 --- a/crates/aptos-jwk-consensus/src/lib.rs +++ b/crates/aptos-jwk-consensus/src/lib.rs @@ -5,7 +5,7 @@ use crate::{ epoch_manager::EpochManager, network::NetworkTask, network_interface::JWKConsensusNetworkClient, types::JWKConsensusMsg, }; -use aptos_crypto::ed25519::PrivateKey; +use aptos_config::config::SafetyRulesConfig; use aptos_event_notifications::{ DbBackedOnChainConfig, EventNotificationListener, ReconfigNotificationListener, }; @@ -17,7 +17,7 @@ use tokio::runtime::Runtime; #[allow(clippy::let_and_return)] pub fn start_jwk_consensus_runtime( my_addr: AccountAddress, - consensus_key: PrivateKey, + safety_rules_config: &SafetyRulesConfig, network_client: NetworkClient, network_service_events: NetworkServiceEvents, reconfig_events: ReconfigNotificationListener, @@ -29,7 +29,7 @@ pub fn start_jwk_consensus_runtime( let jwk_consensus_network_client = JWKConsensusNetworkClient::new(network_client); let epoch_manager = EpochManager::new( my_addr, - consensus_key, + safety_rules_config, reconfig_events, jwk_updated_events, self_sender, diff --git a/crates/aptos-logger/Cargo.toml b/crates/aptos-logger/Cargo.toml index d1bd444568709..67ab13acf7145 100644 --- a/crates/aptos-logger/Cargo.toml +++ b/crates/aptos-logger/Cargo.toml @@ -17,7 +17,7 @@ rust-version = { workspace = true } [dependencies] aptos-infallible = { workspace = true } aptos-log-derive = { workspace = true } -aptos-node-identity = { workspace = true } +aptos-node-identity = { workspace = true, optional = true } backtrace = { workspace = true } chrono = { workspace = true } console-subscriber = { workspace = true, optional = true } @@ -40,6 +40,7 @@ pretty_assertions = { workspace = true } [features] default = [] 
tokio-console = ["console-subscriber"] +node-identity = ["aptos-node-identity"] [package.metadata.cargo-machete] ignored = ["strum"] diff --git a/crates/aptos-logger/src/aptos_logger.rs b/crates/aptos-logger/src/aptos_logger.rs index 9d6843e7a289a..cdeb02974339f 100644 --- a/crates/aptos-logger/src/aptos_logger.rs +++ b/crates/aptos-logger/src/aptos_logger.rs @@ -204,8 +204,21 @@ impl LogEntry { let hostname = HOSTNAME.as_deref(); let namespace = NAMESPACE.as_deref(); - let peer_id = aptos_node_identity::peer_id_as_str(); - let chain_id = aptos_node_identity::chain_id().map(|chain_id| chain_id.id()); + + let peer_id: Option<&str>; + let chain_id: Option; + + #[cfg(node_identity)] + { + peer_id = aptos_node_identity::peer_id_as_str(); + chain_id = aptos_node_identity::chain_id().map(|chain_id| chain_id.id()); + } + + #[cfg(not(node_identity))] + { + peer_id = None; + chain_id = None; + } let backtrace = if enable_backtrace && matches!(metadata.level(), Level::Error) { let mut backtrace = Backtrace::new(); diff --git a/crates/aptos-metrics-core/src/lib.rs b/crates/aptos-metrics-core/src/lib.rs index dc83c3147b3b8..5d207a8b1dd47 100644 --- a/crates/aptos-metrics-core/src/lib.rs +++ b/crates/aptos-metrics-core/src/lib.rs @@ -18,22 +18,51 @@ pub mod op_counters; pub trait TimerHelper { fn timer_with(&self, labels: &[&str]) -> HistogramTimer; + + fn observe_with(&self, labels: &[&str], val: f64); } impl TimerHelper for HistogramVec { fn timer_with(&self, vals: &[&str]) -> HistogramTimer { self.with_label_values(vals).start_timer() } + + fn observe_with(&self, labels: &[&str], val: f64) { + self.with_label_values(labels).observe(val) + } +} + +pub struct ConcurrencyGauge { + gauge: IntGauge, +} + +impl ConcurrencyGauge { + fn new(gauge: IntGauge) -> Self { + gauge.inc(); + Self { gauge } + } +} + +impl Drop for ConcurrencyGauge { + fn drop(&mut self) { + self.gauge.dec(); + } } pub trait IntGaugeHelper { fn set_with(&self, labels: &[&str], val: i64); + + fn 
concurrency_with(&self, labels: &[&str]) -> ConcurrencyGauge; } impl IntGaugeHelper for IntGaugeVec { fn set_with(&self, labels: &[&str], val: i64) { self.with_label_values(labels).set(val) } + + fn concurrency_with(&self, labels: &[&str]) -> ConcurrencyGauge { + ConcurrencyGauge::new(self.with_label_values(labels)) + } } pub trait IntCounterHelper { diff --git a/crates/aptos-openapi/Cargo.toml b/crates/aptos-openapi/Cargo.toml index 0cf328dcb665c..352c79181ec65 100644 --- a/crates/aptos-openapi/Cargo.toml +++ b/crates/aptos-openapi/Cargo.toml @@ -13,7 +13,6 @@ repository = { workspace = true } rust-version = { workspace = true } [dependencies] -async-trait = { workspace = true } percent-encoding = { workspace = true } poem = { workspace = true } poem-openapi = { workspace = true } diff --git a/crates/aptos-openapi/src/helpers.rs b/crates/aptos-openapi/src/helpers.rs index c5aa830adea58..9763e53958721 100644 --- a/crates/aptos-openapi/src/helpers.rs +++ b/crates/aptos-openapi/src/helpers.rs @@ -12,7 +12,7 @@ //! too unrelated, or even worse, in a totally different crate (the move //! types are a great example of this). //! - The type is not expressible via OpenAPI. For example, an enum that -//! has some enum variants with values and others without values.This is +//! has some enum variants with values and others without values. This is //! not allowed in OpenAPI, types must be either unions (variants with //! values) or enums (variants without values). //! - We would prefer to serialize the data differently than its standard @@ -136,6 +136,8 @@ macro_rules! impl_poem_type { ::poem::http::HeaderValue::from_str(&string).ok() } } + + impl ::poem_openapi::types::IsObjectType for $ty {} }; } @@ -156,7 +158,6 @@ macro_rules! 
impl_poem_parameter { } } - #[async_trait::async_trait] impl ::poem_openapi::types::ParseFromMultipartField for $ty { async fn parse_from_multipart(field: Option<::poem::web::Field>) -> ::poem_openapi::types::ParseResult { match field { diff --git a/crates/aptos-rest-client/src/lib.rs b/crates/aptos-rest-client/src/lib.rs index 3271ed06432eb..222a34effbcc4 100644 --- a/crates/aptos-rest-client/src/lib.rs +++ b/crates/aptos-rest-client/src/lib.rs @@ -38,6 +38,7 @@ use aptos_types::{ contract_event::EventWithVersion, state_store::state_key::StateKey, transaction::SignedTransaction, + CoinType, }; use move_core_types::language_storage::StructTag; use reqwest::{ @@ -220,16 +221,12 @@ impl Client { }) } - pub async fn get_account_balance_bcs( + pub async fn get_account_balance_bcs( &self, address: AccountAddress, - coin_type: &str, ) -> AptosResult> { let resp = self - .get_account_resource_bcs::( - address, - &format!("0x1::coin::CoinStore<{}>", coin_type), - ) + .get_account_resource_bcs::>(address, &C::type_tag().to_string()) .await?; resp.and_then(|resource| Ok(resource.coin())) } diff --git a/crates/aptos-rosetta/Cargo.toml b/crates/aptos-rosetta/Cargo.toml index 97ca86f6cdc71..b38cf9466b527 100644 --- a/crates/aptos-rosetta/Cargo.toml +++ b/crates/aptos-rosetta/Cargo.toml @@ -35,7 +35,6 @@ once_cell = { workspace = true } reqwest = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } -serde_yaml = { workspace = true } tokio = { workspace = true } url = { workspace = true } warp = { workspace = true } diff --git a/crates/aptos-rosetta/src/account.rs b/crates/aptos-rosetta/src/account.rs index 004a1506924e6..f75322c2fd99f 100644 --- a/crates/aptos-rosetta/src/account.rs +++ b/crates/aptos-rosetta/src/account.rs @@ -18,7 +18,7 @@ use crate::{ use aptos_logger::{debug, trace, warn}; use aptos_types::{ account_address::AccountAddress, - account_config::{AccountResource, CoinStoreResource}, + account_config::{AccountResource, 
CoinStoreResourceUntyped}, }; use std::{collections::HashSet, str::FromStr}; use warp::Filter; @@ -60,12 +60,14 @@ async fn account_balance( get_block_index_from_request(&server_context, request.block_identifier.clone()).await?; // Version to grab is the last entry in the block (balance is at end of block) + // NOTE: In Rosetta, we always do balances by block here rather than ledger version. let block_info = server_context .block_cache()? .get_block_info_by_height(block_height, server_context.chain_id) .await?; let balance_version = block_info.last_version; + // Retrieve all metadata we want to provide as an on-demand lookup let (sequence_number, operators, balances, lockup_expiration) = get_balances( &rest_client, request.account_identifier, @@ -100,6 +102,7 @@ async fn get_balances( let mut lockup_expiration: u64 = 0; let mut total_requested_balance: Option = None; + // Lookup the delegation pool, if it's provided in the account information if pool_address.is_some() { match get_delegation_stake_balances( rest_client, @@ -135,6 +138,7 @@ async fn get_balances( } // Retrieve all account resources + // TODO: This will need to change for FungibleAssets, will need to lookup on a list of known FAs if let Ok(response) = rest_client .get_account_resources_at_version_bcs(owner_address, version) .await @@ -150,14 +154,18 @@ async fn get_balances( struct_tag.module.as_str(), struct_tag.name.as_str(), ) { + // Retrieve the sequence number from the account resource + // TODO: Make a separate call for this (AccountAddress::ONE, ACCOUNT_MODULE, ACCOUNT_RESOURCE) => { let account: AccountResource = bcs::from_bytes(&bytes)?; maybe_sequence_number = Some(account.sequence_number()) }, + // Parse all associated coin stores + // TODO: This would need to be expanded to support other coin stores (AccountAddress::ONE, COIN_MODULE, COIN_STORE_RESOURCE) => { // Only show coins on the base account if account.is_base_account() { - let coin_store: CoinStoreResource = 
bcs::from_bytes(&bytes)?; + let coin_store: CoinStoreResourceUntyped = bcs::from_bytes(&bytes)?; if let Some(coin_type) = struct_tag.type_args.first() { // Only display supported coins if coin_type == &native_coin_tag() { @@ -169,6 +177,7 @@ async fn get_balances( } } }, + // Parse all staking contract data to know the underlying balances of the pools (AccountAddress::ONE, STAKING_CONTRACT_MODULE, STORE_RESOURCE) => { if account.is_base_account() || pool_address.is_some() { continue; @@ -229,6 +238,8 @@ async fn get_balances( } } + // Retrieves the sequence number accordingly + // TODO: Sequence number should be 0 if it isn't retrieved probably let sequence_number = if let Some(sequence_number) = maybe_sequence_number { sequence_number } else { @@ -266,6 +277,8 @@ async fn get_balances( lockup_expiration, )) } else { + // If it fails, we return 0 + // TODO: This should probably be fixed to check if the account exists. Then if the account doesn't exist, return empty balance, otherwise error Ok(( 0, None, diff --git a/crates/aptos-rosetta/src/block.rs b/crates/aptos-rosetta/src/block.rs index 6ee61a6f218a4..59b41c2a637b0 100644 --- a/crates/aptos-rosetta/src/block.rs +++ b/crates/aptos-rosetta/src/block.rs @@ -27,8 +27,8 @@ pub fn block_route( /// Retrieves a block (in this case a single transaction) given it's identifier. /// -/// Our implementation allows for by `index`, which is the ledger `version` or by -/// transaction `hash`. +/// Our implementation allows for by `index`(block height) or by transaction `hash`. 
+/// If both are provided, `index` is used /// /// [API Spec](https://www.rosetta-api.org/docs/BlockApi.html#block) async fn block(request: BlockRequest, server_context: RosettaContext) -> ApiResult { @@ -41,7 +41,7 @@ async fn block(request: BlockRequest, server_context: RosettaContext) -> ApiResu check_network(request.network_identifier, &server_context)?; - // Retrieve by block or by hash, both or neither is not allowed + // Retrieve by block index or by hash, neither is not allowed let block_index = get_block_index_from_request(&server_context, request.block_identifier).await?; @@ -52,11 +52,15 @@ async fn block(request: BlockRequest, server_context: RosettaContext) -> ApiResu ) .await?; + // A hack to reduce overhead, if set, it will drop empty transactions (no operations0 from the + // block to reduce traffic sent let keep_empty_transactions = request .metadata .as_ref() .and_then(|inner| inner.keep_empty_transactions) .unwrap_or_default(); + + // Build the block accordingly from the input data let block = build_block( &server_context, parent_transaction, @@ -77,7 +81,7 @@ async fn build_block( chain_id: ChainId, keep_empty_transactions: bool, ) -> ApiResult { - // note: timestamps are in microseconds, so we convert to milliseconds + // NOTE: timestamps are in microseconds, so we convert to milliseconds for Rosetta let timestamp = get_timestamp(block.block_timestamp); let block_identifier = BlockIdentifier::from_block(&block, chain_id); @@ -85,15 +89,19 @@ async fn build_block( let mut transactions: Vec = Vec::new(); // TODO: Parallelize these and then sort at end if let Some(txns) = block.transactions { + // Convert transactions to Rosetta format for txn in txns { let transaction = Transaction::from_transaction(server_context, txn).await?; + + // Skip transactions that don't have any operations, since that's the only thing that's being used by Rosetta if keep_empty_transactions || !transaction.operations.is_empty() { transactions.push(transaction) } } } - // 
Ensure the transactions are sorted in order + // Ensure the transactions are sorted in order, this is required by Rosetta + // NOTE: sorting may be pretty expensive, depending on the size of the block transactions.sort_by(|first, second| first.metadata.version.0.cmp(&second.metadata.version.0)); Ok(Block { @@ -104,7 +112,7 @@ async fn build_block( }) } -/// Retrieves a block by its index +/// Retrieves a block by its index (block height) async fn get_block_by_index( block_cache: &BlockRetriever, block_height: u64, @@ -132,6 +140,7 @@ async fn get_block_by_index( } } +/// Abbreviated information about a Block without transactions #[derive(Clone, Debug)] pub struct BlockInfo { /// Block identifier (block hash & block height) @@ -170,6 +179,7 @@ impl BlockRetriever { } } + /// Retrieves block abbreviated info by height pub async fn get_block_info_by_height( &self, height: u64, @@ -191,11 +201,14 @@ impl BlockRetriever { Ok(BlockInfo::from_block(&block, chain_id)) } + /// Retrieves the block by height pub async fn get_block_by_height( &self, height: u64, with_transactions: bool, ) -> ApiResult { + // If we request transactions, we have to provide the page size, it ideally is bigger than + // the maximum block size. If not, transactions will be missed. if with_transactions { Ok(self .rest_client diff --git a/crates/aptos-rosetta/src/common.rs b/crates/aptos-rosetta/src/common.rs index 1e6040410af18..e46072743dcc2 100644 --- a/crates/aptos-rosetta/src/common.rs +++ b/crates/aptos-rosetta/src/common.rs @@ -27,6 +27,9 @@ pub const Y2K_MS: u64 = 946713600000; pub const BLOCKCHAIN: &str = "aptos"; /// Checks the request network matches the server network +/// +/// These fields are passed in on every request, and basically prevents non-Aptos and matching chain-id +/// requests from going through and messing things up. 
pub fn check_network( network_identifier: NetworkIdentifier, server_context: &RosettaContext, @@ -49,6 +52,7 @@ pub fn with_context( warp::any().map(move || context.clone()) } +/// Fills in an empty request for any REST API path that doesn't take any input body pub fn with_empty_request() -> impl Filter + Clone { warp::any().map(move || MetadataRequest {}) @@ -92,6 +96,7 @@ where } } +/// Retrieves an account's information by its address pub async fn get_account( rest_client: &aptos_rest_client::Client, address: AccountAddress, @@ -119,16 +124,20 @@ pub fn strip_hex_prefix(str: &str) -> &str { str.strip_prefix("0x").unwrap_or(str) } +/// Encodes the object into BCS, handling errors pub fn encode_bcs(obj: &T) -> ApiResult { let bytes = bcs::to_bytes(obj)?; Ok(hex::encode(bytes)) } +/// Decodes the object from BCS, handling errors pub fn decode_bcs(str: &str, type_name: &'static str) -> ApiResult { let bytes = hex::decode(str)?; bcs::from_bytes(&bytes).map_err(|_| ApiError::deserialization_failed(type_name)) } +/// Decodes a CryptoMaterial (key, signature, etc.) 
from Hex +/// TODO: Rename to decode_crypto_material pub fn decode_key( str: &str, type_name: &'static str, @@ -139,6 +148,7 @@ pub fn decode_key( const DEFAULT_COIN: &str = "SUPRA"; const DEFAULT_DECIMALS: u8 = 8; +/// Provides the [Currency] for 0x1::aptos_coin::AptosCoin aka APT pub fn native_coin() -> Currency { Currency { symbol: DEFAULT_COIN.to_string(), @@ -149,6 +159,7 @@ pub fn native_coin() -> Currency { } } +/// Provides the [TypeTag] for 0x1::aptos_coin::AptosCoin aka APT pub fn native_coin_tag() -> TypeTag { TypeTag::Struct(Box::new(StructTag { address: AccountAddress::ONE, @@ -158,6 +169,9 @@ pub fn native_coin_tag() -> TypeTag { })) } +/// Tells us whether the coin is APT and errors if it's not +/// +/// TODO: This is the function that needs to be replaced to handle more coin types pub fn is_native_coin(currency: &Currency) -> ApiResult<()> { if currency == &native_coin() { Ok(()) @@ -167,25 +181,35 @@ pub fn is_native_coin(currency: &Currency) -> ApiResult<()> { } /// Determines which block to pull for the request +/// +/// Inputs can give hash, index, or both pub async fn get_block_index_from_request( server_context: &RosettaContext, partial_block_identifier: Option, ) -> ApiResult { Ok(match partial_block_identifier { + // If Index and hash are provided, we use index, because it's easier to use. + // Note, we don't handle if they mismatch. + // + // This is required. Rosetta originally only took one or the other, and this failed in + // integration testing. 
Some(PartialBlockIdentifier { index: Some(block_index), hash: Some(_), }) => block_index, + // Lookup by block index Some(PartialBlockIdentifier { index: Some(block_index), hash: None, }) => block_index, + // Lookup by block hash Some(PartialBlockIdentifier { index: None, hash: Some(hash), }) => BlockHash::from_str(&hash)?.block_height(server_context.chain_id)?, + // Lookup latest version _ => { let response = server_context @@ -199,6 +223,10 @@ pub async fn get_block_index_from_request( }) } +/// BlockHash is not actually the block hash! This was a hack put in, since we don't actually have +/// [BlockHash] indexable. Instead, it just returns the combination of [ChainId] and the block_height (aka index). +/// +/// The [BlockHash] string format is `chain_id-block_height` #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct BlockHash { chain_id: ChainId, @@ -213,6 +241,9 @@ impl BlockHash { } } + /// Fetch the block height + /// + /// We verify the chain_id to ensure it is the correct network pub fn block_height(&self, expected_chain_id: ChainId) -> ApiResult { if expected_chain_id != self.chain_id { Err(ApiError::InvalidInput(Some(format!( @@ -228,9 +259,11 @@ impl BlockHash { impl FromStr for BlockHash { type Err = ApiError; + /// Parses `chain_id-block_height` fn from_str(str: &str) -> Result { let mut iter = str.split('-'); + // It must start with a chain-id let chain_id = if let Some(maybe_chain_id) = iter.next() { ChainId::from_str(maybe_chain_id).map_err(|_| { ApiError::InvalidInput(Some(format!( @@ -245,6 +278,7 @@ impl FromStr for BlockHash { )))); }; + // Chain id must be followed after a `-` with block height let block_height = if let Some(maybe_block_height) = iter.next() { u64::from_str(maybe_block_height).map_err(|_| { ApiError::InvalidInput(Some(format!( @@ -259,6 +293,7 @@ impl FromStr for BlockHash { )))); }; + // Don't allow any more hyphens or characters if iter.next().is_some() { Err(ApiError::InvalidInput(Some(format!( "Invalid block hash, too 
many hyphens {}", @@ -282,6 +317,7 @@ pub fn to_hex_lower(obj: &T) -> String { /// Retrieves the currency from the given parameters /// TODO: What do do about the type params? +/// TODO: Handle other currencies, will need to be passed in as a config file or something on startup pub fn parse_currency(address: AccountAddress, module: &str, name: &str) -> ApiResult { match (address, module, name) { (AccountAddress::ONE, APTOS_COIN_MODULE, APTOS_COIN_RESOURCE) => Ok(native_coin()), diff --git a/crates/aptos-rosetta/src/construction.rs b/crates/aptos-rosetta/src/construction.rs index b104c2b725eec..6a5bdc2ca3b91 100644 --- a/crates/aptos-rosetta/src/construction.rs +++ b/crates/aptos-rosetta/src/construction.rs @@ -9,12 +9,12 @@ //! //! This is broken down in the following flow: //! -//! * Preprocess (based on operations) gets information to fetch from metadata (onchchain) -//! * Metadata fetches onchain information e.g. sequence number +//! * Preprocess (based on operations) gets information to fetch from metadata (on-chain) +//! * Metadata fetches on-chain information e.g. sequence number //! * Payloads generates an unsigned transaction -//! * Application outside signs the payload from the transactino +//! * Application outside signs the payload from the transaction //! * Combine puts the signed transaction payload with the unsigned transaction -//! * Submit submits the signed transaciton to the blockchain +//! * Submit submits the signed transaction to the blockchain //! //! There are also 2 other sometimes used APIs //! * Derive (get an account from the private key) @@ -139,7 +139,9 @@ pub fn submit_route( /// Construction combine command (OFFLINE) /// -/// This combines signatures, and a raw txn +/// This combines signatures, and a raw transaction +/// +/// This currently only supports the original Ed25519 with single signer. 
/// /// [API Spec](https://www.rosetta-api.org/docs/ConstructionApi.html#constructioncombine) async fn construction_combine( @@ -149,6 +151,7 @@ async fn construction_combine( debug!("/construction/combine {:?}", request); check_network(request.network_identifier, &server_context)?; + // Decode the unsigned transaction from BCS in the input let unsigned_txn: RawTransaction = decode_bcs(&request.unsigned_transaction, "UnsignedTransaction")?; @@ -162,16 +165,19 @@ async fn construction_combine( let signature = &request.signatures[0]; + // Only support Ed25519 if signature.signature_type != SignatureType::Ed25519 || signature.public_key.curve_type != CurveType::Edwards25519 { return Err(ApiError::InvalidSignatureType); } + // Decode the key and signature accordingly let public_key: Ed25519PublicKey = decode_key(&signature.public_key.hex_bytes, "Ed25519PublicKey")?; let signature: Ed25519Signature = decode_key(&signature.hex_bytes, "Ed25519Signature")?; + // Combine them into a signed transaction, and encode it as BCS to return let signed_txn = SignedTransaction::new(unsigned_txn, public_key, signature); Ok(ConstructionCombineResponse { @@ -185,6 +191,9 @@ async fn construction_combine( /// Note: This only works for new accounts. After the account is created, all APIs should provide /// both account and key. /// +/// Note: if the accounts are handled ONLY by Rosetta, then this will always work. It only stops working +/// if it is one of many other types of keys / a rotated account. 
+/// /// [API Spec](https://www.rosetta-api.org/docs/ConstructionApi.html#constructionderive) async fn construction_derive( request: ConstructionDeriveRequest, @@ -193,6 +202,8 @@ async fn construction_derive( debug!("/construction/derive {:?}", request); check_network(request.network_identifier, &server_context)?; + // The input must be an Ed25519 Public key and will only derive the Address for the original + // Aptos Ed25519 authentication scheme let public_key: Ed25519PublicKey = decode_key(&request.public_key.hex_bytes, "Ed25519PublicKey")?; let address = AuthenticationKey::ed25519(&public_key).account_address(); @@ -214,6 +225,8 @@ async fn construction_hash( debug!("/construction/hash {:?}", request); check_network(request.network_identifier, &server_context)?; + // Decode the SignedTransaction and hash it accordingly. This in theory works for any transaction + // but it is expected to only be UserTransactions let signed_transaction: SignedTransaction = decode_bcs(&request.signed_transaction, "SignedTransaction")?; @@ -222,11 +235,13 @@ async fn construction_hash( }) } -/// Fills in the operator for actions that require it but don't have one +/// Fills in the operator for actions that require it but don't have one on an [InternalOperation] +/// TODO: move this onto [InternalOperation] and not in this file async fn fill_in_operator( rest_client: &aptos_rest_client::Client, mut internal_operation: InternalOperation, ) -> ApiResult { + // TODO: Refactor so there's not duplicate code below match &mut internal_operation { InternalOperation::SetOperator(op) => { // If there was no old operator set, and there is only one, we should use that @@ -290,6 +305,13 @@ async fn fill_in_operator( Ok(internal_operation) } +/// Simulates a transaction for gas estimation purposes +/// +/// Only the original Ed25519 accounts on Aptos are supported +/// +/// Will only simulate if it does not have max gas amount +/// +/// Will only estimate gas price async fn simulate_transaction( 
rest_client: &aptos_rest_client::Client, chain_id: ChainId, @@ -301,6 +323,7 @@ async fn simulate_transaction( let mut transaction_factory = TransactionFactory::new(chain_id); // If we have a gas unit price, let's not estimate + // TODO: Split into separate function if let Some(gas_unit_price) = options.gas_price_per_unit.as_ref() { transaction_factory = transaction_factory.with_gas_unit_price(gas_unit_price.0); } else { @@ -342,7 +365,6 @@ async fn simulate_transaction( .build(); // Read and fill in public key as necessary, this is required for simulation! - // TODO: Only single signer supported let public_key = if let Some(public_key) = options.public_keys.as_ref().and_then(|inner| inner.first()) { Ed25519PublicKey::from_encoded_string(&public_key.hex_bytes).map_err(|err| { @@ -424,7 +446,7 @@ async fn simulate_transaction( /// Construction metadata command /// -/// Retrieve sequence number for submitting transactions +/// Retrieves sequence number, gas price, max gas, gas estimate for the transaction /// /// [API Spec](https://www.rosetta-api.org/docs/ConstructionApi.html#constructionmetadata) async fn construction_metadata( @@ -443,10 +465,10 @@ async fn construction_metadata( return Err(ApiError::ChainIdMismatch); } + // Retrieve the sequence number from the rest server if one wasn't provided let sequence_number = if let Some(sequence_number) = request.options.sequence_number { sequence_number.0 } else { - // Retrieve the sequence number from the rest server if one wasn't provided response.inner().sequence_number }; @@ -490,6 +512,8 @@ async fn construction_parse( ) -> ApiResult { debug!("/construction/parse {:?}", request); check_network(request.network_identifier, &server_context)?; + + // For signed transactions, we can pull the signers and the raw transaction let metadata; let (account_identifier_signers, unsigned_txn) = if request.signed { let signed_txn: SignedTransaction = decode_bcs(&request.transaction, "SignedTransaction")?; @@ -509,6 +533,7 @@ async 
fn construction_parse( signed_txn.into_raw_transaction(), ) } else { + // For unsigned transactions,w e can only pull the transaction let unsigned_txn: RawTransaction = decode_bcs(&request.transaction, "UnsignedTransaction")?; metadata = Some(ConstructionParseMetadata { unsigned_transaction: Some(unsigned_txn.clone()), @@ -516,9 +541,12 @@ async fn construction_parse( }); (None, unsigned_txn) }; + + // The sender however should always be present, even if not signed let sender = unsigned_txn.sender(); - // This is messy, but all we can do + // This is messy, but all we can do is to manually go through and check the entry functions associated to convert to Rosetta operations + // TODO: We should centralize all this operation -> entry function / entry function -> operation code let operations = match unsigned_txn.into_payload() { TransactionPayload::EntryFunction(inner) => { let (module, function_name, type_args, args) = inner.into_inner(); @@ -602,6 +630,7 @@ async fn construction_parse( }) } +/// Parses 0x1::aptos_account::create(auth_key: address) fn parse_create_account_operation( sender: AccountAddress, type_args: &[TypeTag], @@ -632,6 +661,7 @@ fn parse_create_account_operation( } } +/// Parses 0x1::coin::transfer(receiver: address, amount: u64) fn parse_transfer_operation( sender: AccountAddress, type_args: &[TypeTag], @@ -640,7 +670,6 @@ fn parse_transfer_operation( let mut operations = Vec::new(); // Check coin is the native coin - let currency = match type_args.first() { Some(TypeTag::Struct(struct_tag)) => { let StructTag { @@ -660,7 +689,6 @@ fn parse_transfer_operation( }; // Retrieve the args for the operations - let receiver: AccountAddress = if let Some(receiver) = args.first() { bcs::from_bytes(receiver)? 
} else { @@ -693,6 +721,7 @@ fn parse_transfer_operation( Ok(operations) } +/// Parses 0x1::aptos_account::transfer(receiver: address, amount: u64) fn parse_account_transfer_operation( sender: AccountAddress, type_args: &[TypeTag], @@ -708,7 +737,7 @@ fn parse_account_transfer_operation( let mut operations = Vec::new(); // Retrieve the args for the operations - + // TODO: This is the same as coin::transfer, we should combine them let receiver: AccountAddress = if let Some(receiver) = args.first() { bcs::from_bytes(receiver)? } else { @@ -741,6 +770,7 @@ fn parse_account_transfer_operation( Ok(operations) } +/// Parses a specific BCS function argument to the given type pub fn parse_function_arg( name: &str, args: &[Vec], @@ -758,6 +788,7 @@ pub fn parse_function_arg( )))) } +/// Parses 0x1::staking_contract::switch_operator_with_same_commission(old_operator: address, new_operator: address) pub fn parse_set_operator_operation( sender: AccountAddress, type_args: &[TypeTag], @@ -782,6 +813,7 @@ pub fn parse_set_operator_operation( )]) } +/// Parses 0x1::staking_contract::update_voter(operator: address, new_voter: address) pub fn parse_set_voter_operation( sender: AccountAddress, type_args: &[TypeTag], @@ -805,6 +837,7 @@ pub fn parse_set_voter_operation( )]) } +/// Parses 0x1::staking_contract::create_staking_contract(operator: address, voter: address, amount: u64, commission_percentage: u64) pub fn parse_create_stake_pool_operation( sender: AccountAddress, type_args: &[TypeTag], @@ -832,6 +865,7 @@ pub fn parse_create_stake_pool_operation( )]) } +/// Parses 0x1::staking_contract::reset_lockup(operator: address) pub fn parse_reset_lockup_operation( sender: AccountAddress, type_args: &[TypeTag], @@ -853,6 +887,7 @@ pub fn parse_reset_lockup_operation( )]) } +/// Parses 0x1::staking_contract::unlock_stake(operator: address, amount: u64) pub fn parse_unlock_stake_operation( sender: AccountAddress, type_args: &[TypeTag], @@ -877,6 +912,7 @@ pub fn 
parse_unlock_stake_operation( )]) } +/// Parses 0x1::staking_contract::update_commission(operator: address, new_commission_percentage: u64) pub fn parse_update_commission_operation( sender: AccountAddress, type_args: &[TypeTag], @@ -901,6 +937,7 @@ pub fn parse_update_commission_operation( )]) } +/// Parses 0x1::staking_contract::distribute(staker: address, operator: address) pub fn parse_distribute_staking_rewards_operation( sender: AccountAddress, type_args: &[TypeTag], @@ -925,6 +962,7 @@ pub fn parse_distribute_staking_rewards_operation( )]) } +/// Parses 0x1::delegation_pool::add_stake(pool_address: address, amount: u64) pub fn parse_delegation_pool_add_stake_operation( delegator: AccountAddress, type_args: &[TypeTag], @@ -949,6 +987,7 @@ pub fn parse_delegation_pool_add_stake_operation( )]) } +/// Parses 0x1::delegation_pool::unlock(pool_address: address, amount: u64) pub fn parse_delegation_pool_unlock_operation( delegator: AccountAddress, type_args: &[TypeTag], @@ -973,6 +1012,7 @@ pub fn parse_delegation_pool_unlock_operation( )]) } +/// Parses 0x1::delegation_pool::withdraw(pool_address: address, amount: u64) pub fn parse_delegation_pool_withdraw_operation( delegator: AccountAddress, type_args: &[TypeTag], @@ -999,7 +1039,7 @@ pub fn parse_delegation_pool_withdraw_operation( /// Construction payloads command (OFFLINE) /// -/// Constructs payloads for given known operations +/// Constructs payloads for given known operations. 
This converts Rosetta [Operation]s to a [RawTransaction] /// /// [API Spec](https://www.rosetta-api.org/docs/ConstructionApi.html#constructionpayloads) async fn construction_payloads( @@ -1009,8 +1049,11 @@ async fn construction_payloads( debug!("/construction/payloads {:?}", request); check_network(request.network_identifier, &server_context)?; - // Retrieve the real operation we're doing + // Retrieve the real operation we're doing, this identifies the sub-operations to a function let mut operation = InternalOperation::extract(&request.operations)?; + + // For some reason, metadata is optional on the Rosetta spec, we enforce it here, otherwise we + // can't build the [RawTransaction] offline. let metadata = if let Some(ref metadata) = request.metadata { metadata } else { @@ -1018,6 +1061,8 @@ async fn construction_payloads( }; // This is a hack to ensure that the payloads actually have overridden operators if not provided + // It ensures that the operations provided match the metadata provided. + // TODO: Move this to a separate function match &mut operation { InternalOperation::CreateAccount(_) => { if operation != metadata.internal_operation { @@ -1225,6 +1270,7 @@ async fn construction_payloads( } let unsigned_transaction = txn_builder.build(); + // Build a signing message so that an external signer can sign with Ed25519 without knowing BCS let signing_message = hex::encode(signing_message(&unsigned_transaction).map_err(|err| { ApiError::InvalidInput(Some(format!( "Invalid transaction, can't build into a signing message {}", @@ -1246,7 +1292,8 @@ async fn construction_payloads( /// Construction preprocess command (OFFLINE) /// -/// This creates the request needed to fetch metadata +/// This creates the request needed to fetch metadata. It basically verifies that the inputs are +/// valid for calling on-chain data. 
/// /// [API Spec](https://www.rosetta-api.org/docs/ConstructionApi.html#constructionpreprocess) async fn construction_preprocess( @@ -1256,9 +1303,13 @@ async fn construction_preprocess( debug!("/construction/preprocess {:?}", request); check_network(request.network_identifier, &server_context)?; + // Determine the actual operation from the collection of Rosetta [Operation] let internal_operation = InternalOperation::extract(&request.operations)?; + + // Provide the accounts that need public keys (there's only one supported today) let required_public_keys = vec![AccountIdentifier::base_account(internal_operation.sender())]; + // Verify that the max gas value is valid if let Some(max_gas) = request .metadata .as_ref() @@ -1270,11 +1321,14 @@ async fn construction_preprocess( ))); } } + + // Verify that expiration time is valid if let Some(expiry_time_secs) = request .metadata .as_ref() .and_then(|inner| inner.expiry_time_secs) { + // Probably should be greater than now + some amount of time, but for now it's valid if expiry_time_secs.0 <= SystemTime::now() .duration_since(UNIX_EPOCH) @@ -1291,12 +1345,12 @@ async fn construction_preprocess( } // Check gas input options - let public_keys = request .metadata .as_ref() .and_then(|inner| inner.public_keys.as_ref()); + // A public key can be provided for simulation, otherwise, a max gas amount would be given. 
if request .metadata .as_ref() @@ -1313,6 +1367,8 @@ async fn construction_preprocess( ))); } + // Convert it to an input to the metadata call + // TODO: Refactor so that it only does `request.metadata.as_ref()` once Ok(ConstructionPreprocessResponse { options: MetadataOptions { internal_operation, @@ -1360,6 +1416,7 @@ async fn construction_submit( let rest_client = server_context.rest_client()?; + // Submits the transaction, and returns the hash of the transaction let txn: SignedTransaction = decode_bcs(&request.signed_transaction, "SignedTransaction")?; let hash = txn.committed_hash(); rest_client.submit_bcs(&txn).await?; diff --git a/crates/aptos-rosetta/src/error.rs b/crates/aptos-rosetta/src/error.rs index 3f0ca50462bee..d1e9fffab7d50 100644 --- a/crates/aptos-rosetta/src/error.rs +++ b/crates/aptos-rosetta/src/error.rs @@ -9,8 +9,11 @@ use serde::{Deserialize, Serialize}; use std::fmt::Formatter; use warp::{http::StatusCode, reply::Reply}; +/// Result for Rosetta API errors pub type ApiResult = Result; +/// All Rosetta API errors. Note that all details must be `Option` to make it easier to list all +/// error messages in the `ApiError::all()` call required by the Rosetta spec. #[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)] pub enum ApiError { TransactionIsPending, @@ -60,6 +63,7 @@ impl std::fmt::Display for ApiError { impl std::error::Error for ApiError {} impl ApiError { + /// Returns every single API errors so the messages can be returned pub fn all() -> Vec { use ApiError::*; vec![ @@ -100,6 +104,7 @@ impl ApiError { ] } + /// All errors are required to have a code. These are just in order that they were added, and no specific grouping. pub fn code(&self) -> u32 { use ApiError::*; match self { @@ -140,6 +145,8 @@ impl ApiError { } } + /// Retriable errors will allow for Rosetta upstreams to retry. These are only for temporary + /// state blockers. Note, there is a possibility that some of these could be retriable forever (e.g. 
an account is never created). pub fn retriable(&self) -> bool { use ApiError::*; matches!( @@ -152,8 +159,8 @@ impl ApiError { ) } + /// All Rosetta errors must be 500s (and retriable tells you if it's actually retriable) pub fn status_code(&self) -> StatusCode { - // Per Rosetta guidelines, all errors are 500s StatusCode::INTERNAL_SERVER_ERROR } @@ -197,6 +204,7 @@ impl ApiError { } } + /// Details are optional, but give more details for each error message pub fn details(self) -> Option { match self { ApiError::DeserializationFailed(inner) => inner, @@ -233,6 +241,7 @@ impl ApiError { ApiError::DeserializationFailed(Some(type_.to_string())) } + /// Converts API Error into the wire representation pub fn into_error(self) -> types::Error { self.into() } @@ -253,6 +262,7 @@ impl From for types::Error { } } +// Converts Node API errors to Rosetta API errors impl From for ApiError { fn from(err: RestError) -> Self { match err { @@ -344,6 +354,7 @@ impl From for ApiError { } } +// Must implement to ensure rejections are provided when returning errors impl warp::reject::Reject for ApiError {} impl Reply for ApiError { diff --git a/crates/aptos-rosetta/src/lib.rs b/crates/aptos-rosetta/src/lib.rs index a23693081e59f..792084f9edf4f 100644 --- a/crates/aptos-rosetta/src/lib.rs +++ b/crates/aptos-rosetta/src/lib.rs @@ -9,13 +9,12 @@ use crate::{ block::BlockRetriever, common::{handle_request, with_context}, error::{ApiError, ApiResult}, - types::Store, }; use aptos_config::config::ApiConfig; -use aptos_logger::{debug, warn}; +use aptos_logger::debug; use aptos_types::{account_address::AccountAddress, chain_id::ChainId}; use aptos_warp_webserver::{logger, Error, WebServer}; -use std::{collections::BTreeMap, convert::Infallible, sync::Arc}; +use std::{convert::Infallible, sync::Arc}; use tokio::task::JoinHandle; use warp::{ http::{HeaderValue, Method, StatusCode}, @@ -44,8 +43,6 @@ pub struct RosettaContext { pub chain_id: ChainId, /// Block index cache pub block_cache: 
Option>, - pub owner_addresses: Vec, - pub pool_address_to_owner: BTreeMap, } impl RosettaContext { @@ -53,40 +50,11 @@ impl RosettaContext { rest_client: Option>, chain_id: ChainId, block_cache: Option>, - owner_addresses: Vec, ) -> Self { - let mut pool_address_to_owner = BTreeMap::new(); - if let Some(ref rest_client) = rest_client { - // We have to now fill in all of the mappings of owner to pool address - for owner_address in owner_addresses.iter() { - if let Ok(store) = rest_client - .get_account_resource_bcs::( - *owner_address, - "0x1::staking_contract::Store", - ) - .await - { - let store = store.into_inner(); - let pool_addresses: Vec<_> = store - .staking_contracts - .iter() - .map(|(_, pool)| pool.pool_address) - .collect(); - for pool_address in pool_addresses { - pool_address_to_owner.insert(pool_address, *owner_address); - } - } else { - warn!("Did not find a pool for owner: {}", owner_address); - } - } - } - RosettaContext { rest_client, chain_id, block_cache, - owner_addresses, - pool_address_to_owner, } } @@ -112,18 +80,12 @@ pub fn bootstrap( chain_id: ChainId, api_config: ApiConfig, rest_client: Option, - owner_addresses: Vec, ) -> anyhow::Result { let runtime = aptos_runtimes::spawn_named_runtime("rosetta".into(), None); debug!("Starting up Rosetta server with {:?}", api_config); - runtime.spawn(bootstrap_async( - chain_id, - api_config, - rest_client, - owner_addresses, - )); + runtime.spawn(bootstrap_async(chain_id, api_config, rest_client)); Ok(runtime) } @@ -132,7 +94,6 @@ pub async fn bootstrap_async( chain_id: ChainId, api_config: ApiConfig, rest_client: Option, - owner_addresses: Vec, ) -> anyhow::Result> { debug!("Starting up Rosetta server with {:?}", api_config); @@ -153,6 +114,8 @@ pub async fn bootstrap_async( let handle = tokio::spawn(async move { // If it's Online mode, add the block cache let rest_client = rest_client.map(Arc::new); + + // TODO: The BlockRetriever has no cache, and should probably be renamed from block_cache let 
block_cache = rest_client.as_ref().map(|rest_client| { Arc::new(BlockRetriever::new( api_config.max_transactions_page_size, @@ -160,8 +123,7 @@ pub async fn bootstrap_async( )) }); - let context = - RosettaContext::new(rest_client.clone(), chain_id, block_cache, owner_addresses).await; + let context = RosettaContext::new(rest_client.clone(), chain_id, block_cache).await; api.serve(routes(context)).await; }); Ok(handle) diff --git a/crates/aptos-rosetta/src/main.rs b/crates/aptos-rosetta/src/main.rs index d267b0eabb65c..1e136ae75f017 100644 --- a/crates/aptos-rosetta/src/main.rs +++ b/crates/aptos-rosetta/src/main.rs @@ -1,17 +1,17 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 +//! Runs the Rosetta server directly. + #![forbid(unsafe_code)] use aptos_config::config::{ApiConfig, DEFAULT_MAX_PAGE_SIZE}; use aptos_logger::prelude::*; use aptos_node::AptosNodeArgs; use aptos_rosetta::bootstrap; -use aptos_sdk::move_types::account_address::AccountAddress; use aptos_types::chain_id::ChainId; use clap::Parser; use std::{ - fs::read_to_string, net::SocketAddr, path::PathBuf, sync::{ @@ -85,13 +85,8 @@ async fn main() { println!("aptos-rosetta: Starting rosetta"); // Ensure runtime for Rosetta is up and running - let _rosetta = bootstrap( - args.chain_id(), - args.api_config(), - args.rest_client(), - args.owner_addresses(), - ) - .expect("aptos-rosetta: Should bootstrap rosetta server"); + let _rosetta = bootstrap(args.chain_id(), args.api_config(), args.rest_client()) + .expect("aptos-rosetta: Should bootstrap rosetta server"); println!("aptos-rosetta: Rosetta started"); // Run until there is an interrupt @@ -111,9 +106,6 @@ trait ServerArgs { /// Retrieve the chain id fn chain_id(&self) -> ChainId; - - /// Retrieve owner addresses - fn owner_addresses(&self) -> Vec; } /// Aptos Rosetta API Server @@ -154,14 +146,6 @@ impl ServerArgs for CommandArgs { CommandArgs::Online(args) => args.chain_id(), } } - - fn owner_addresses(&self) -> Vec { - 
match self { - CommandArgs::OnlineRemote(args) => args.owner_addresses(), - CommandArgs::Offline(args) => args.owner_addresses(), - CommandArgs::Online(args) => args.owner_addresses(), - } - } } #[derive(Debug, Parser)] @@ -208,10 +192,6 @@ impl ServerArgs for OfflineArgs { fn chain_id(&self) -> ChainId { self.chain_id } - - fn owner_addresses(&self) -> Vec { - vec![] - } } #[derive(Debug, Parser)] @@ -221,7 +201,7 @@ pub struct OnlineRemoteArgs { /// URL for the Aptos REST API. e.g. https://fullnode.devnet.aptoslabs.com #[clap(long, default_value = "http://localhost:8080")] rest_api_url: url::Url, - /// Owner addresses file as a YAML file with a list + /// DEPRECATED: Owner addresses file as a YAML file with a list #[clap(long, value_parser)] owner_address_file: Option, } @@ -238,17 +218,6 @@ impl ServerArgs for OnlineRemoteArgs { fn chain_id(&self) -> ChainId { self.offline_args.chain_id } - - fn owner_addresses(&self) -> Vec { - if let Some(ref path) = self.owner_address_file { - serde_yaml::from_str( - &read_to_string(path.as_path()).expect("Failed to read owner address file"), - ) - .expect("Owner address file is in an invalid format") - } else { - vec![] - } - } } #[derive(Debug, Parser)] @@ -273,10 +242,6 @@ impl ServerArgs for OnlineLocalArgs { fn chain_id(&self) -> ChainId { self.online_args.offline_args.chain_id } - - fn owner_addresses(&self) -> Vec { - self.online_args.owner_addresses() - } } #[test] diff --git a/crates/aptos-rosetta/src/network.rs b/crates/aptos-rosetta/src/network.rs index 926ee55192c38..a9d3256334072 100644 --- a/crates/aptos-rosetta/src/network.rs +++ b/crates/aptos-rosetta/src/network.rs @@ -58,6 +58,7 @@ async fn network_list( "network_list", ); + // Rosetta server always only supports one chain at a time let response = NetworkListResponse { network_identifiers: vec![server_context.chain_id.into()], }; @@ -91,6 +92,7 @@ async fn network_options( middleware_version: "0.1.0".to_string(), }; + // Collect all possible responses 
allowed let operation_statuses = OperationStatusType::all() .into_iter() .map(|status| status.into()) @@ -108,10 +110,14 @@ async fn network_options( operation_statuses, operation_types, errors, + // Historical balances are allowed to be looked up (pruning is handled on the API) historical_balance_lookup: true, + // Timestamp starts on block 2 technically, since block 0 is genesis, and block 1 is the first block (without a timestamp) timestamp_start_index: 2, + // No call methods supported, possibly could be used for view functions in the future call_methods: vec![], balance_exemptions: vec![], + // Mempool lookup not supported mempool_coins: false, }; @@ -140,10 +146,14 @@ async fn network_status( let chain_id = server_context.chain_id; let rest_client = server_context.rest_client()?; let block_cache = server_context.block_cache()?; + + // Retrieve the genesis block info let genesis_block_identifier = block_cache .get_block_info_by_height(0, chain_id) .await? .block_id; + + // Retrieve current ledger state let response = rest_client.get_ledger_information().await?; let state = response.state(); diff --git a/crates/aptos-rosetta/src/types/identifiers.rs b/crates/aptos-rosetta/src/types/identifiers.rs index e6ac9a79b908a..42abe9d611308 100644 --- a/crates/aptos-rosetta/src/types/identifiers.rs +++ b/crates/aptos-rosetta/src/types/identifiers.rs @@ -36,6 +36,7 @@ impl AccountIdentifier { str_to_account_address(self.address.as_str()) } + /// Retrieve the pool address from an [`AccountIdentifier`], if it exists pub fn pool_address(&self) -> ApiResult> { if let Some(sub_account) = &self.sub_account { if let Some(metadata) = &sub_account.metadata { @@ -46,6 +47,7 @@ impl AccountIdentifier { Ok(None) } + /// Builds a normal account [`AccountIdentifier`] for a given address pub fn base_account(address: AccountAddress) -> Self { AccountIdentifier { address: to_hex_lower(&address), @@ -53,6 +55,7 @@ impl AccountIdentifier { } } + /// Builds a stake account 
[`AccountIdentifier`] for a given address to retrieve stake balances pub fn total_stake_account(address: AccountAddress) -> Self { AccountIdentifier { address: to_hex_lower(&address), @@ -60,6 +63,7 @@ impl AccountIdentifier { } } + /// Builds a pending active stake account [`AccountIdentifier`] for a given address to retrieve pending active stake balances pub fn pending_active_stake_account(address: AccountAddress) -> Self { AccountIdentifier { address: to_hex_lower(&address), @@ -67,6 +71,7 @@ impl AccountIdentifier { } } + /// Builds a active stake account [`AccountIdentifier`] for a given address to retrieve active stake balances pub fn active_stake_account(address: AccountAddress) -> Self { AccountIdentifier { address: to_hex_lower(&address), @@ -74,6 +79,7 @@ impl AccountIdentifier { } } + /// Builds a pending inactive stake account [`AccountIdentifier`] for a given address to retrieve pending inactive stake balances pub fn pending_inactive_stake_account(address: AccountAddress) -> Self { AccountIdentifier { address: to_hex_lower(&address), @@ -81,6 +87,7 @@ impl AccountIdentifier { } } + /// Builds a inactive stake account [`AccountIdentifier`] for a given address to retrieve inactive stake balances pub fn inactive_stake_account(address: AccountAddress) -> Self { AccountIdentifier { address: to_hex_lower(&address), @@ -88,6 +95,7 @@ impl AccountIdentifier { } } + /// Builds an operator stake account [`AccountIdentifier`] for a given address to retrieve operator stake balances pub fn operator_stake_account( address: AccountAddress, operator_address: AccountAddress, @@ -98,6 +106,7 @@ impl AccountIdentifier { } } + /// Returns true if the account doesn't have a sub account pub fn is_base_account(&self) -> bool { self.sub_account.is_none() } @@ -178,6 +187,7 @@ impl AccountIdentifier { } } + /// Retrieves the operator address if it has one in the sub-account pub fn operator_address(&self) -> ApiResult { if let Some(ref inner) = self.sub_account { 
inner.operator_address() @@ -189,14 +199,16 @@ impl AccountIdentifier { } } +/// Converts a string to an account address with error handling fn str_to_account_address(address: &str) -> Result { AccountAddress::from_str(address) .map_err(|_| ApiError::InvalidInput(Some("Invalid account address".to_string()))) } -/// There are two types of SubAccountIdentifiers +/// There are many types of SubAccountIdentifiers /// 1. `stake` which is the total stake /// 2. `stake-` which is the stake on the operator +/// 3. And more for pool addresses and various stake types #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] pub struct SubAccountIdentifier { /// Hex encoded AccountAddress beginning with 0x @@ -356,7 +368,7 @@ impl SubAccountIdentifierMetadata { } } -/// Identifier for a "block". In aptos, we use a transaction model, so the index +/// Identifier for a "block". On Aptos, we use a transaction model, so the index /// represents multiple transactions in a "block" grouping of transactions /// /// [API Spec](https://www.rosetta-api.org/docs/models/BlockIdentifier.html) @@ -364,7 +376,7 @@ impl SubAccountIdentifierMetadata { pub struct BlockIdentifier { /// Block index, which points to a txn at the beginning of a "block" pub index: u64, - /// Accumulator hash at the beginning of the block + /// A fake hash, that is actually `chain_id-block_height` pub hash: String, } diff --git a/crates/aptos-rosetta/src/types/misc.rs b/crates/aptos-rosetta/src/types/misc.rs index d7e49c0205949..ed0151417f34c 100644 --- a/crates/aptos-rosetta/src/types/misc.rs +++ b/crates/aptos-rosetta/src/types/misc.rs @@ -17,6 +17,7 @@ use std::{ str::FromStr, }; +// TODO: Move these to `move_types.rs` static DELEGATION_POOL_GET_STAKE_FUNCTION: Lazy = Lazy::new(|| "0x1::delegation_pool::get_stake".parse().unwrap()); static STAKE_GET_LOCKUP_SECS_FUNCTION: Lazy = @@ -29,6 +30,8 @@ static STAKING_CONTRACT_AMOUNTS_FUNCTION: Lazy = Lazy::new(|| { /// Errors that can be returned by the API /// +/// 
Internally [`ApiError`] is used, but it is converted to this for on wire representation +/// /// [API Spec](https://www.rosetta-api.org/docs/models/Error.html) #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] pub struct Error { @@ -59,7 +62,7 @@ pub struct OperationStatus { pub successful: bool, } -/// Represents a Peer, used for discovery +/// UNUSED Represents a Peer, used for discovery /// /// [API Spec](https://www.rosetta-api.org/docs/models/Peer.html) #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] @@ -67,6 +70,8 @@ pub struct Peer { peer_id: String, } +/// UNUSED Represents the current status of the node vs expected state +/// /// [API Spec](https://www.rosetta-api.org/docs/models/SyncStatus.html) #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] pub struct SyncStatus { @@ -101,6 +106,10 @@ pub struct BalanceResult { } /// An internal enum to support Operation typing +/// +/// NOTE: Order is important here for sorting later, this order must not change, and if there are new +/// types added, they should be added before Fee. We sort the sub operations so that they have a +/// stable order for things like transfers. #[derive(Clone, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] pub enum OperationType { // Create must always be first for ordering @@ -140,6 +149,7 @@ impl OperationType { const WITHDRAW: &'static str = "withdraw"; const WITHDRAW_UNDELEGATED_FUNDS: &'static str = "withdraw_undelegated_funds"; + /// Returns all operations types, order doesn't matter. 
pub fn all() -> Vec { use OperationType::*; vec![ @@ -165,6 +175,7 @@ impl FromStr for OperationType { type Err = ApiError; fn from_str(s: &str) -> Result { + // Handles string to operation Rust typing match s.to_lowercase().trim() { Self::CREATE_ACCOUNT => Ok(OperationType::CreateAccount), Self::DEPOSIT => Ok(OperationType::Deposit), @@ -276,6 +287,7 @@ impl Display for OperationStatusType { } } +/// Retrieves stake balances for an owner with the associated pool pub async fn get_stake_balances( rest_client: &aptos_rest_client::Client, owner_account: &AccountIdentifier, @@ -283,6 +295,8 @@ pub async fn get_stake_balances( version: u64, ) -> ApiResult> { const STAKE_POOL: &str = "0x1::stake::StakePool"; + + // Retreive the pool resource if let Ok(response) = rest_client .get_account_resource_at_version_bcs::(pool_address, STAKE_POOL, version) .await @@ -362,6 +376,7 @@ pub async fn get_stake_balances( } } +/// Retrieve delegation stake balances for a given owner, pool, and version pub async fn get_delegation_stake_balances( rest_client: &aptos_rest_client::Client, account_identifier: &AccountIdentifier, diff --git a/crates/aptos-rosetta/src/types/move_types.rs b/crates/aptos-rosetta/src/types/move_types.rs index f57630361836f..7bdb7e2faf16a 100644 --- a/crates/aptos-rosetta/src/types/move_types.rs +++ b/crates/aptos-rosetta/src/types/move_types.rs @@ -39,7 +39,8 @@ pub const SWITCH_OPERATOR_WITH_SAME_COMMISSION_FUNCTION: &str = "switch_operator_with_same_commission"; pub const UPDATE_VOTER_FUNCTION: &str = "update_voter"; pub const UNLOCK_STAKE_FUNCTION: &str = "unlock_stake"; -// TODO fix the typo in function name. commision -> commission +// TODO fix the typo in function name. 
commision -> commission (this has to be done on-chain first) +// TODO: Handle update_commission and update_commision pub const UPDATE_COMMISSION_FUNCTION: &str = "update_commision"; pub const DISTRIBUTE_STAKING_REWARDS_FUNCTION: &str = "distribute"; @@ -239,7 +240,7 @@ pub struct UndelegationEvent { } #[derive(Debug, Serialize, Deserialize)] -pub struct WithdrawUndelegedEvent { +pub struct WithdrawUndelegatedEvent { pub pool_address: AccountAddress, pub delegator_address: AccountAddress, pub amount_withdrawn: u64, diff --git a/crates/aptos-rosetta/src/types/objects.rs b/crates/aptos-rosetta/src/types/objects.rs index 92af38a1e3e2f..c1dd26ca6d7ae 100644 --- a/crates/aptos-rosetta/src/types/objects.rs +++ b/crates/aptos-rosetta/src/types/objects.rs @@ -28,8 +28,8 @@ use aptos_logger::warn; use aptos_rest_client::aptos_api_types::{TransactionOnChainData, U64}; use aptos_types::{ account_address::AccountAddress, - account_config::{AccountResource, CoinStoreResource, WithdrawEvent}, - contract_event::ContractEvent, + account_config::{AccountResource, CoinStoreResourceUntyped, WithdrawEvent}, + contract_event::{ContractEvent, FEE_STATEMENT_EVENT_TYPE}, event::EventKey, fee_statement::FeeStatement, stake_pool::{SetOperatorEvent, StakePool}, @@ -106,7 +106,7 @@ impl Amount { #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] pub struct BalanceExemption {} -/// Representation of a Block for a blockchain. For aptos it is the version +/// Representation of a Block for a blockchain. 
/// /// [API Spec](https://www.rosetta-api.org/docs/models/Block.html) #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] @@ -879,6 +879,7 @@ impl Transaction { server_context: &RosettaContext, txn: TransactionOnChainData, ) -> ApiResult { + // Parses the events, changesets, and metadata out of each transaction use aptos_types::transaction::Transaction::*; let (txn_type, maybe_user_txn, txn_info, events) = match &txn.transaction { UserTransaction(user_txn) => { @@ -965,6 +966,8 @@ impl Transaction { )); } + // TODO: Handle storage gas refund (though nothing currently in Rosetta refunds) + Ok(Transaction { transaction_identifier: (&txn_info).into(), operations, @@ -1155,6 +1158,7 @@ fn parse_failed_operations_from_txn_payload( operations } +/// Parses a 0x1::coin::transfer to a Withdraw and Deposit fn parse_transfer_from_txn_payload( payload: &EntryFunction, currency: Currency, @@ -1294,6 +1298,7 @@ async fn parse_operations_from_write_set( } } +/// Parses any account resource changes, in this case only create account is supported fn parse_account_resource_changes( version: u64, address: AccountAddress, @@ -1530,6 +1535,7 @@ fn parse_stake_pool_resource_changes( Ok(operations) } +/// Handles 0x1::staking_contract resource changes async fn parse_staking_contract_resource_changes( owner_address: AccountAddress, data: &[u8], @@ -1682,6 +1688,7 @@ async fn parse_staking_contract_resource_changes( Ok(operations) } +/// Parses 0x1::staking_contract commission updates async fn parse_update_commission( _owner_address: AccountAddress, data: &[u8], @@ -1727,6 +1734,7 @@ async fn parse_update_commission( Ok(operations) } +/// Parses delegation pool changes to resources async fn parse_delegation_pool_resource_changes( _owner_address: AccountAddress, _data: &[u8], @@ -1748,7 +1756,7 @@ async fn parse_delegation_pool_resource_changes( struct_tag.name.as_str(), ) { (AccountAddress::ONE, DELEGATION_POOL_MODULE, WITHDRAW_STAKE_EVENT) => { - let event: 
WithdrawUndelegedEvent = + let event: WithdrawUndelegatedEvent = if let Ok(event) = bcs::from_bytes(e.event_data()) { event } else { @@ -1776,6 +1784,7 @@ async fn parse_delegation_pool_resource_changes( Ok(operations) } +/// Parses coin store direct changes, for withdraws and deposits async fn parse_coinstore_changes( currency: Currency, version: u64, @@ -1784,7 +1793,7 @@ async fn parse_coinstore_changes( events: &[ContractEvent], mut operation_index: u64, ) -> ApiResult> { - let coin_store: CoinStoreResource = if let Ok(coin_store) = bcs::from_bytes(data) { + let coin_store: CoinStoreResourceUntyped = if let Ok(coin_store) = bcs::from_bytes(data) { coin_store } else { warn!( @@ -1796,6 +1805,8 @@ async fn parse_coinstore_changes( let mut operations = vec![]; + // TODO: Handle Event V2 here for migration from Event V1 + // Skip if there is no currency that can be found let withdraw_amounts = get_amount_from_event(events, coin_store.withdraw_events().key()); for amount in withdraw_amounts { @@ -1860,6 +1871,7 @@ fn get_fee_statement_from_event(events: &[ContractEvent]) -> Vec { .collect() } +/// Filters events given a specific event key fn filter_events Option, T>( events: &[ContractEvent], event_key: &EventKey, @@ -1909,8 +1921,10 @@ pub enum InternalOperation { impl InternalOperation { /// Pulls the [`InternalOperation`] from the set of [`Operation`] + /// TODO: this needs to be broken up pub fn extract(operations: &Vec) -> ApiResult { match operations.len() { + // Single operation actions 1 => { if let Some(operation) = operations.first() { match OperationType::from_str(&operation.operation_type) { @@ -2155,7 +2169,9 @@ impl InternalOperation { operations )))) }, + // Double operation actions (only coin transfer) 2 => Ok(Self::Transfer(Transfer::extract_transfer(operations)?)), + // Anything else is not expected _ => Err(ApiError::InvalidOperations(Some(format!( "Unrecognized operation combination {:?}", operations diff --git 
a/crates/aptos-rosetta/src/types/requests.rs b/crates/aptos-rosetta/src/types/requests.rs index 32d74addcff11..e88637f08e60c 100644 --- a/crates/aptos-rosetta/src/types/requests.rs +++ b/crates/aptos-rosetta/src/types/requests.rs @@ -58,7 +58,8 @@ pub struct AccountBalanceMetadata { pub operators: Option>, pub lockup_expiration_time_utc: U64, } -/// Reqyest a block (version) on the account + +/// Request a block (version) on the account /// /// With neither value for PartialBlockIdentifier, get the latest version /// diff --git a/crates/aptos-telemetry-service/src/clients/mod.rs b/crates/aptos-telemetry-service/src/clients/mod.rs index e96d697f90d52..e357f60b4f141 100644 --- a/crates/aptos-telemetry-service/src/clients/mod.rs +++ b/crates/aptos-telemetry-service/src/clients/mod.rs @@ -60,6 +60,13 @@ pub mod victoria_metrics_api { } } + pub fn is_selfhosted_vm_client(&self) -> bool { + self.base_url + .host_str() + .unwrap_or_default() + .contains("aptos-all.vm") + } + pub async fn post_prometheus_metrics( &self, raw_metrics_body: Bytes, diff --git a/crates/aptos-telemetry-service/src/peer_location.rs b/crates/aptos-telemetry-service/src/peer_location.rs index 2226ba76e860f..37ccf8622b0ec 100644 --- a/crates/aptos-telemetry-service/src/peer_location.rs +++ b/crates/aptos-telemetry-service/src/peer_location.rs @@ -3,9 +3,12 @@ use crate::metrics::{BIG_QUERY_REQUEST_FAILURES_TOTAL, BIG_QUERY_REQUEST_TOTAL}; use aptos_infallible::RwLock; -use aptos_types::PeerId; -use gcp_bigquery_client::{model::query_request::QueryRequest, Client as BigQueryClient}; -use std::{collections::HashMap, str::FromStr, sync::Arc, time::Duration}; +use aptos_types::{chain_id::ChainId, PeerId}; +use gcp_bigquery_client::{ + model::{query_request::QueryRequest, query_response::ResultSet}, + Client as BigQueryClient, +}; +use std::{collections::HashMap, env, str::FromStr, sync::Arc, time::Duration}; const ANALYTICS_PROJECT_ID: &str = "analytics-test-345723"; @@ -36,10 +39,14 @@ impl 
PeerLocationUpdater { pub fn run(self) -> anyhow::Result<()> { tokio::spawn(async move { loop { - let locations = query_peer_locations(&self.client).await.unwrap(); - { - let mut peer_locations = self.peer_locations.write(); - *peer_locations = locations; + match query_peer_locations(&self.client).await { + Ok(locations) => { + let mut peer_locations = self.peer_locations.write(); + *peer_locations = locations; + }, + Err(e) => { + aptos_logger::error!("Failed to query peer locations: {}", e); + }, } tokio::time::sleep(Duration::from_secs(3600)).await; // 1 hour } @@ -48,29 +55,49 @@ impl PeerLocationUpdater { } } +fn get_chain_id() -> ChainId { + match env::var("GCP_METADATA_PROJECT_ID") { + Ok(val) if val == "aptos-telemetry-svc-mainnet" => ChainId::mainnet(), + Ok(val) if val == "aptos-telemetry-svc-dev" => ChainId::testnet(), + _ => { + aptos_logger::warn!("Unknown GCP_METADATA_PROJECT_ID, defaulting to test"); + ChainId::test() + }, + } +} + +fn process_row( + res: &mut ResultSet, + current_chain_id: &str, + map: &mut HashMap, +) -> anyhow::Result<()> { + let peer_id_raw = res + .get_string_by_name("peer_id")? 
+ .ok_or_else(|| anyhow::anyhow!("Missing peer_id"))?; + let chain_id = res.get_string_by_name("chain_id")?; + + if chain_id.as_deref() != Some(current_chain_id) { + return Ok(()); + } + + let peer_id = PeerId::from_str(&peer_id_raw)?; + let location = PeerLocation { + peer_id, + geo_updated_at: res.get_string_by_name("update_timestamp")?, + country: res.get_string_by_name("country")?, + region: res.get_string_by_name("region")?, + }; + map.entry(peer_id).or_insert(location); + Ok(()) +} + pub async fn query_peer_locations( client: &BigQueryClient, ) -> anyhow::Result> { - let req = QueryRequest::new(" - SELECT - sq.peer_id, - sq.country, - sq.region, - '1985-04-12T23:20:50.52Z' as geo_updated_at - FROM ( - SELECT - tm.peer_id, - tm.epoch, - ROW_NUMBER() OVER (PARTITION BY tm.peer_id ORDER BY tm.epoch DESC) AS row_number, - tm.country, - tm.region - FROM - `node-telemetry.aptos_node_telemetry.custom_events_mainnet_telemetry_rollup_metrics` tm) sq - WHERE - sq.row_number = 1 - LIMIT - 1000 - "); + let current_chain_id = get_chain_id().id().to_string(); + let query = env::var("PEER_LOCATION_QUERY")?; + + let req = QueryRequest::new(query); let req = QueryRequest { timeout_ms: Some(10000), ..req @@ -90,25 +117,11 @@ pub async fn query_peer_locations( let mut map = HashMap::new(); while res.next_row() { - if let Some(peer_id_raw) = res.get_string_by_name("peer_id")? 
{ - match PeerId::from_str(&peer_id_raw) { - Ok(peer_id) => { - let location = PeerLocation { - peer_id, - geo_updated_at: res.get_string_by_name("geo_updated_at")?, - country: res.get_string_by_name("country")?, - region: res.get_string_by_name("region")?, - }; - map.entry(peer_id).or_insert(location); - }, - Err(e) => { - aptos_logger::error!("Failed to parse peer_id: {}", e); - }, - } - } + process_row(&mut res, ¤t_chain_id, &mut map)?; } Ok(map) } + #[cfg(feature = "bigquery_integration_tests")] mod tests { use super::*; @@ -116,6 +129,9 @@ mod tests { #[tokio::test] async fn test_query() { + env::set_var("GCP_METADATA_PROJECT_ID", "aptos-telemetry-svc-dev"); + env::set_var("PEER_LOCATION_QUERY", ""); + let client = BigQueryClient::from_application_default_credentials() .await .unwrap(); diff --git a/crates/aptos-telemetry-service/src/prometheus_push_metrics.rs b/crates/aptos-telemetry-service/src/prometheus_push_metrics.rs index 740ad64bab188..c39c478cee913 100644 --- a/crates/aptos-telemetry-service/src/prometheus_push_metrics.rs +++ b/crates/aptos-telemetry-service/src/prometheus_push_metrics.rs @@ -11,8 +11,9 @@ use crate::{ types::{auth::Claims, common::NodeType}, }; use aptos_types::PeerId; +use rand::Rng; use reqwest::{header::CONTENT_ENCODING, StatusCode}; -use std::time::Duration; +use std::{env, time::Duration}; use tokio::time::Instant; use warp::{filters::BoxedFilter, hyper::body::Bytes, reject, reply, Filter, Rejection, Reply}; @@ -44,15 +45,37 @@ pub async fn handle_metrics_ingest( ) -> anyhow::Result { debug!("handling prometheus metrics ingest"); - let mut extra_labels = Vec::new(); - extra_labels.extend(claims_to_extra_labels( + let enable_location_labels = env::var("FEATURE_LOCATION_LABELS_ENABLED") + .map(|val| val.parse::().unwrap_or(false)) + .unwrap_or(false); + + let enable_random_label = env::var("FEATURE_RANDOM_LABEL_ENABLED") + .map(|val| val.parse::().unwrap_or(false)) + .unwrap_or(false); + + let max_random_value = 
env::var("FEATURE_RANDOM_LABEL_MAX_VALUE") + .map(|val| val.parse::().unwrap_or(20)) + .unwrap_or(20); + + let mut extra_labels = claims_to_extra_labels( &claims, context .peer_identities() .get(&claims.chain_id) .and_then(|peers| peers.get(&claims.peer_id)), - )); - extra_labels.extend(peer_location_labels(&context, &claims.peer_id)); + ); + if enable_location_labels { + extra_labels.extend_from_slice(&peer_location_labels(&context, &claims.peer_id)); + } + + let extra_labels_with_random_label = if enable_random_label { + let random_num = rand::thread_rng().gen_range(0, max_random_value); + let mut labels = extra_labels.clone(); + labels.push(format!("random_label={}", random_num)); + labels + } else { + extra_labels.clone() + }; let client = match claims.node_type { NodeType::UnknownValidator | NodeType::UnknownFullNode => { @@ -64,6 +87,11 @@ pub async fn handle_metrics_ingest( let start_timer = Instant::now(); let post_futures = client.iter().map(|(name, client)| async { + let extra_labels = if client.is_selfhosted_vm_client() { + extra_labels_with_random_label.clone() + } else { + extra_labels.clone() + }; let result = tokio::time::timeout( Duration::from_secs(MAX_METRICS_POST_WAIT_DURATION_SECS), client.post_prometheus_metrics( diff --git a/crates/aptos/CHANGELOG.md b/crates/aptos/CHANGELOG.md index 576bf4859d97b..f279dba51406d 100644 --- a/crates/aptos/CHANGELOG.md +++ b/crates/aptos/CHANGELOG.md @@ -3,7 +3,37 @@ All notable changes to the Supra CLI will be captured in this file. This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html) and the format set out by [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). ## Unreleased + +## [4.2.0] - 2024/09/16 +- Update latest VM and associated changes +- Update to latest compiler + +## [4.1.0] - 2024/08/30 +- Marks Move 2 and compiler v2 as stable. +- Adds new `--move-2` flag to work with Move 2 without need for multiple other flags. 
+- Adds `aptos move lint` to produce lint warnings for the current package. Only a few lint rules are implemented for now, + but more are coming. +- Adds `aptos move fmt`, which runs the Move formatter, `movefmt`, on the current package. Also adds + `aptos update movefmt`. This installs / updates the `movefmt` binary. +- Adds safe methods to delete a profile, to rename a profile, and to output the private key of a profile. + +## [4.0.0] - 2024/08/13 +- **Breaking Change**: change key rotation options such that user has to either pass the name of a new profile or explicitly flag that no profile should be generated, since without this update the interactive profile generator could fail out after the key has already been rotated. This forces the check for new profile validity before doing anything onchain. +- Add support for key rotation to/from Ledger hardware wallets. +- Fixes a bug in the Move Prover leading to internal error in generated boogie (error 'global `#0_info` cannot be accessed') +- **Breaking Change**: A new native function to compute serialized size of a Move value is now supported. + +## [3.5.1] - 2024/07/21 +- Upgraded indexer processors for localnet from 5244b84fa5ed872e5280dc8df032d744d62ad29d to fa1ce4947f4c2be57529f1c9732529e05a06cb7f. Upgraded Hasura metadata accordingly. +- Upgraded Hasura image from 2.36.1 to 2.40.2-ce. Note that we use the Community Edition, so the console won't ask users to upgrade to enterprise anymore / hint at any enterprise features. +- Fixes a bug in the Move compiler (both v1 and v2) which disallowed `match` as a name for a function or for a variable. + +## [3.5.0] - 2024/07/06 - Add balance command to easily get account balances for APT currently +- Add network to config file +- Add explorer links to initialized accounts, and transaction submissions +- Alias some move commands as common misnomers (e.g. 
build -> compile, deploy -> publish) +- Add "hello_blockchain" template to move init command ## [3.4.1] - 2024/05/31 - Upgraded indexer processors for localnet from ca60e51b53c3be6f9517de7c73d4711e9c1f7236 to 5244b84fa5ed872e5280dc8df032d744d62ad29d. Upgraded Hasura metadata accordingly. diff --git a/crates/aptos/CONTRIBUTING.md b/crates/aptos/CONTRIBUTING.md new file mode 100644 index 0000000000000..7bd0fe7d976ee --- /dev/null +++ b/crates/aptos/CONTRIBUTING.md @@ -0,0 +1,247 @@ +# Aptos CLI Development Guide + +This is a list of design decisions and guidelines for adding commands to the Aptos CLI. + +## Command Groups + +Commands should be grouped into the existing categories. The current categories are: + +- account +- config +- genesis +- governance +- key +- move +- multisig +- node +- stake +- update + +All categories must have a doc comment that describes the command. It must also derive `Parser` and `Subcommand`. For +example: + +```rust +/// Tool for interacting with accounts +/// +/// This tool is used to create accounts, get information about the +/// account's resources, and transfer resources between accounts. 
+#[derive(Debug, Subcommand)] +pub enum AccountTool { + Create(create::CreateAccount), + CreateResourceAccount(create_resource_account::CreateResourceAccount), + DeriveResourceAccountAddress(derive_resource_account::DeriveResourceAccount), + FundWithFaucet(fund::FundWithFaucet), + Balance(balance::Balance), + List(list::ListAccount), + LookupAddress(key_rotation::LookupAddress), + RotateKey(key_rotation::RotateKey), + Transfer(transfer::TransferCoins), +} +``` + +Then it must also be added to the top level command structure: + +```rust +/// Command Line Interface (CLI) for developing and interacting with the Aptos blockchain +#[derive(Parser)] +#[clap(name = "aptos", author, version, propagate_version = true, styles = aptos_cli_common::aptos_cli_style())] +pub enum Tool { + #[clap(subcommand)] + Account(account::AccountTool), + #[clap(subcommand)] + Config(config::ConfigTool), + #[clap(subcommand)] + Genesis(genesis::GenesisTool), + #[clap(subcommand)] + Governance(governance::GovernanceTool), + Info(InfoTool), + Init(common::init::InitTool), + #[clap(subcommand)] + Key(op::key::KeyTool), + #[clap(subcommand)] + Move(move_tool::MoveTool), + #[clap(subcommand)] + Multisig(account::MultisigAccountTool), + #[clap(subcommand)] + Node(node::NodeTool), + #[clap(subcommand)] + Stake(stake::StakeTool), + #[clap(subcommand)] + Update(update::UpdateTool), +} +``` + +## Commands + +A command is a single top level command for the CLI. The CLI command must complete its action in the single command +execution. + +### Command Names + +```rust +/// Compiles a package and returns the associated ModuleIds +#[derive(Parser)] +pub struct CompilePackage { + /// Save the package metadata in the package's build directory + /// + /// If set, package metadata should be generated and stored in the package's build directory. + /// This metadata can be used to construct a transaction to publish a package. 
+ #[clap(long)] + pub(crate) save_metadata: bool, + + #[clap(flatten)] + pub(crate) included_artifacts_args: IncludedArtifactsArgs, + #[clap(flatten)] + pub(crate) move_options: MovePackageDir, +} +``` + +Command names should be simple, identifiable, and easy to use. For example, compilation is grouped in `move` and uses +the subcommand `compile`. + +```bash +aptos move compile +``` + +Once the new command is created, it should have `#[derive(Parser)]` added above. Additionally, it will need to be added to +the higher level tool: + +```rust +#[derive(Subcommand)] +pub enum MoveTool { + #[clap(alias = "build")] + Compile(CompilePackage), + #[clap(alias = "build-script")] + CompileScript(CompileScript), + Init(Init), + // ... +} + +impl MoveTool { + pub async fn execute(self) -> CliResult { + match self { + MoveTool::Compile(tool) => tool.execute_serialized().await, + MoveTool::CompileScript(tool) => tool.execute_serialized().await, + MoveTool::Init(tool) => tool.execute_serialized_success().await, + } + } +} +``` + +Note that, there are two types of commands here `execute_serialized()` and `execute_serialized_success()`, if the +command must be returning a value, then it should call `execute_serialized()`, which will convert the input type as JSON +to `stdout`. + +Additionally, `alias` is allowed, but discouraged for new commands. This is mostly to provide either backwards +compatibility or reduce confusion for new users. + +### Command flags + +```rust +#[derive(Parser)] +pub struct CompilePackage { + /// Save the package metadata in the package's build directory + /// + /// If set, package metadata should be generated and stored in the package's build directory. + /// This metadata can be used to construct a transaction to publish a package. + #[clap(long)] + pub(crate) save_metadata: bool, + + // ... +} +``` + +Command inputs should always be documented for help to show up in the CLI. For example, below is the example for +`save_metadata`. 
They should be snake case, and will show up as a flag. Do not use `short` commands, as they can be +confused between different commands. + +```bash +aptos move compile --save-metadata +``` + +### Command flag groupings + +```rust +/// Compiles a package and returns the associated ModuleIds +#[derive(Parser)] +pub struct CompilePackage { + // ... + #[clap(flatten)] + pub(crate) included_artifacts_args: IncludedArtifactsArgs, + #[clap(flatten)] + pub(crate) move_options: MovePackageDir, +} +``` + +Command flags can be grouped into common structs to be used across multiple commands. These should be flattened by +adding the struct associated and using `#[clap(flatten)]` like above. These should not have a doc comment, and any doc +comments will not end up in the command. Instead, document the structs directly like so: + +```rust +#[derive(Parser)] +pub struct IncludedArtifactsArgs { + /// Artifacts to be generated when building the package + /// + /// Which artifacts to include in the package. This can be one of `none`, `sparse`, and + /// `all`. `none` is the most compact form and does not allow to reconstruct a source + /// package from chain; `sparse` is the minimal set of artifacts needed to reconstruct + /// a source package; `all` includes all available artifacts. The choice of included + /// artifacts heavily influences the size and therefore gas cost of publishing: `none` + /// is the size of bytecode alone; `sparse` is roughly 2 times as much; and `all` 3-4 + /// as much. 
+ #[clap(long, default_value_t = IncludedArtifacts::Sparse)] + pub(crate) included_artifacts: IncludedArtifacts, +} +``` + +### Command Implementation + +```rust +#[async_trait] +impl CliCommand> for CompilePackage { + fn command_name(&self) -> &'static str { + "CompilePackage" + } + + async fn execute(self) -> CliTypedResult> { + let build_options = BuildOptions { + install_dir: self.move_options.output_dir.clone(), + ..self + .included_artifacts_args + .included_artifacts + .build_options( + self.move_options.dev, + self.move_options.skip_fetch_latest_git_deps, + self.move_options.named_addresses(), + self.move_options.override_std.clone(), + self.move_options.bytecode_version, + self.move_options.compiler_version, + self.move_options.language_version, + self.move_options.skip_attribute_checks, + self.move_options.check_test_code, + ) + }; + let pack = BuiltPackage::build(self.move_options.get_package_path()?, build_options) + .map_err(|e| CliError::MoveCompilationError(format!("{:#}", e)))?; + if self.save_metadata { + pack.extract_metadata_and_save()?; + } + let ids = pack + .modules() + .map(|m| m.self_id().to_string()) + .collect::>(); + // TODO: Also say how many scripts are compiled + Ok(ids) + } +} +``` + +Commands should implement the `CliCommand` trait for the package. This allows it to be called upstream generically +and `T` will automatically be serialized to JSON for the output. This allows for typed testing in unit tests, while +still having output converted for the total CLI. + +It's an anti-pattern to `panic`, please avoid panicking, and instead provide `CliError` or `CliError` conversion for the +current types. + +All output from the CLI should use `eprintln!()`, rather than `println!()`. `stdout` is reserved for the JSON output at +the end of the command, `stderr` is used for the rest of the output. 
diff --git a/crates/aptos/Cargo.toml b/crates/aptos/Cargo.toml index 05e933e6d4e34..27438b55bcc15 100644 --- a/crates/aptos/Cargo.toml +++ b/crates/aptos/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "aptos" description = "Aptos tool for management of nodes and interacting with the blockchain" -version = "3.4.1" +version = "4.2.0" # Workspace inherited keys authors = { workspace = true } @@ -53,8 +53,9 @@ base64 = { workspace = true } bcs = { workspace = true } bollard = { workspace = true } chrono = { workspace = true } -clap = { workspace = true, features = ["env", "unstable-styles"] } +clap = { workspace = true, features = ["env", "unstable-styles", "wrap_help"] } clap_complete = { workspace = true } +colored = { workspace = true } dashmap = { workspace = true } diesel = { workspace = true, features = [ "postgres_backend", @@ -70,6 +71,7 @@ move-bytecode-source-map = { workspace = true } move-cli = { workspace = true } move-command-line-common = { workspace = true } move-compiler = { workspace = true } +move-compiler-v2 = { workspace = true } move-core-types = { workspace = true } move-coverage = { workspace = true } move-disassembler = { workspace = true } @@ -83,14 +85,15 @@ pathsearch = { workspace = true } poem = { workspace = true } # We set default-features to false so we don't onboard the libpq dep. 
See more here: # https://github.com/aptos-labs/aptos-core/pull/12568 -processor = { git = "https://github.com/aptos-labs/aptos-indexer-processors.git", rev = "5244b84fa5ed872e5280dc8df032d744d62ad29d", default-features = false } +processor = { git = "https://github.com/aptos-labs/aptos-indexer-processors.git", rev = "fa1ce4947f4c2be57529f1c9732529e05a06cb7f", default-features = false } rand = { workspace = true } +regex = { workspace = true } reqwest = { workspace = true } self_update = { git = "https://github.com/banool/self_update.git", rev = "8306158ad0fd5b9d4766a3c6bf967e7ef0ea5c4b", features = ["archive-zip", "compression-zip-deflate"] } serde = { workspace = true } serde_json = { workspace = true } serde_yaml = { workspace = true } -server-framework = { git = "https://github.com/aptos-labs/aptos-indexer-processors.git", rev = "5244b84fa5ed872e5280dc8df032d744d62ad29d" } +server-framework = { git = "https://github.com/aptos-labs/aptos-indexer-processors.git", rev = "fa1ce4947f4c2be57529f1c9732529e05a06cb7f" } tempfile = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true } diff --git a/crates/aptos/e2e/cases/account.py b/crates/aptos/e2e/cases/account.py index f4b4e8c02c469..ffe301b30d629 100644 --- a/crates/aptos/e2e/cases/account.py +++ b/crates/aptos/e2e/cases/account.py @@ -154,14 +154,14 @@ def test_account_rotate_key(run_helper: RunHelper, test_name=None): "rotate-key", "--new-private-key", new_private_key, + "--skip-saving-profile", "--assume-yes", ], - input="no\n", ) if '"success": true' not in result.stdout: raise TestError( - f"[aptos account rotate-key --new-private-key {new_private_key} --assume-yes] failed" + f"[aptos account rotate-key --new-private-key {new_private_key} --skip-saving-profile --assume-yes] failed" ) new_profile = run_helper.get_account_info() diff --git a/crates/aptos/src/account/balance.rs b/crates/aptos/src/account/balance.rs index b519471086df6..9aea1d6dd3aea 100644 --- 
a/crates/aptos/src/account/balance.rs +++ b/crates/aptos/src/account/balance.rs @@ -5,7 +5,7 @@ use crate::common::types::{ CliCommand, CliConfig, CliError, CliTypedResult, ConfigSearchMode, ProfileOptions, RestOptions, }; use aptos_api_types::ViewFunction; -use aptos_types::{account_address::AccountAddress, SUPRA_COIN_TYPE}; +use aptos_types::{account_address::AccountAddress, SupraCoinType, CoinType}; use async_trait::async_trait; use clap::Parser; use move_core_types::{ident_str, language_storage::ModuleId, parser::parse_type_tag}; @@ -20,7 +20,7 @@ pub struct Balance { #[clap(long, value_parser = crate::common::types::load_account_arg)] pub(crate) account: Option, - /// Coin type to lookup. Defaults to 0x1::aptos_coin::AptosCoin + /// Coin type to lookup. Defaults to 0x1::supra_coin::SupraCoin #[clap(long)] pub(crate) coin_type: Option, @@ -66,7 +66,7 @@ impl CliCommand> for Balance { })? } else { // If nothing is given, use the default SUPRA - SUPRA_COIN_TYPE.to_owned() + SupraCoinType::type_tag() }; let client = self.rest_options.client(&self.profile_options)?; diff --git a/crates/aptos/src/account/fund.rs b/crates/aptos/src/account/fund.rs index c2ea9e57a4b6f..4764480f4354d 100644 --- a/crates/aptos/src/account/fund.rs +++ b/crates/aptos/src/account/fund.rs @@ -13,20 +13,20 @@ use clap::Parser; /// /// This will create an account if it doesn't exist with the faucet. This is mostly useful /// for local development and devnet. -#[derive(Debug, Parser)] +#[derive(Debug, Default, Parser)] pub struct FundWithFaucet { /// Address to fund /// /// If the account wasn't previously created, it will be created when being funded #[clap(long, value_parser = crate::common::types::load_account_arg)] - pub(crate) account: Option, + pub account: Option, /// Number of Quants to fund the account from the faucet /// /// The amount added to the account may be limited by the faucet, and may be less /// than the amount requested. 
#[clap(long, default_value_t = DEFAULT_FUNDED_COINS)] - pub(crate) amount: u64, + pub amount: u64, #[clap(flatten)] pub(crate) faucet_options: FaucetOptions, diff --git a/crates/aptos/src/account/key_rotation.rs b/crates/aptos/src/account/key_rotation.rs index e964d4e93ee4c..0d26e85a97856 100644 --- a/crates/aptos/src/account/key_rotation.rs +++ b/crates/aptos/src/account/key_rotation.rs @@ -1,14 +1,11 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::common::{ - types::{ - account_address_from_auth_key, account_address_from_public_key, - AuthenticationKeyInputOptions, CliCommand, CliConfig, CliError, CliTypedResult, - ConfigSearchMode, EncodingOptions, ExtractPublicKey, ParsePrivateKey, ProfileConfig, - ProfileOptions, PublicKeyInputOptions, RestOptions, TransactionOptions, TransactionSummary, - }, - utils::{prompt_yes, prompt_yes_with_override, read_line}, +use crate::common::types::{ + account_address_from_auth_key, account_address_from_public_key, AuthenticationKeyInputOptions, + CliCommand, CliConfig, CliError, CliTypedResult, ConfigSearchMode, EncodingOptions, + ExtractPublicKey, HardwareWalletOptions, ParsePrivateKey, ProfileConfig, ProfileOptions, + PublicKeyInputOptions, RestOptions, TransactionOptions, TransactionSummary, }; use aptos_cached_packages::aptos_stdlib; use aptos_crypto::{ @@ -16,6 +13,7 @@ use aptos_crypto::{ encoding_type::EncodingType, PrivateKey, SigningKey, }; +use aptos_ledger; use aptos_rest_client::{ aptos_api_types::{AptosError, AptosErrorCode}, error::{AptosErrorResponse, RestError}, @@ -27,7 +25,7 @@ use aptos_types::{ transaction::authenticator::AuthenticationKey, }; use async_trait::async_trait; -use clap::Parser; +use clap::{Args, Parser}; use serde::{Deserialize, Serialize}; use std::{collections::BTreeMap, path::PathBuf}; @@ -38,31 +36,57 @@ use std::{collections::BTreeMap, path::PathBuf}; /// rotated you will need to use the original account address, with the /// new private key. 
There is an interactive prompt to help you add it /// to a new profile. +/// +/// If you wish to rotate from a ledger wallet, it must have its own +/// profile. If you wish to rotate to a ledger wallet, specify the new +/// derivation path or index accordingly. #[derive(Debug, Parser)] pub struct RotateKey { #[clap(flatten)] pub(crate) txn_options: TransactionOptions, + #[clap(flatten)] + pub(crate) new_auth_key_options: NewAuthKeyOptions, + + #[clap(flatten)] + pub(crate) new_profile_options: NewProfileOptions, +} + +#[derive(Args, Debug)] +#[group(required = true, multiple = false)] +pub(crate) struct NewAuthKeyOptions { /// File name that contains the new private key encoded in the type from `--encoding` - #[clap(long, group = "new_private_key_inputs", value_parser)] + #[clap(long, value_parser)] pub(crate) new_private_key_file: Option, /// New private key encoded in the type from `--encoding` - #[clap(long, group = "new_private_key_inputs")] + #[clap(long)] pub(crate) new_private_key: Option, - /// Name of the profile to save the new private key + /// BIP44 derivation path of hardware wallet account, e.g. `m/44'/637'/0'/0'/0'` /// - /// If not provided, it will interactively have you save a profile, - /// unless `--skip_saving_profile` is provided + /// Note you may need to escape single quotes in your shell, for example + /// `m/44'/637'/0'/0'/0'` would be `m/44\'/637\'/0\'/0\'/0\'` #[clap(long)] - pub(crate) save_to_profile: Option, + pub(crate) new_derivation_path: Option, - /// Skip saving profile + /// BIP44 account index of hardware wallet account, e.g. 
`0` /// - /// This skips the interactive profile saving after rotating the authentication key + /// Given index `n` maps to BIP44 derivation path `m/44'/637'/n'/0'/0` + #[clap(long)] + pub(crate) new_derivation_index: Option, +} + +#[derive(Args, Debug)] +#[group(required = true, multiple = false)] +pub(crate) struct NewProfileOptions { + /// Only specify if you do not want to save a new profile + #[clap(long)] + pub(crate) skip_saving_profile: bool, + + /// Name of the new profile to save for the new authentication key + #[clap(long)] + pub(crate) save_to_profile: Option, } impl ParsePrivateKey for RotateKey {} @@ -75,8 +99,8 @@ impl RotateKey { ) -> CliTypedResult> { self.parse_private_key( encoding, - self.new_private_key_file.clone(), - self.new_private_key.clone(), + self.new_auth_key_options.new_private_key_file.clone(), + self.new_auth_key_options.new_private_key.clone(), ) } } @@ -94,56 +118,147 @@ impl CliCommand for RotateKey { } async fn execute(self) -> CliTypedResult { - let new_private_key = self - .extract_private_key(self.txn_options.encoding_options.encoding)? - .ok_or_else(|| { - CliError::CommandArgumentError( - "One of ['--new-private-key', '--new-private-key-file'] must be used" - .to_string(), - ) - })?; - - let (current_private_key, sender_address) = self.txn_options.get_key_and_address()?; - - if new_private_key == current_private_key { + // Verify profile name before executing rotation operation, to avoid erroring out in a + // manner that results in corrupted config state. + if let Some(ref new_profile_name) = self.new_profile_options.save_to_profile { + if new_profile_name.is_empty() { + return Err(CliError::CommandArgumentError( + "New profile name may not be empty".to_string(), + )); + }; + + // Verify that config exists by attempting to load it. + let config = CliConfig::load(ConfigSearchMode::CurrentDirAndParents)?; + + // Verify that the new profile name does not already exist in the config. 
+ if let Some(profiles) = config.profiles { + if profiles.contains_key(new_profile_name) { + return Err(CliError::CommandArgumentError(format!( + "Profile {} already exists", + new_profile_name + ))); + }; + } + }; + + // Get current signer options. + let current_derivation_path = if self.txn_options.profile_options.profile.is_some() { + self.txn_options.profile_options.derivation_path()? + } else { + None + }; + let (current_private_key, current_address, current_public_key) = if current_derivation_path + .is_some() + { + ( + None, + self.txn_options.profile_options.account_address()?, + self.txn_options.profile_options.public_key()?, + ) + } else { + let (current_private_key, current_address) = self.txn_options.get_key_and_address()?; + ( + Some(current_private_key), + current_address, + self.txn_options.get_public_key()?, + ) + }; + + // Get new signer options. + let new_hardware_wallet_options = HardwareWalletOptions { + derivation_path: self.new_auth_key_options.new_derivation_path.clone(), + derivation_index: self.new_auth_key_options.new_derivation_index.clone(), + }; + let new_derivation_path = new_hardware_wallet_options.extract_derivation_path()?; + let (new_private_key, new_public_key) = if new_derivation_path.is_some() { + ( + None, + aptos_ledger::get_public_key(new_derivation_path.clone().unwrap().as_str(), false)?, + ) + } else { + let new_private_key = self + .extract_private_key(self.txn_options.encoding_options.encoding)? + .ok_or_else(|| { + CliError::CommandArgumentError("Unable to parse new private key".to_string()) + })?; + (Some(new_private_key.clone()), new_private_key.public_key()) + }; + + // Check that public key is actually changing. 
+ if new_public_key == current_public_key { return Err(CliError::CommandArgumentError( - "New private key cannot be the same as the current private key".to_string(), + "New public key cannot be the same as the current public key".to_string(), )); } - // Get sequence number for account - let sequence_number = self.txn_options.sequence_number(sender_address).await?; - let auth_key = self.txn_options.auth_key(sender_address).await?; - + // Construct rotation proof challenge. + let sequence_number = self.txn_options.sequence_number(current_address).await?; + let auth_key = self.txn_options.auth_key(current_address).await?; let rotation_proof = RotationProofChallenge { account_address: CORE_CODE_ADDRESS, module_name: "account".to_string(), struct_name: "RotationProofChallenge".to_string(), sequence_number, - originator: sender_address, + originator: current_address, current_auth_key: AccountAddress::from_bytes(auth_key) .map_err(|err| CliError::UnableToParse("auth_key", err.to_string()))?, - new_public_key: new_private_key.public_key().to_bytes().to_vec(), + new_public_key: new_public_key.to_bytes().to_vec(), }; - let rotation_msg = bcs::to_bytes(&rotation_proof).map_err(|err| CliError::BCS("rotation_proof", err))?; - // Signs the struct using both the current private key and the next private key + // Determine if current and new keys are hardware wallets, for better user feedback. + let current_is_hardware_wallet = current_derivation_path.is_some(); + let new_is_hardware_wallet = new_derivation_path.is_some(); + + // Sign the struct using both the current private key and the new private key. 
let rotation_proof_signed_by_current_private_key = - current_private_key.sign_arbitrary_message(&rotation_msg.clone()); + if let Some(current_derivation_path) = current_derivation_path.clone() { + eprintln!("Sign rotation proof challenge on your Ledger device (current key)"); + let challenge_signature = aptos_ledger::sign_message( + current_derivation_path.as_str(), + &rotation_msg.clone(), + )?; + eprintln!("Rotation proof challenge successfully signed (current key)"); + if !new_is_hardware_wallet { + eprintln!("You will still need to sign the transaction on your Ledger device"); + } + challenge_signature + } else { + current_private_key + .unwrap() + .sign_arbitrary_message(&rotation_msg.clone()) + }; let rotation_proof_signed_by_new_private_key = - new_private_key.sign_arbitrary_message(&rotation_msg); - + if let Some(new_derivation_path) = new_derivation_path.clone() { + eprintln!("Sign rotation proof challenge on your Ledger device (new key)"); + let challenge_signature = aptos_ledger::sign_message( + new_derivation_path.clone().as_str(), + &rotation_msg.clone(), + )?; + eprintln!("Rotation proof challenge successfully signed (new key)"); + if current_is_hardware_wallet { + eprintln!("You will still need to sign the transaction on your Ledger device"); + } + challenge_signature + } else { + new_private_key + .clone() + .unwrap() + .sign_arbitrary_message(&rotation_msg.clone()) + }; + + // Submit transaction. 
+ if current_derivation_path.is_some() { + eprintln!("Approve transaction on your Ledger device"); + }; let txn_summary = self .txn_options .submit_transaction(aptos_stdlib::account_rotate_authentication_key( 0, - // Existing public key - current_private_key.public_key().to_bytes().to_vec(), + current_public_key.to_bytes().to_vec(), 0, - // New public key - new_private_key.public_key().to_bytes().to_vec(), + new_public_key.to_bytes().to_vec(), rotation_proof_signed_by_current_private_key .to_bytes() .to_vec(), @@ -152,10 +267,9 @@ impl CliCommand for RotateKey { .await .map(TransactionSummary::from)?; - let string = serde_json::to_string_pretty(&txn_summary) + let txn_string = serde_json::to_string_pretty(&txn_summary) .map_err(|err| CliError::UnableToParse("transaction summary", err.to_string()))?; - - eprintln!("{}", string); + eprintln!("{}", txn_string); if let Some(txn_success) = txn_summary.success { if !txn_success { @@ -169,87 +283,48 @@ impl CliCommand for RotateKey { )); } - let mut profile_name: String; - - if self.save_to_profile.is_none() { - if self.skip_saving_profile - || !prompt_yes("Do you want to create a profile for the new key?") - { - return Ok(RotateSummary { - transaction: txn_summary, - message: None, - }); - } - - eprintln!("Enter the name for the profile"); - profile_name = read_line("Profile name")?.trim().to_string(); - } else { - // We can safely unwrap here - profile_name = self.save_to_profile.unwrap(); + if self.new_profile_options.skip_saving_profile { + return Ok(RotateSummary { + transaction: txn_summary, + message: None, + }); } - // Check if profile name exists - let mut config = CliConfig::load(ConfigSearchMode::CurrentDirAndParents)?; + // Can safe unwrap here since NewProfileOptions arg group requires either that + // skip_saving_profile is set, or that a new profile name is specified. If a new profile is + // specified, then it will have already been error checked above. 
+ let new_profile_name = self.new_profile_options.save_to_profile.unwrap(); - if let Some(ref profiles) = config.profiles { - if profiles.contains_key(&profile_name) { - if let Err(cli_err) = prompt_yes_with_override( - format!( - "Profile {} exits. Do you want to provide a new profile name?", - profile_name - ) - .as_str(), - self.txn_options.prompt_options, - ) { - match cli_err { - CliError::AbortedError => { - return Ok(RotateSummary { - transaction: txn_summary, - message: None, - }); - }, - _ => { - return Err(cli_err); - }, - } - } - - eprintln!("Enter the name for the profile"); - profile_name = read_line("Profile name")?.trim().to_string(); - } - } - - if profile_name.is_empty() { - return Err(CliError::AbortedError); + // If no config exists, then the error should've been caught earlier during the profile + // name verification step. + let mut config = CliConfig::load(ConfigSearchMode::CurrentDirAndParents)?; + if config.profiles.is_none() { + config.profiles = Some(BTreeMap::new()); } - let mut profile_config = ProfileConfig { - private_key: Some(new_private_key.clone()), - public_key: Some(new_private_key.public_key()), - account: Some(sender_address), + // Create new config. + let mut new_profile_config = ProfileConfig { + public_key: Some(new_public_key), + account: Some(current_address), + private_key: new_private_key, + derivation_path: new_derivation_path, ..self.txn_options.profile_options.profile()? 
}; if let Some(url) = self.txn_options.rest_options.url { - profile_config.rest_url = Some(url.into()); - } - - if config.profiles.is_none() { - config.profiles = Some(BTreeMap::new()); + new_profile_config.rest_url = Some(url.into()); } config .profiles .as_mut() .unwrap() - .insert(profile_name.clone(), profile_config); + .insert(new_profile_name.clone(), new_profile_config); config.save()?; - eprintln!("Profile {} is saved.", profile_name); - Ok(RotateSummary { transaction: txn_summary, - message: Some(format!("Profile {} is saved.", profile_name)), + message: Some(format!("Saved new profile {}", new_profile_name)), }) } } diff --git a/crates/aptos/src/account/multisig_account.rs b/crates/aptos/src/account/multisig_account.rs index f20a2db2b1a0b..398d35fed72bc 100644 --- a/crates/aptos/src/account/multisig_account.rs +++ b/crates/aptos/src/account/multisig_account.rs @@ -136,7 +136,8 @@ impl SupraCommand for Create { metadata_value, self.timeout_duration, ); - + + Ok( SupraCommandArguments { payload, diff --git a/crates/aptos/src/common/init.rs b/crates/aptos/src/common/init.rs index 838cde8081b66..c5c8328d0c829 100644 --- a/crates/aptos/src/common/init.rs +++ b/crates/aptos/src/common/init.rs @@ -9,7 +9,7 @@ use crate::{ ConfigSearchMode, EncodingOptions, HardwareWalletOptions, PrivateKeyInputOptions, ProfileConfig, ProfileOptions, PromptOptions, RngArgs, DEFAULT_PROFILE, }, - utils::{fund_account, prompt_yes_with_override, read_line}, + utils::{explorer_account_link, fund_account, prompt_yes_with_override, read_line}, }, }; use aptos_crypto::{ed25519::Ed25519PrivateKey, PrivateKey, ValidCryptoMaterialStringExt}; @@ -22,7 +22,11 @@ use async_trait::async_trait; use clap::Parser; use reqwest::Url; use serde::{Deserialize, Serialize}; -use std::{collections::BTreeMap, str::FromStr}; +use std::{ + collections::BTreeMap, + fmt::{Display, Formatter}, + str::FromStr, +}; /// 1 SUPRA (might not actually get that much, depending on the faucet) const NUM_DEFAULT_OCTAS: 
u64 = 100000000; @@ -122,6 +126,9 @@ impl CliCommand<()> for InitTool { } }; + // Ensure the config contains the network used + profile_config.network = Some(network); + // Ensure that there is at least a REST URL set for the network match network { Network::Mainnet => { @@ -337,7 +344,16 @@ impl CliCommand<()> for InitTool { .expect("Must have profiles, as created above") .insert(profile_name.to_string(), profile_config); config.save()?; - eprintln!("\n---\nAptos CLI is now set up for account {} as profile {}! Run `aptos --help` for more information about commands", address, self.profile_options.profile_name().unwrap_or(DEFAULT_PROFILE)); + let profile_name = self + .profile_options + .profile_name() + .unwrap_or(DEFAULT_PROFILE); + eprintln!( + "\n---\nAptos CLI is now set up for account {} as profile {}!\n See the account here: {}\n Run `aptos --help` for more information about commands", + address, + profile_name, + explorer_account_link(address, Some(network)) + ); Ok(()) } } @@ -431,6 +447,18 @@ pub enum Network { Custom, } +impl Display for Network { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", match self { + Network::Mainnet => "mainnet", + Network::Testnet => "testnet", + Network::Devnet => "devnet", + Network::Local => "local", + Network::Custom => "custom", + }) + } +} + impl FromStr for Network { type Err = CliError; diff --git a/crates/aptos/src/common/local_simulation.rs b/crates/aptos/src/common/local_simulation.rs index 8955326eaec25..8828445011b48 100644 --- a/crates/aptos/src/common/local_simulation.rs +++ b/crates/aptos/src/common/local_simulation.rs @@ -83,6 +83,7 @@ pub fn benchmark_transaction_using_debugger( times.push(t2 - t1); } + times.sort(); times[n / 2] }; diff --git a/crates/aptos/src/common/types.rs b/crates/aptos/src/common/types.rs index f9149a4b36986..b76331ab4d0ee 100644 --- a/crates/aptos/src/common/types.rs +++ b/crates/aptos/src/common/types.rs @@ -1,7 +1,7 @@ // Copyright © Aptos Foundation // 
SPDX-License-Identifier: Apache-2.0 -use super::utils::fund_account; +use super::utils::{explorer_transaction_link, fund_account}; use crate::{ common::{ init::Network, @@ -18,7 +18,7 @@ use crate::{ genesis::git::from_yaml, move_tool::{ArgWithType, FunctionArgType, MemberId}, }; -use anyhow::Context; +use anyhow::{bail, Context}; use aptos_api_types::ViewFunction; use aptos_crypto::{ ed25519::{Ed25519PrivateKey, Ed25519PublicKey, Ed25519Signature}, @@ -106,6 +106,14 @@ pub enum CliError { MoveTestError, #[error("Move Prover failed: {0}")] MoveProverError(String), + #[error( + "The package is larger than {1} bytes ({0} bytes)! \ + To lower the size you may want to include less artifacts via `--included-artifacts`. \ + You can also override this check with `--override-size-check`. \ + Alternatively, you can use the `--chunked-publish` to enable chunked publish mode, \ + which chunks down the package and deploys it in several stages." + )] + PackageSizeExceeded(usize, usize), #[error("Unable to parse '{0}': error: {1}")] UnableToParse(&'static str, String), #[error("Unable to read file '{0}', error: {1}")] @@ -131,6 +139,7 @@ impl CliError { CliError::MoveCompilationError(_) => "MoveCompilationError", CliError::MoveTestError => "MoveTestError", CliError::MoveProverError(_) => "MoveProverError", + CliError::PackageSizeExceeded(_, _) => "PackageSizeExceeded", CliError::UnableToParse(_, _) => "UnableToParse", CliError::UnableToReadFile(_, _) => "UnableToReadFile", CliError::UnexpectedError(_) => "UnexpectedError", @@ -232,6 +241,7 @@ pub const CONFIG_FOLDER: &str = ".aptos"; /// An individual profile #[derive(Debug, Default, Serialize, Deserialize)] pub struct ProfileConfig { + /// Name of network being used, if setup from aptos init #[serde(skip_serializing_if = "Option::is_none")] pub network: Option, /// Private key for commands. 
@@ -675,18 +685,17 @@ pub trait ParsePrivateKey { #[derive(Debug, Default, Parser)] pub struct HardwareWalletOptions { - /// Derivation Path of your account in hardware wallet + /// BIP44 derivation path of hardware wallet account, e.g. `m/44'/637'/0'/0'/0'` /// - /// e.g format - m/44\'/637\'/0\'/0\'/0\' - /// Make sure your wallet is unlocked and have Aptos opened - #[clap(long)] + /// Note you may need to escape single quotes in your shell, for example + /// `m/44'/637'/0'/0'/0'` would be `m/44\'/637\'/0\'/0\'/0\'` + #[clap(long, conflicts_with = "derivation_index")] pub derivation_path: Option, - /// Index of your account in hardware wallet + /// BIP44 account index of hardware wallet account, e.g. `0` /// - /// This is the simpler version of derivation path e.g `format - [0]` - /// we will translate this index into `[m/44'/637'/0'/0'/0]` - #[clap(long)] + /// Given index `n` maps to BIP44 derivation path `m/44'/637'/n'/0'/0` + #[clap(long, conflicts_with = "derivation_path")] pub derivation_index: Option, } @@ -966,7 +975,7 @@ impl SaveFile { } /// Options specific to using the Rest endpoint -#[derive(Debug, Default, Parser)] +#[derive(Debug, Parser)] pub struct RestOptions { /// URL to a fullnode on the network /// @@ -985,6 +994,16 @@ pub struct RestOptions { pub node_api_key: Option, } +impl Default for RestOptions { + fn default() -> Self { + Self { + url: None, + connection_timeout_secs: DEFAULT_EXPIRATION_SECS, + node_api_key: None, + } + } +} + impl RestOptions { pub fn new(url: Option, connection_timeout_secs: Option) -> Self { RestOptions { @@ -1022,25 +1041,53 @@ impl RestOptions { } } +/// Options for optimization level +#[derive(Debug, Clone, Parser)] +pub enum OptimizationLevel { + /// No optimizations + None, + /// Default optimization level + Default, + /// Extra optimizations, that may take more time + Extra, +} + +impl Default for OptimizationLevel { + fn default() -> Self { + Self::Default + } +} + +impl FromStr for OptimizationLevel { + type 
Err = anyhow::Error; + + /// Parses an optimization level, or default. + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_str() { + "none" => Ok(Self::None), + "" | "default" => Ok(Self::Default), + "extra" => Ok(Self::Extra), + _ => bail!( + "unrecognized optimization level `{}` (supported versions: `none`, `default`, `aggressive`)", + s + ), + } + } +} + /// Options for compiling a move package dir #[derive(Debug, Clone, Parser)] pub struct MovePackageDir { - /// Enables dev mode, which uses all dev-addresses and dev-dependencies - /// - /// Dev mode allows for changing dependencies and addresses to the preset [dev-addresses] and - /// [dev-dependencies] fields. This works both inside and out of tests for using preset values. - /// - /// Currently, it also additionally pulls in all test compilation artifacts - #[clap(long)] - pub dev: bool, - /// Path to a move package (the folder with a Move.toml file) + /// Path to a move package (the folder with a Move.toml file). Defaults to current directory. #[clap(long, value_parser)] pub package_dir: Option, + /// Path to save the compiled move package /// /// Defaults to `/build` #[clap(long, value_parser)] pub output_dir: Option, + /// Named addresses for the move binary /// /// Example: alice=0x1234, bob=0x5678 @@ -1061,36 +1108,83 @@ pub struct MovePackageDir { #[clap(long)] pub(crate) skip_fetch_latest_git_deps: bool, - /// Specify the version of the bytecode the compiler is going to emit. + /// Do not complain about unknown attributes in Move code. #[clap(long)] + pub skip_attribute_checks: bool, + + /// Enables dev mode, which uses all dev-addresses and dev-dependencies + /// + /// Dev mode allows for changing dependencies and addresses to the preset [dev-addresses] and + /// [dev-dependencies] fields. This works both inside and out of tests for using preset values. 
+ /// + /// Currently, it also additionally pulls in all test compilation artifacts + #[clap(long)] + pub dev: bool, + + /// Do apply extended checks for Aptos (e.g. `#[view]` attribute) also on test code. + /// NOTE: this behavior will become the default in the future. + /// See + #[clap(long, env = "APTOS_CHECK_TEST_CODE")] + pub check_test_code: bool, + + /// Select optimization level. Choices are "none", "default", or "extra". + /// Level "extra" may spend more time on expensive optimizations in the future. + /// Level "none" does no optimizations, possibly leading to use of too many runtime resources. + /// Level "default" is the recommended level, and the default if not provided. + #[clap(long, alias = "optimization_level", value_parser = clap::value_parser!(OptimizationLevel))] + pub optimize: Option, + + /// Experiments + #[clap(long, hide(true))] + pub experiments: Vec, + + /// ...or --bytecode BYTECODE_VERSION + /// Specify the version of the bytecode the compiler is going to emit. + /// Defaults to `6`, or `7` if language version 2 is selected + /// (through `--move-2` or `--language_version=2`), . + #[clap( + long, + default_value_if("move_2", "true", "7"), + alias = "bytecode", + verbatim_doc_comment + )] pub bytecode_version: Option, + /// ...or --compiler COMPILER_VERSION /// Specify the version of the compiler. - /// Currently, default to `v1` - #[clap(long, value_parser = clap::value_parser!(CompilerVersion))] + /// Defaults to `1`, or `2` if `--move-2` is selected. + #[clap(long, value_parser = clap::value_parser!(CompilerVersion), + alias = "compiler", + default_value_if("move_2", "true", "2.0"), + verbatim_doc_comment)] pub compiler_version: Option, + /// ...or --language LANGUAGE_VERSION /// Specify the language version to be supported. - /// Currently, default to `v1` - #[clap(long, value_parser = clap::value_parser!(LanguageVersion))] + /// Currently, defaults to `1`, unless `--move-2` is selected. 
+ #[clap(long, value_parser = clap::value_parser!(LanguageVersion), + alias = "language", + default_value_if("move_2", "true", "2.0"), + verbatim_doc_comment)] pub language_version: Option, - /// Do not complain about unknown attributes in Move code. - #[clap(long)] - pub skip_attribute_checks: bool, + /// Select bytecode, language version, and compiler to support Move 2: + /// Same as `--bytecode_version=7 --language_version=2.0 --compiler_version=2.0` + #[clap(long, verbatim_doc_comment)] + pub move_2: bool, +} - /// Do apply extended checks for Aptos (e.g. `#[view]` attribute) also on test code. - /// NOTE: this behavior will become the default in the future. - /// See - #[clap(long, env = "APTOS_CHECK_TEST_CODE")] - pub check_test_code: bool, +impl Default for MovePackageDir { + fn default() -> Self { + Self::new() + } } impl MovePackageDir { - pub fn new(package_dir: PathBuf) -> Self { + pub fn new() -> Self { Self { dev: false, - package_dir: Some(package_dir), + package_dir: None, output_dir: None, named_addresses: Default::default(), override_std: None, @@ -1100,6 +1194,9 @@ impl MovePackageDir { language_version: None, skip_attribute_checks: false, check_test_code: false, + move_2: false, + optimize: None, + experiments: vec![], } } @@ -1404,12 +1501,12 @@ pub struct ChangeSummary { pub struct FaucetOptions { /// URL for the faucet endpoint e.g. `https://faucet.devnet.aptoslabs.com` #[clap(long)] - faucet_url: Option, + pub faucet_url: Option, /// Auth token to bypass faucet ratelimits. You can also set this as an environment /// variable with FAUCET_AUTH_TOKEN. 
#[clap(long, env)] - faucet_auth_token: Option, + pub faucet_auth_token: Option, } impl FaucetOptions { @@ -1716,25 +1813,19 @@ impl TransactionOptions { adjusted_max_gas }; - // Sign and submit transaction + // Build a transaction let transaction_factory = TransactionFactory::new(chain_id) .with_gas_unit_price(gas_unit_price) .with_max_gas_amount(max_gas) .with_transaction_expiration_time(self.gas_options.expiration_secs); - match self.get_transaction_account_type() { + // Sign it with the appropriate signer + let transaction = match self.get_transaction_account_type() { Ok(AccountType::Local) => { let (private_key, _) = self.get_key_and_address()?; let sender_account = &mut LocalAccount::new(sender_address, private_key, sequence_number); - let transaction = sender_account - .sign_with_transaction_builder(transaction_factory.payload(payload)); - let response = client - .submit_and_wait(&transaction) - .await - .map_err(|err| CliError::ApiError(err.to_string()))?; - - Ok(response.into_inner()) + sender_account.sign_with_transaction_builder(transaction_factory.payload(payload)) }, Ok(AccountType::HardwareWallet) => { let sender_account = &mut HardwareWalletAccount::new( @@ -1747,17 +1838,33 @@ impl TransactionOptions { HardwareWalletType::Ledger, sequence_number, ); - let transaction = sender_account - .sign_with_transaction_builder(transaction_factory.payload(payload))?; - let response = client - .submit_and_wait(&transaction) - .await - .map_err(|err| CliError::ApiError(err.to_string()))?; - - Ok(response.into_inner()) + sender_account + .sign_with_transaction_builder(transaction_factory.payload(payload))? 
}, - Err(err) => Err(err), - } + Err(err) => return Err(err), + }; + + // Submit the transaction, printing out a useful transaction link + client + .submit_bcs(&transaction) + .await + .map_err(|err| CliError::ApiError(err.to_string()))?; + let transaction_hash = transaction.clone().committed_hash(); + let network = self + .profile_options + .profile() + .ok() + .and_then(|profile| profile.network); + eprintln!( + "Transaction submitted: {}", + explorer_transaction_link(transaction_hash, network) + ); + let response = client + .wait_for_signed_transaction(&transaction) + .await + .map_err(|err| CliError::ApiError(err.to_string()))?; + + Ok(response.into_inner()) } /// Simulates a transaction locally, using the debugger to fetch required data from remote. @@ -2240,3 +2347,13 @@ pub struct OverrideSizeCheckOption { #[clap(long)] pub(crate) override_size_check: bool, } + +#[derive(Parser)] +pub struct ChunkedPublishOption { + /// Whether to publish a package in a chunked mode. This may require more than one transaction + /// for publishing the Move package. + /// + /// Use this option for publishing large packages exceeding `MAX_PUBLISH_PACKAGE_SIZE`. 
+ #[clap(long)] + pub(crate) chunked_publish: bool, +} diff --git a/crates/aptos/src/common/utils.rs b/crates/aptos/src/common/utils.rs index bbb85e35dbc66..3c58799df138d 100644 --- a/crates/aptos/src/common/utils.rs +++ b/crates/aptos/src/common/utils.rs @@ -2,9 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - common::types::{ - account_address_from_public_key, CliError, CliTypedResult, PromptOptions, - TransactionOptions, TransactionSummary, + common::{ + init::Network, + types::{ + account_address_from_public_key, CliError, CliTypedResult, PromptOptions, + TransactionOptions, TransactionSummary, + }, }, config::GlobalConfig, CliResult, @@ -563,3 +566,34 @@ pub fn view_json_option_str(option_ref: &serde_json::Value) -> CliTypedResult) -> String { + // For now, default to what the browser is already on, though the link could be wrong + if let Some(network) = network { + format!( + "https://explorer.aptoslabs.com/account/{}?network={}", + hash, network + ) + } else { + format!("https://explorer.aptoslabs.com/account/{}", hash) + } +} + +pub fn explorer_transaction_link( + hash: aptos_crypto::HashValue, + network: Option, +) -> String { + // For now, default to what the browser is already on, though the link could be wrong + if let Some(network) = network { + format!( + "https://explorer.aptoslabs.com/txn/{}?network={}", + hash.to_hex_literal(), + network + ) + } else { + format!( + "https://explorer.aptoslabs.com/txn/{}", + hash.to_hex_literal() + ) + } +} diff --git a/crates/aptos/src/config/mod.rs b/crates/aptos/src/config/mod.rs index 00e8fc401b81a..f9aeca5e9a808 100644 --- a/crates/aptos/src/config/mod.rs +++ b/crates/aptos/src/config/mod.rs @@ -25,18 +25,24 @@ use std::{collections::BTreeMap, fmt::Formatter, path::PathBuf, str::FromStr}; /// default configuration, and user specific settings. 
#[derive(Parser)] pub enum ConfigTool { + DeleteProfile(DeleteProfile), GenerateShellCompletions(GenerateShellCompletions), + RenameProfile(RenameProfile), SetGlobalConfig(SetGlobalConfig), ShowGlobalConfig(ShowGlobalConfig), + ShowPrivateKey(ShowPrivateKey), ShowProfiles(ShowProfiles), } impl ConfigTool { pub async fn execute(self) -> CliResult { match self { + ConfigTool::DeleteProfile(tool) => tool.execute_serialized().await, ConfigTool::GenerateShellCompletions(tool) => tool.execute_serialized_success().await, + ConfigTool::RenameProfile(tool) => tool.execute_serialized().await, ConfigTool::SetGlobalConfig(tool) => tool.execute_serialized().await, ConfigTool::ShowGlobalConfig(tool) => tool.execute_serialized().await, + ConfigTool::ShowPrivateKey(tool) => tool.execute_serialized().await, ConfigTool::ShowProfiles(tool) => tool.execute_serialized().await, } } @@ -112,6 +118,47 @@ impl CliCommand for SetGlobalConfig { } } +/// Show the private key for the given profile +#[derive(Parser, Debug)] +pub struct ShowPrivateKey { + /// Which profile's private key to show + #[clap(long)] + profile: String, +} + +#[async_trait] +impl CliCommand for ShowPrivateKey { + fn command_name(&self) -> &'static str { + "ShowPrivateKey" + } + + async fn execute(self) -> CliTypedResult { + let config = CliConfig::load(ConfigSearchMode::CurrentDir)?; + + if let Some(profiles) = &config.profiles { + if let Some(profile) = profiles.get(&self.profile.clone()) { + if let Some(private_key) = &profile.private_key { + Ok(format!("0x{}", hex::encode(private_key.to_bytes()))) + } else { + Err(CliError::CommandArgumentError(format!( + "Profile {} does not have a private key", + self.profile + ))) + } + } else { + Err(CliError::CommandArgumentError(format!( + "Profile {} does not exist", + self.profile + ))) + } + } else { + Err(CliError::CommandArgumentError( + "Config has no profiles".to_string(), + )) + } + } +} + /// Shows the current profiles available /// /// This will only show public 
information and will not show @@ -150,6 +197,99 @@ impl CliCommand> for ShowProfiles { } } +/// Delete the specified profile. +#[derive(Parser, Debug)] +pub struct DeleteProfile { + /// Which profile to delete + #[clap(long)] + profile: String, +} + +#[async_trait] +impl CliCommand for DeleteProfile { + fn command_name(&self) -> &'static str { + "DeleteProfile" + } + + async fn execute(self) -> CliTypedResult { + let mut config = CliConfig::load(ConfigSearchMode::CurrentDir)?; + + if let Some(profiles) = &mut config.profiles { + if profiles.remove(&self.profile).is_none() { + Err(CliError::CommandArgumentError(format!( + "Profile {} does not exist", + self.profile + ))) + } else { + config.save().map_err(|err| { + CliError::UnexpectedError(format!( + "Unable to save config after deleting profile: {}", + err, + )) + })?; + Ok(format!("Deleted profile {}", self.profile)) + } + } else { + Err(CliError::CommandArgumentError( + "Config has no profiles".to_string(), + )) + } + } +} + +/// Rename the specified profile. 
+#[derive(Parser, Debug)] +pub struct RenameProfile { + /// Which profile to rename + #[clap(long)] + profile: String, + + /// New profile name + #[clap(long)] + new_profile_name: String, +} + +#[async_trait] +impl CliCommand for RenameProfile { + fn command_name(&self) -> &'static str { + "RenameProfile" + } + + async fn execute(self) -> CliTypedResult { + let mut config = CliConfig::load(ConfigSearchMode::CurrentDir)?; + + if let Some(profiles) = &mut config.profiles { + if profiles.contains_key(&self.new_profile_name.clone()) { + Err(CliError::CommandArgumentError(format!( + "Profile {} already exists", + self.new_profile_name + ))) + } else if let Some(profile_config) = profiles.remove(&self.profile) { + profiles.insert(self.new_profile_name.clone(), profile_config); + config.save().map_err(|err| { + CliError::UnexpectedError(format!( + "Unable to save config after renaming profile: {}", + err, + )) + })?; + Ok(format!( + "Renamed profile {} to {}", + self.profile, self.new_profile_name + )) + } else { + Err(CliError::CommandArgumentError(format!( + "Profile {} does not exist", + self.profile + ))) + } + } else { + Err(CliError::CommandArgumentError( + "Config has no profiles".to_string(), + )) + } + } +} + /// Shows the properties in the global config #[derive(Parser, Debug)] pub struct ShowGlobalConfig {} diff --git a/crates/aptos/src/governance/mod.rs b/crates/aptos/src/governance/mod.rs index 1ee19538dbaba..70db6b284e9d8 100644 --- a/crates/aptos/src/governance/mod.rs +++ b/crates/aptos/src/governance/mod.rs @@ -41,6 +41,7 @@ use move_core_types::{ ident_str, language_storage::ModuleId, parser::parse_type_tag, transaction_argument::TransactionArgument, }; +use move_model::metadata::{CompilerVersion, LanguageVersion}; use reqwest::Url; use serde::{Deserialize, Serialize}; use std::{ @@ -51,6 +52,7 @@ use std::{ }; use supra_aptos::{SupraCommand, SupraCommandArguments}; use tempfile::TempDir; +use crate::common::types::PoolAddressArgs; /// Tool for on-chain 
governance /// @@ -281,6 +283,8 @@ async fn get_proposal( /// Submit a governance proposal #[derive(Parser)] pub struct SubmitProposal { + #[clap(flatten)] + pub(crate) pool_address_args: PoolAddressArgs, #[clap(flatten)] pub(crate) args: SubmitProposalArgs, } @@ -817,12 +821,14 @@ impl std::fmt::Display for ProposalMetadata { } } -fn compile_in_temp_dir( +pub fn compile_in_temp_dir( script_name: &str, script_path: &Path, framework_package_args: &FrameworkPackageArgs, prompt_options: PromptOptions, bytecode_version: Option, + language_version: Option, + compiler_version: Option, ) -> CliTypedResult<(Vec, HashValue)> { // Make a temporary directory for compilation let temp_dir = TempDir::new().map_err(|err| { @@ -862,6 +868,8 @@ fn compile_in_temp_dir( framework_package_args.skip_fetch_latest_git_deps, package_dir, bytecode_version, + language_version, + compiler_version, ) } @@ -869,6 +877,8 @@ fn compile_script( skip_fetch_latest_git_deps: bool, package_dir: &Path, bytecode_version: Option, + language_version: Option, + compiler_version: Option, ) -> CliTypedResult<(Vec, HashValue)> { let build_options = BuildOptions { with_srcs: false, @@ -877,6 +887,8 @@ fn compile_script( with_error_map: false, skip_fetch_latest_git_deps, bytecode_version, + language_version, + compiler_version, ..BuildOptions::default() }; @@ -954,7 +966,7 @@ impl SupraCommand for ExecuteProposal { } /// Compile a specified script. 
-#[derive(Parser)] +#[derive(Parser, Default)] pub struct CompileScriptFunction { /// Path to the Move script for the proposal #[clap(long, group = "script", value_parser)] @@ -967,8 +979,20 @@ pub struct CompileScriptFunction { #[clap(flatten)] pub(crate) framework_package_args: FrameworkPackageArgs, - #[clap(long)] + #[clap(long, default_value_if("move_2", "true", "7"))] pub(crate) bytecode_version: Option, + + #[clap(long, value_parser = clap::value_parser!(CompilerVersion), + default_value_if("move_2", "true", "2.0"))] + pub compiler_version: Option, + + #[clap(long, value_parser = clap::value_parser!(LanguageVersion), + default_value_if("move_2", "true", "2.0"))] + pub language_version: Option, + + /// Select bytecode, language, compiler for Move 2 + #[clap(long)] + pub move_2: bool, } impl CompileScriptFunction { @@ -1014,6 +1038,8 @@ impl CompileScriptFunction { &self.framework_package_args, prompt_options, self.bytecode_version, + self.language_version, + self.compiler_version, ) } } @@ -1066,17 +1092,7 @@ impl CliCommand<()> for GenerateUpgradeProposal { next_execution_hash, } = self; let package_path = move_options.get_package_path()?; - let options = included_artifacts.build_options( - move_options.dev, - move_options.skip_fetch_latest_git_deps, - move_options.named_addresses(), - move_options.override_std, - move_options.bytecode_version, - move_options.compiler_version, - move_options.language_version, - move_options.skip_attribute_checks, - move_options.check_test_code, - ); + let options = included_artifacts.build_options(&move_options)?; let package = BuiltPackage::build(package_path, options)?; let release = ReleasePackage::new(package)?; @@ -1089,10 +1105,14 @@ impl CliCommand<()> for GenerateUpgradeProposal { // If we're generating a multi-step proposal } else { let next_execution_hash_bytes = hex::decode(next_execution_hash)?; + let next_execution_hash = + HashValue::from_slice(next_execution_hash_bytes).map_err(|_err| { + 
CliError::CommandArgumentError("Invalid next execution hash".to_string()) + })?; release.generate_script_proposal_multi_step( account, output, - next_execution_hash_bytes, + Some(next_execution_hash), )?; }; Ok(()) @@ -1131,13 +1151,11 @@ impl GenerateExecutionHash { }; CompileScriptFunction { script_path: self.script_path.clone(), - compiled_script_path: None, framework_package_args: FrameworkPackageArgs { - framework_git_rev: None, framework_local_dir, - skip_fetch_latest_git_deps: false, + ..FrameworkPackageArgs::default() }, - bytecode_version: None, + ..CompileScriptFunction::default() } .compile("execution_hash", PromptOptions::yes()) } diff --git a/crates/aptos/src/move_tool/bytecode.rs b/crates/aptos/src/move_tool/bytecode.rs index c883783f02a51..6a662300b20db 100644 --- a/crates/aptos/src/move_tool/bytecode.rs +++ b/crates/aptos/src/move_tool/bytecode.rs @@ -39,7 +39,7 @@ const DECOMPILER_EXTENSION: &str = "mv.move"; /// /// For example, if you want to disassemble an on-chain package `PackName` at account `0x42`: /// 1. Download the package with `aptos move download --account 0x42 --package PackName --bytecode` -/// 2. Disassemble the package bytecode with `aptos disassemble --package-path PackName/bytecode_modules` +/// 2. 
Disassemble the package bytecode with `aptos move disassemble --package-path PackName/bytecode_modules` #[derive(Debug, Parser)] pub struct Disassemble { #[clap(flatten)] @@ -214,6 +214,7 @@ impl BytecodeCommand { only_externally_visible: false, print_basic_blocks: true, print_locals: true, + print_bytecode_stats: false, }; let no_loc = Spanned::unsafe_no_loc(()).loc; let module: CompiledModule; diff --git a/crates/aptos/src/move_tool/coverage.rs b/crates/aptos/src/move_tool/coverage.rs index 102a22e3c9054..366b6bb9472f9 100644 --- a/crates/aptos/src/move_tool/coverage.rs +++ b/crates/aptos/src/move_tool/coverage.rs @@ -1,14 +1,19 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::common::types::{CliCommand, CliError, CliResult, CliTypedResult, MovePackageDir}; +use crate::{ + common::types::{CliCommand, CliError, CliResult, CliTypedResult, MovePackageDir}, + move_tool::{experiments_from_opt_level, fix_bytecode_version}, +}; use aptos_framework::extended_checks; use async_trait::async_trait; use clap::{Parser, Subcommand}; use move_compiler::compiled_unit::{CompiledUnit, NamedCompiledModule}; use move_coverage::{ - coverage_map::CoverageMap, format_csv_summary, format_human_summary, - source_coverage::SourceCoverageBuilder, summary::summarize_inst_cov, + coverage_map::CoverageMap, + format_csv_summary, format_human_summary, + source_coverage::{ColorChoice, SourceCoverageBuilder, TextIndicator}, + summary::summarize_inst_cov, }; use move_disassembler::disassembler::Disassembler; use move_package::{compilation::compiled_package::CompiledPackage, BuildConfig, CompilerConfig}; @@ -87,8 +92,18 @@ impl CliCommand<()> for SummaryCoverage { /// Display coverage information about the module against source code #[derive(Debug, Parser)] pub struct SourceCoverage { + /// Show coverage for the given module #[clap(long = "module")] pub module_name: String, + + /// Colorize output based on coverage + #[clap(long, default_value_t = 
ColorChoice::Default)] + pub color: ColorChoice, + + /// Tag each line with a textual indication of coverage + #[clap(long, default_value_t = TextIndicator::Explicit)] + pub tag: TextIndicator, + #[clap(flatten)] pub move_options: MovePackageDir, } @@ -110,9 +125,10 @@ impl CliCommand<()> for SourceCoverage { _ => panic!("Should all be modules"), }; let source_coverage = SourceCoverageBuilder::new(module, &coverage_map, source_map); - source_coverage - .compute_source_coverage(source_path) - .output_source_coverage(&mut std::io::stdout()) + let source_coverage = source_coverage.compute_source_coverage(source_path); + let output_result = + source_coverage.output_source_coverage(&mut std::io::stdout(), self.color, self.tag); + output_result .map_err(|err| CliError::UnexpectedError(format!("Failed to get coverage {}", err))) } } @@ -149,14 +165,23 @@ fn compile_coverage( dev_mode: move_options.dev, additional_named_addresses: move_options.named_addresses(), test_mode: false, + full_model_generation: move_options.check_test_code, install_dir: move_options.output_dir.clone(), + skip_fetch_latest_git_deps: move_options.skip_fetch_latest_git_deps, compiler_config: CompilerConfig { known_attributes: extended_checks::get_all_attribute_names().clone(), - skip_attribute_checks: false, - ..Default::default() + skip_attribute_checks: move_options.skip_attribute_checks, + bytecode_version: fix_bytecode_version( + move_options.bytecode_version, + move_options.language_version, + ), + compiler_version: move_options.compiler_version, + language_version: move_options.language_version, + experiments: experiments_from_opt_level(&move_options.optimize), }, ..Default::default() }; + let path = move_options.get_package_path()?; let coverage_map = CoverageMap::from_binary_file(path.join(".coverage_map.mvcov")).map_err(|err| { diff --git a/crates/aptos/src/move_tool/fmt.rs b/crates/aptos/src/move_tool/fmt.rs new file mode 100644 index 0000000000000..ae1766c6528cc --- /dev/null +++ 
b/crates/aptos/src/move_tool/fmt.rs @@ -0,0 +1,170 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + common::{ + types::{CliCommand, CliError, CliTypedResult}, + utils::dir_default_to_current, + }, + update::get_movefmt_path, +}; +use async_trait::async_trait; +use clap::{Args, Parser}; +use move_command_line_common::files::find_move_filenames; +use move_package::source_package::layout::SourcePackageLayout; +use std::{collections::BTreeMap, fs, path::PathBuf, process::Command}; + +/// Format the Move source code. +#[derive(Debug, Parser)] +pub struct Fmt { + #[clap(flatten)] + pub command: FmtCommand, +} + +#[derive(clap::ValueEnum, Clone, Copy, Debug, Eq, PartialEq, PartialOrd)] +pub enum EmitMode { + Overwrite, + NewFile, + StdOut, + Diff, +} + +#[derive(Debug, Args)] +pub struct FmtCommand { + /// How to generate and show the result after reformatting. + /// Warning: if not specified or set in the config file, files will by default be overwritten. + #[clap(long, value_enum)] + emit_mode: Option, + + /// Path to the move package (the folder with a Move.toml file) to be formatted + #[clap(long, value_parser)] + package_path: Option, + + /// Path to the configuration file movefmt.toml. + /// If not given, search is done recursively from the current dir to its parents + #[clap(long, value_parser)] + pub config_path: Option, + + /// Set options from command line. These settings take + /// priority over movefmt.toml. 
+ /// Config options can be found at https://github.com/movebit/movefmt/blob/develop/doc/how_to_use.md + #[clap(long, value_parser = crate::common::utils::parse_map::, default_value = "")] + pub(crate) config: BTreeMap, + + #[clap(long, short)] + /// Print verbose output + pub verbose: bool, + + #[clap(long, short)] + /// Print less output + pub quiet: bool, +} + +#[async_trait] +impl CliCommand for Fmt { + fn command_name(&self) -> &'static str { + "Fmt" + } + + async fn execute(mut self) -> CliTypedResult { + self.command.execute().await + } +} + +impl FmtCommand { + async fn execute(self) -> CliTypedResult { + let exe = get_movefmt_path()?; + let package_opt = self.package_path; + let config_path_opt = self.config_path; + let config_map = self.config; + let verbose_flag = self.verbose; + let quiet_flag = self.quiet; + let create_cmd = || { + let mut cmd = Command::new(exe.as_path()); + if let Some(emit_mode) = self.emit_mode { + let emit_mode = match emit_mode { + EmitMode::Overwrite => "overwrite", + EmitMode::NewFile => "new_file", + EmitMode::StdOut => "stdout", + EmitMode::Diff => "diff", + }; + cmd.arg(format!("--emit={}", emit_mode)); + } + if let Some(config_path) = config_path_opt.clone() { + cmd.arg(format!("--config-path={}", config_path.as_path().display())); + } + if verbose_flag { + cmd.arg("-v"); + } else if quiet_flag { + cmd.arg("-q"); + } + if !config_map.is_empty() { + let mut config_map_str_vec = vec![]; + for (key, value) in &config_map { + config_map_str_vec.push(format!("{}={}", key, value)); + } + cmd.arg(format!("--config={}", config_map_str_vec.join(","))); + } + cmd + }; + let to_cli_error = |e| CliError::IO(exe.display().to_string(), e); + let package_opt = if let Some(path) = package_opt { + fs::canonicalize(path.as_path()).ok() + } else { + None + }; + let package_path = dir_default_to_current(package_opt.clone()).unwrap(); + let root_res = SourcePackageLayout::try_find_root(&package_path.clone()); + if let Ok(root_package_path) = 
root_res { + let mut path_vec = vec![]; + let sources_path = root_package_path.join(SourcePackageLayout::Sources.path()); + if sources_path.exists() { + path_vec.push(sources_path.clone()); + } + let scripts_path = root_package_path.join(SourcePackageLayout::Scripts.path()); + if scripts_path.exists() { + path_vec.push(scripts_path.clone()); + } + if let Ok(move_sources) = find_move_filenames(&path_vec, false) { + for source in &move_sources { + let mut cur_cmd = create_cmd(); + cur_cmd.arg(format!("--file-path={}", source)); + let out = cur_cmd.output().map_err(to_cli_error)?; + if !out.status.success() { + return Err(CliError::UnexpectedError(format!( + "Formatter exited with status {}: {}", + out.status, + String::from_utf8(out.stderr).unwrap_or_default() + ))); + } else { + eprintln!("Formatting file:{:?}", source); + match String::from_utf8(out.stdout) { + Ok(output) => { + eprint!("{}", output); + }, + Err(err) => { + return Err(CliError::UnexpectedError(format!( + "Output generated by formatter is not valid utf8: {}", + err + ))); + }, + } + } + } + Ok(format!( + "Successfully formatted {} files", + move_sources.len() + )) + } else { + Err(CliError::UnexpectedError( + "Failed to find Move files".to_string(), + )) + } + } else { + Err(CliError::UnexpectedError(format!( + "Unable to find package manifest in {:?} or in its parents", + package_path + ))) + } + } +} diff --git a/crates/aptos/src/move_tool/lint.rs b/crates/aptos/src/move_tool/lint.rs new file mode 100644 index 0000000000000..f122b616da4bd --- /dev/null +++ b/crates/aptos/src/move_tool/lint.rs @@ -0,0 +1,137 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + common::types::{AccountAddressWrapper, CliCommand, CliTypedResult, MovePackageDir}, + move_tool::IncludedArtifacts, +}; +use aptos_framework::{BuildOptions, BuiltPackage}; +use async_trait::async_trait; +use clap::Parser; +use move_compiler_v2::Experiment; +use move_model::metadata::{CompilerVersion, 
LanguageVersion}; +use move_package::source_package::std_lib::StdVersion; +use std::{collections::BTreeMap, path::PathBuf}; + +/// Run a Lint tool to show additional warnings about the current package, in addition to ordinary +/// warnings and/or errors generated by the Move 2 compiler. +#[derive(Debug, Clone, Parser)] +pub struct LintPackage { + /// Path to a move package (the folder with a Move.toml file). Defaults to current directory. + #[clap(long, value_parser)] + pub package_dir: Option, + + /// Specify the path to save the compiled bytecode files which lint generates while + /// running checks. + /// Defaults to `/build` + #[clap(long, value_parser)] + pub output_dir: Option, + + /// ...or --language LANGUAGE_VERSION + /// Specify the language version to be supported. + /// Currently, defaults to `2.0`. + #[clap(long, value_parser = clap::value_parser!(LanguageVersion), + alias = "language", + default_value = "2.0", + verbatim_doc_comment)] + pub language_version: Option, + + /// Named addresses for the move binary + /// + /// Example: alice=0x1234, bob=0x5678 + /// + /// Note: This will fail if there are duplicates in the Move.toml file remove those first. + #[clap(long, value_parser = crate::common::utils::parse_map::, default_value = "")] + pub(crate) named_addresses: BTreeMap, + + /// Override the standard library version by mainnet/testnet/devnet + #[clap(long, value_parser)] + pub override_std: Option, + + /// Skip pulling the latest git dependencies + /// + /// If you don't have a network connection, the compiler may fail due + /// to no ability to pull git dependencies. This will allow overriding + /// this for local development. + #[clap(long)] + pub(crate) skip_fetch_latest_git_deps: bool, + + /// Do not complain about unknown attributes in Move code. 
+ #[clap(long)] + pub skip_attribute_checks: bool, + + /// Enables dev mode, which uses all dev-addresses and dev-dependencies + /// + /// Dev mode allows for changing dependencies and addresses to the preset [dev-addresses] and + /// [dev-dependencies] fields. This works both inside and out of tests for using preset values. + /// + /// Currently, it also additionally pulls in all test compilation artifacts + #[clap(long)] + pub dev: bool, + + /// Do apply extended checks for Aptos (e.g. `#[view]` attribute) also on test code. + /// NOTE: this behavior will become the default in the future. + /// See + #[clap(long, env = "APTOS_CHECK_TEST_CODE")] + pub check_test_code: bool, +} + +impl LintPackage { + fn to_move_options(&self) -> MovePackageDir { + let LintPackage { + dev, + package_dir, + output_dir, + named_addresses, + override_std, + skip_fetch_latest_git_deps, + language_version, + skip_attribute_checks, + check_test_code, + } = self.clone(); + MovePackageDir { + dev, + package_dir, + output_dir, + named_addresses, + override_std, + skip_fetch_latest_git_deps, + language_version, + skip_attribute_checks, + check_test_code, + ..MovePackageDir::new() + } + } +} + +#[async_trait] +impl CliCommand<&'static str> for LintPackage { + fn command_name(&self) -> &'static str { + "LintPackage" + } + + async fn execute(self) -> CliTypedResult<&'static str> { + let move_options = MovePackageDir { + compiler_version: Some(CompilerVersion::V2_0), + ..self.to_move_options() + }; + let more_experiments = vec![ + Experiment::LINT_CHECKS.to_string(), + Experiment::SPEC_CHECK.to_string(), + Experiment::SEQS_IN_BINOPS_CHECK.to_string(), + Experiment::ACCESS_CHECK.to_string(), + Experiment::STOP_AFTER_EXTENDED_CHECKS.to_string(), + ]; + let package_path = move_options.get_package_path()?; + let included_artifacts = IncludedArtifacts::Sparse; + let build_options = BuildOptions { + ..included_artifacts.build_options_with_experiments( + &move_options, + more_experiments, + true, + )? 
+ }; + BuiltPackage::build(package_path, build_options)?; + Ok("succeeded") + } +} diff --git a/crates/aptos/src/move_tool/mod.rs b/crates/aptos/src/move_tool/mod.rs index ebace9ddd792c..8c60c0a32e025 100644 --- a/crates/aptos/src/move_tool/mod.rs +++ b/crates/aptos/src/move_tool/mod.rs @@ -6,11 +6,11 @@ use crate::{ common::{ local_simulation, types::{ - load_account_arg, ArgWithTypeJSON, CliConfig, CliError, CliTypedResult, - ConfigSearchMode, EntryFunctionArguments, EntryFunctionArgumentsJSON, - MoveManifestAccountWrapper, MovePackageDir, OverrideSizeCheckOption, ProfileOptions, - PromptOptions, RestOptions, SaveFile, ScriptFunctionArguments, TransactionOptions, - TransactionSummary, + load_account_arg, ArgWithTypeJSON, ChunkedPublishOption, CliConfig, CliError, + CliTypedResult, ConfigSearchMode, EntryFunctionArguments, EntryFunctionArgumentsJSON, + MoveManifestAccountWrapper, MovePackageDir, OptimizationLevel, OverrideSizeCheckOption, + ProfileOptions, PromptOptions, RestOptions, SaveFile, ScriptFunctionArguments, + TransactionOptions, TransactionSummary, }, utils::{ check_if_file_exists, create_dir_if_not_exist, dir_default_to_current, @@ -21,19 +21,30 @@ use crate::{ move_tool::{ bytecode::{Decompile, Disassemble}, coverage::SummaryCoverage, + fmt::Fmt, + lint::LintPackage, manifest::{Dependency, ManifestNamedAddress, MovePackageManifest, PackageInfo}, }, CliCommand, CliResult, }; +use aptos_api_types::AptosErrorCode; use aptos_crypto::HashValue; use aptos_framework::{ - docgen::DocgenOptions, extended_checks, natives::code::UpgradePolicy, prover::ProverOptions, + chunked_publish::{ + chunk_package_and_create_payloads, large_packages_cleanup_staging_area, PublishType, + LARGE_PACKAGES_MODULE_ADDRESS, + }, + docgen::DocgenOptions, + extended_checks, + natives::code::UpgradePolicy, + prover::ProverOptions, BuildOptions, BuiltPackage, }; use aptos_gas_schedule::{MiscGasParameters, NativeGasParameters}; use aptos_move_debugger::aptos_debugger::AptosDebugger; use 
aptos_rest_client::{ aptos_api_types::{EntryFunctionId, HexEncodedBytes, IdentifierWrapper, MoveModuleId}, + error::RestError, Client, }; use aptos_types::{ @@ -45,15 +56,14 @@ use aptos_types::{ use aptos_vm::data_cache::AsMoveResolver; use async_trait::async_trait; use clap::{Parser, Subcommand, ValueEnum}; +use colored::Colorize; use itertools::Itertools; use move_cli::{self, base::test::UnitTestResult}; -use move_command_line_common::env::MOVE_HOME; +use move_command_line_common::{address::NumericalAddress, env::MOVE_HOME}; +use move_compiler_v2::Experiment; use move_core_types::{identifier::Identifier, language_storage::ModuleId, u256::U256}; use move_model::metadata::{CompilerVersion, LanguageVersion}; -use move_package::{ - source_package::{layout::SourcePackageLayout, std_lib::StdVersion}, - BuildConfig, CompilerConfig, -}; +use move_package::{source_package::layout::SourcePackageLayout, BuildConfig, CompilerConfig}; use move_unit_test::UnitTestingConfig; pub use package_hooks::*; use serde::{Deserialize, Serialize}; @@ -72,12 +82,18 @@ use url::Url; mod aptos_debug_natives; mod bytecode; pub mod coverage; +mod fmt; +mod lint; mod manifest; pub mod package_hooks; mod show; pub mod stored_package; -/// Tool for Move related operations +const HELLO_BLOCKCHAIN_EXAMPLE: &str = include_str!( + "../../../../aptos-move/move-examples/hello_blockchain/sources/hello_blockchain.move" +); + +/// Tool for Move smart contract related operations /// /// This tool lets you compile, test, and publish Move code, in addition /// to run any other tools that help run, verify, or provide information @@ -86,20 +102,28 @@ pub mod stored_package; pub enum MoveTool { BuildPublishPayload(BuildPublishPayload), Clean(CleanPackage), + ClearStagingArea(ClearStagingArea), + #[clap(alias = "build")] Compile(CompilePackage), + #[clap(alias = "build-script")] CompileScript(CompileScript), #[clap(subcommand)] Coverage(coverage::CoveragePackage), 
CreateObjectAndPublishPackage(CreateObjectAndPublishPackage), UpgradeObjectPackage(UpgradeObjectPackage), + DeployObject(DeployObjectCode), + UpgradeObject(UpgradeCodeObject), CreateResourceAccountAndPublishPackage(CreateResourceAccountAndPublishPackage), Disassemble(Disassemble), Decompile(Decompile), + #[clap(alias = "doc")] Document(DocumentPackage), Download(DownloadPackage), Init(InitPackage), + Lint(LintPackage), List(ListPackage), Prove(ProvePackage), + #[clap(alias = "deploy")] Publish(PublishPackage), Run(RunFunction), RunScript(RunScript), @@ -109,6 +133,7 @@ pub enum MoveTool { VerifyPackage(VerifyPackage), View(ViewFunction), Replay(Replay), + Fmt(Fmt), } impl MoveTool { @@ -116,6 +141,7 @@ impl MoveTool { match self { MoveTool::BuildPublishPayload(tool) => tool.execute_serialized().await, MoveTool::Clean(tool) => tool.execute_serialized().await, + MoveTool::ClearStagingArea(tool) => tool.execute_serialized().await, MoveTool::Compile(tool) => tool.execute_serialized().await, MoveTool::CompileScript(tool) => tool.execute_serialized().await, MoveTool::Coverage(tool) => tool.execute().await, @@ -123,6 +149,8 @@ impl MoveTool { tool.execute_serialized_success().await }, MoveTool::UpgradeObjectPackage(tool) => tool.execute_serialized_success().await, + MoveTool::DeployObject(tool) => tool.execute_serialized_success().await, + MoveTool::UpgradeObject(tool) => tool.execute_serialized_success().await, MoveTool::CreateResourceAccountAndPublishPackage(tool) => { tool.execute_serialized_success().await }, @@ -141,11 +169,13 @@ impl MoveTool { MoveTool::VerifyPackage(tool) => tool.execute_serialized().await, MoveTool::View(tool) => tool.execute_serialized().await, MoveTool::Replay(tool) => tool.execute_serialized().await, + MoveTool::Fmt(tool) => tool.execute_serialized().await, + MoveTool::Lint(tool) => tool.execute_serialized().await, } } } -#[derive(Parser)] +#[derive(Parser, Default)] pub struct FrameworkPackageArgs { /// Git revision or branch for the Aptos 
framework /// @@ -245,6 +275,11 @@ impl FrameworkPackageArgs { } } +#[derive(ValueEnum, Clone, Copy, Debug)] +pub enum Template { + HelloBlockchain, +} + /// Creates a new Move package at the given location /// /// This will create a directory for a Move package and a corresponding @@ -273,6 +308,10 @@ pub struct InitPackage { )] pub(crate) named_addresses: BTreeMap, + /// Template name for initialization + #[clap(long)] + pub(crate) template: Option