diff --git a/.github/auto_assign.yml b/.github/auto_assign.yml index 6d20e0a1a..823a7d540 100644 --- a/.github/auto_assign.yml +++ b/.github/auto_assign.yml @@ -2,7 +2,7 @@ addReviewers: true # Set to true to add assignees to pull requests -addAssignees: true +addAssignees: false # A list of reviewers to be added to pull requests (GitHub user name) reviewers: diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml deleted file mode 100644 index cd3312ad4..000000000 --- a/.github/workflows/build.yaml +++ /dev/null @@ -1,48 +0,0 @@ -name: "Mayastor tests" -on: - pull_request: - paths-ignore: - push: - branches: - - develop -jobs: - nix-shell: - name: Build and run cargo tests - runs-on: self-hosted - timeout-minutes: 30 - container: - image: docker.io/mayadata/ms-buildenv:latest - options: --privileged -v /dev:/dev -v /:/host -v /lib/modules:/lib/modules - steps: - - uses: actions/checkout@v2 - - run: /bin/modprobe nbd - - run: /bin/modprobe xfs - - run: /bin/modprobe nvme_tcp - - run: echo 8192 | tee /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages - - run: rm mayastor/.cargo/config - - run: rm nvmeadm/.cargo/config - - run: rm -rf /tmp/*.yaml - - run: nix-shell --run "echo 'Pulling in the environment...'" - - run: nix-shell --run "./scripts/cargo-test.sh" - Build_and_test_moac: - name: Build and run moac tests - runs-on: ubuntu-latest - container: - image: docker.io/mayadata/ms-buildenv:latest - steps: - - uses: actions/checkout@v2 - # npm prepare is normally done by npm install but not if run as a root - - run: nix-shell --run "cd csi/moac && npm install && npm run prepare && npm run compile" - - run: nix-shell --run "cd csi/moac && npm test" - Test_mayastor_with_mocha: - name: Run mocha tests - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: cachix/install-nix-action@v10 - - run: sudo apt-get install nvme-cli -y - - run: sudo modprobe nbd - - run: sudo modprobe xfs - - run: sudo modprobe nvme_tcp - - run: ( echo 2048 | sudo tee /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages ) - - run: nix-shell --run "./scripts/node-test.sh" diff --git a/.github/workflows/image.yaml b/.github/workflows/image.yaml deleted file mode 100644 index ee8d02052..000000000 --- a/.github/workflows/image.yaml +++ /dev/null @@ -1,39 +0,0 @@ -name: "Image" -on: - pull_request: - paths-ignore: - #- 'doc/**' - push: - branches: - - develop -jobs: - MOAC: - name: Build MOAC image - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: cachix/install-nix-action@v10 - - uses: cachix/cachix-action@v5 - with: - name: mayastor - skipNixBuild: true - - run: nix-build -A images.moac-image -o /tmp/moac-image - - uses: actions/upload-artifact@v2 - with: - name: mayastor-moac-image - path: /tmp/moac-image - if: ${{ github.event_name != 'pull_request' }} - Mayastor: - name: Build Mayastor image - runs-on: ubuntu-latest - steps: - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - - uses: actions/checkout@v2 - - uses: cachix/install-nix-action@v10 - - uses: cachix/cachix-action@v5 - with: - name: mayastor - skipNixBuild: true - - run: nix-build -A images.mayastor-image -o /tmp/mayastorImage diff --git a/.github/workflows/nix-tests.yaml b/.github/workflows/nix-tests.yaml deleted file mode 100644 index b79988f1c..000000000 --- a/.github/workflows/nix-tests.yaml +++ /dev/null @@ -1,27 +0,0 @@ -name: "Nix Workflow Tests" -on: - pull_request: - paths-ignore: - push: - branches: - - develop -jobs: - main: - name: Run Nix Tests - runs-on: self-hosted - 
timeout-minutes: 30 - defaults: - run: - working-directory: $GITHUB_WORKSPACE/repo-under-test - steps: - - uses: actions/checkout@v2 - with: - path: $GITHUB_WORKSPACE/repo-under-test # Checkout with a new path, to avoid permissions on the runner. - - run: modprobe kvm_intel nested=1 # Could do this once persistently on the runner. - - run: echo "::set-env name=NIX_PATH::/home/gila/.nix-defexpr/channels" - - run: bash -c "if [ -L ./result ]; then nix-store --delete ./result --ignore-liveness; fi" - - run: nix-build ./nix/test -A rebuild - - run: nix-build ./nix/test -A fio_nvme_basic - - run: nix-build ./nix/test -A nvmf_distributed - - run: nix-build ./nix/test -A nvmf_ports - - run: nix-build ./nix/test -A child_status diff --git a/.github/workflows/nix.yaml b/.github/workflows/nix.yaml deleted file mode 100644 index 79dcf9961..000000000 --- a/.github/workflows/nix.yaml +++ /dev/null @@ -1,17 +0,0 @@ -name: "nix-build with cachix" -on: - push: - pull_request: - paths: - - '**.nix' -jobs: - nix-build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: cachix/install-nix-action@v10 - - uses: cachix/cachix-action@v5 - with: - name: mayastor - signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}' - attributes: libspdk diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml deleted file mode 100644 index a175e0edd..000000000 --- a/.github/workflows/pr.yaml +++ /dev/null @@ -1,21 +0,0 @@ -name: "Lint and style Checks" -on: - pull_request: -jobs: - Clippy: - name: Clippy - runs-on: ubuntu-latest - container: - image: docker.io/mayadata/ms-buildenv:latest - steps: - - uses: actions/checkout@master - - run: nix-shell --run "cargo fmt --all -- --check" - - run: nix-shell --run "cargo clippy --all-targets -- -D warnings" - Semistandard: - name: SemiStandard - runs-on: ubuntu-latest - container: - image: docker.io/node:12 - steps: - - uses: actions/checkout@master - - run: ./scripts/js-check.sh diff --git a/Cargo.lock b/Cargo.lock index c238790b0..a44200530 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -60,6 +60,16 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7deb0a829ca7bcfaf5da70b073a8d128619259a7be8216a355e23f00763059e5" +[[package]] +name = "async-barrier" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06293698675eb72e1155867e5982f199d6b6c230dca35bc5ffd9852f470c22a" +dependencies = [ + "async-mutex", + "event-listener", +] + [[package]] name = "async-channel" version = "1.4.2" @@ -83,35 +93,56 @@ dependencies = [ [[package]] name = "async-executor" -version = "0.1.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90f47c78ea98277cb1f5e6f60ba4fc762f5eafe9f6511bc2f7dfd8b75c225650" +checksum = "d373d78ded7d0b3fa8039375718cde0aace493f2e34fb60f51cbf567562ca801" dependencies = [ - "async-io", + "async-task 4.0.2", + "concurrent-queue", + "fastrand", + "futures-lite", + "once_cell", + "vec-arena", +] + +[[package]] +name = "async-fs" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3572236ba37147ca2b674a0bd5afd20aec0cd925ab125ab6fad6543960f9002" +dependencies = [ + "blocking", "futures-lite", - "multitask", - "parking 1.0.6", - "scoped-tls", - "waker-fn", ] [[package]] name = "async-io" -version = "0.1.11" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ae22a338d28c75b53702b66f77979062cb29675db376d99e451af4fa79dedb3" +checksum = 
"6e727cebd055ab2861a854f79def078c4b99ea722d54c6800a0e274389882d4c" dependencies = [ - "cfg-if", "concurrent-queue", + "fastrand", "futures-lite", - "libc", + "log", + "nb-connect", "once_cell", - "parking 2.0.0", + "parking", "polling", - "socket2", "vec-arena", - "wepoll-sys-stjepang", - "winapi 0.3.9", + "waker-fn", +] + +[[package]] +name = "async-lock" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76000290eb3c67dfe4e2bdf6b6155847f8e16fc844377a7bd0b5e97622656362" +dependencies = [ + "async-barrier", + "async-mutex", + "async-rwlock", + "async-semaphore", ] [[package]] @@ -123,6 +154,53 @@ dependencies = [ "event-listener", ] +[[package]] +name = "async-net" +version = "1.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14a5335056541826f855bf95b936df9788adbacf94b15ef7104029f7fff3e82a" +dependencies = [ + "async-io", + "blocking", + "fastrand", + "futures-lite", +] + +[[package]] +name = "async-process" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bb915df28b8309139bd9c9c700d84c20e5c21385d05378caa84912332d0f6a1" +dependencies = [ + "async-io", + "blocking", + "cfg-if", + "event-listener", + "futures-lite", + "once_cell", + "signal-hook", + "winapi 0.3.9", +] + +[[package]] +name = "async-rwlock" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "806b1cc0828c2b1611ccbdd743fc0cc7af09009e62c95a0501c1e5da7b142a22" +dependencies = [ + "async-mutex", + "event-listener", +] + +[[package]] +name = "async-semaphore" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "538c756e85eb6ffdefaec153804afb6da84b033e2e5ec3e9d459c34b4bf4d3f6" +dependencies = [ + "event-listener", +] + [[package]] name = "async-stream" version = "0.2.1" @@ -150,13 +228,20 @@ version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c17772156ef2829aadc587461c7753af20b7e8db1529bc66855add962a3b35d3" +[[package]] +name = "async-task" +version = "4.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ab27c1aa62945039e44edaeee1dc23c74cc0c303dd5fe0fb462a184f1c3a518" + [[package]] name = "async-tls" -version = "0.8.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df097e3f506bec0e1a24f06bb3c962c228f36671de841ff579cb99f371772634" +checksum = "d85a97c4a0ecce878efd3f945f119c78a646d8975340bca0398f9bb05c30cc52" dependencies = [ - "futures", + "futures-core", + "futures-io", "rustls", "webpki", "webpki-roots", @@ -321,12 +406,13 @@ dependencies = [ [[package]] name = "blocking" -version = "0.5.2" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea5800d29218fea137b0880387e5948694a23c93fcdde157006966693a865c7c" +checksum = "2640778f8053e72c11f621b0a5175a0560a269282aa98ed85107773ab8e2a556" dependencies = [ "async-channel", "atomic-waker", + "fastrand", "futures-lite", "once_cell", "waker-fn", @@ -736,6 +822,18 @@ dependencies = [ "generic-array", ] +[[package]] +name = "dns-lookup" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f69635ffdfbaea44241d7cca30a5e3a2e1c892613a6a8ad8ef03deeb6803480" +dependencies = [ + "cfg-if", + "libc", + "socket2", + "winapi 0.3.9", +] + [[package]] name = "doc-comment" version = "0.3.3" @@ -748,12 +846,6 @@ version = "0.4.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "134951f4028bdadb9b84baf4232681efbf277da25144b9b0ad65df75946c422b" -[[package]] -name = "easy-parallel" -version = "3.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dd4afd79212583ff429b913ad6605242ed7eec277e950b1438f300748f948f4" - [[package]] name = "ed25519" version = "1.0.0-pre.1" @@ -976,15 +1068,15 @@ checksum = "de27142b013a8e869c14957e6d2edeef89e97c289e69d042ee3a49acd8b51789" [[package]] name = "futures-lite" -version = "0.1.11" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97999970129b808f0ccba93211201d431fcc12d7e1ffae03a61b5cedd1a7ced2" +checksum = "0db18c5f58083b54b0c416638ea73066722c2815c1e54dd8ba85ee3def593c3a" dependencies = [ "fastrand", "futures-core", "futures-io", "memchr", - "parking 2.0.0", + "parking", "pin-project-lite", "waker-fn", ] @@ -1314,6 +1406,12 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "json" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "078e285eafdfb6c4b434e0d31e8cfcb5115b651496faca5749b88fafd4f23bfd" + [[package]] name = "jsonrpc" version = "0.1.0" @@ -1353,9 +1451,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.76" +version = "0.2.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "755456fae044e6fa1ebbbd1b3e902ae19e73097ed4ed87bb79934a867c007bc3" +checksum = "f2f96b10ec2560088a8e76961b00d47107b3a625fecb76dedb29ee7ccbf98235" [[package]] name = "libloading" @@ -1422,7 +1520,7 @@ name = "mayastor" version = "0.1.0" dependencies = [ "assert_matches", - "async-task", + "async-task 3.0.0", "async-trait", "bincode", "byte-unit", @@ -1433,6 +1531,7 @@ dependencies = [ "crc", "crossbeam", "crossbeam-sync", + "dns-lookup", "env_logger", "futures", "futures-timer", @@ -1446,6 +1545,7 @@ dependencies = [ "nix 0.16.1", "once_cell", "pin-utils", + "proc-mounts", "prost", "prost-derive", "prost-types", @@ -1456,6 +1556,7 @@ dependencies = [ "serde_json", "serde_yaml", "signal-hook", + "smol", "snafu", "spdk-sys", "structopt", @@ -1467,6 +1568,7 @@ dependencies = [ "tracing-futures", "tracing-log", "tracing-subscriber", + "udev", "url", "uuid", ] @@ -1571,32 +1673,23 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8883adfde9756c1d30b0f519c9b8c502a94b41ac62f696453c37c7fc0a958ce" -[[package]] -name = "multitask" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c09c35271e7dcdb5f709779111f2c8e8ab8e06c1b587c1c6a9e179d865aaa5b4" -dependencies = [ - "async-task", - "concurrent-queue", - "fastrand", -] - [[package]] name = "nats" -version = "0.7.4" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cec67477160179e2cf8526f53ebf1fe060de38400663ad1a5de86bce46dda1d" +checksum = "4f0bc27324f2967df06397f8608a1fcfe76fa0fd17d1b6b90a8796f79b4d180f" dependencies = [ "async-channel", "async-dup", + "async-executor", + "async-io", "async-mutex", + "async-net", "async-tls", "base64-url", - "crossbeam-channel", - "futures-channel", + "futures-lite", "itoa", - "lazy_static", + "json", "log", "nkeys", "nuid", @@ -1605,9 +1698,16 @@ dependencies = [ "regex", "rustls", "rustls-native-certs", - "serde", - "serde_json", - "smol", +] + +[[package]] +name = "nb-connect" +version = "1.0.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "701f47aeb98466d0a7fea67e2c2f667c33efa1f2e4fd7f76743aac1153196f72" +dependencies = [ + "libc", + "winapi 0.3.9", ] [[package]] @@ -1761,12 +1861,6 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" -[[package]] -name = "parking" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6cb300f271742d4a2a66c01b6b2fa0c83dfebd2e0bf11addb879a3547b4ed87c" - [[package]] name = "parking" version = "2.0.0" @@ -1849,9 +1943,9 @@ checksum = "d36492546b6af1463394d46f0c834346f31548646f6ba10849802c9c9a27ac33" [[package]] name = "polling" -version = "0.1.9" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fffa183f6bd5f1a8a3e1f60ce2f8d5621e350eed84a62d6daaa5b9d1aaf6fbd" +checksum = "e0720e0b9ea9d52451cf29d3413ba8a9303f8815d9d9653ef70e03ff73e65566" dependencies = [ "cfg-if", "libc", @@ -2323,12 +2417,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "scoped-tls" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" - [[package]] name = "scopeguard" version = "1.1.0" @@ -2515,18 +2603,20 @@ checksum = "fbee7696b84bbf3d89a1c2eccff0850e3047ed46bfcd2e92c29a2d074d57e252" [[package]] name = "smol" -version = "0.3.3" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67583f4ccc13bbb105a0752058d8ad66c47753d85445952809bcaca891954f83" +checksum = "7ca2722989073e89917a575862fb49dba3321af152f0cf4a4164d9482aabdf28" dependencies = [ "async-channel", "async-executor", + "async-fs", "async-io", + "async-lock", + "async-net", + "async-process", "blocking", - "cfg-if", - "easy-parallel", "futures-lite", - "num_cpus", + "once_cell", ] [[package]] @@ -3228,9 +3318,9 @@ dependencies = [ [[package]] name = "vec-arena" -version = "0.5.2" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cb18268690309760d59ee1a9b21132c126ba384f374c59a94db4bc03adeb561" +checksum = "eafc1b9b2dfc6f5529177b62cf806484db55b32dc7c9658a118e11bbeb33061d" [[package]] name = "vec_map" @@ -3348,9 +3438,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8eff4b7516a57307f9349c64bf34caa34b940b66fed4b2fb3136cb7386e5739" +checksum = "0f20dea7535251981a9670857150d571846545088359b28e4951d350bdaf179f" dependencies = [ "webpki", ] diff --git a/Jenkinsfile b/Jenkinsfile index c837978e4..6e1e378f9 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -1,3 +1,46 @@ +#!/usr/bin/env groovy + +// Update status of a commit in github +def updateGithubCommitStatus(commit, msg, state) { + step([ + $class: 'GitHubCommitStatusSetter', + reposSource: [$class: "ManuallyEnteredRepositorySource", url: "https://github.com/openebs/Mayastor.git"], + commitShaSource: [$class: "ManuallyEnteredShaSource", sha: commit], + errorHandlers: [[$class: "ChangingBuildStatusErrorHandler", result: "UNSTABLE"]], + contextSource: [ + $class: 'ManuallyEnteredCommitContextSource', + context: 'continuous-integration/jenkins/branch' + ], + statusResultSource: [ + $class: 'ConditionalStatusResultSource', + results: [ + [$class: 'AnyBuildResult', message: msg, state: state] + ] + ] + ]) +} + +// Send out a slack 
message if the branch got broken or has recovered +def notifySlackUponStateChange(build) { + def cur = build.getResult() + def prev = build.getPreviousBuild().getResult() + if (cur != prev) { + if (cur == 'SUCCESS') { + slackSend( + channel: '#mayastor-backend', + color: 'normal', + message: "Branch ${env.BRANCH_NAME} has been fixed :beers: (<${env.BUILD_URL}|Open>)" + ) + } else if (prev == 'SUCCESS') { + slackSend( + channel: '#mayastor-backend', + color: 'danger', + message: "Branch ${env.BRANCH_NAME} is broken :face_with_raised_eyebrow: (<${env.BUILD_URL}|Open>)" + ) + } + } +} + pipeline { agent none triggers { @@ -10,14 +53,18 @@ pipeline { when { beforeAgent true anyOf { - branch 'PR-*' allOf { - branch 'develop' - triggeredBy 'TimerTrigger' + branch 'staging' + not { triggeredBy 'TimerTrigger' } + } + allOf { + branch 'trying' + not { triggeredBy 'TimerTrigger' } } } } steps { + updateGithubCommitStatus(env.GIT_COMMIT, 'Started to test the commit', 'pending') sh 'nix-shell --run "cargo fmt --all -- --check"' sh 'nix-shell --run "cargo clippy --all-targets -- -D warnings"' sh 'nix-shell --run "./scripts/js-check.sh"' @@ -27,10 +74,20 @@ when { beforeAgent true anyOf { - branch 'PR-*' + allOf { + branch 'staging' + not { triggeredBy 'TimerTrigger' } + } + allOf { + branch 'trying' + not { triggeredBy 'TimerTrigger' } + } allOf { branch 'develop' - triggeredBy 'TimerTrigger' + anyOf { + triggeredBy 'TimerTrigger' + triggeredBy cause: 'UserIdCause' + } } } } @@ -40,6 +97,12 @@ steps { sh 'nix-shell --run "./scripts/cargo-test.sh"' } + post { + always { + // temporary workaround for leaked spdk_iscsi_conns files + sh 'sudo rm -f /dev/shm/*' + } + } } stage('mocha api tests') { agent { label 'nixos-mayastor' } @@ -49,6 +112,8 @@ post { always { junit '*-xunit-report.xml' + // temporary workaround for leaked spdk_iscsi_conns files + sh 'sudo rm -f /dev/shm/*' } } } @@ -88,15 +153,25 @@ when { beforeAgent true anyOf { - branch 'master' - branch 'release/*' + allOf { + branch 'master' + not { triggeredBy 'TimerTrigger' } + } + allOf { + branch 'release/*' + not { triggeredBy 'TimerTrigger' } + } allOf { branch 'develop' - triggeredBy 'TimerTrigger' + anyOf { + triggeredBy 'TimerTrigger' + triggeredBy cause: 'UserIdCause' + } } } } steps { + updateGithubCommitStatus(env.GIT_COMMIT, 'Started to test the commit', 'pending') withCredentials([usernamePassword(credentialsId: 'dockerhub', usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD')]) { sh 'echo $PASSWORD | docker login -u $USERNAME --password-stdin' } @@ -110,4 +185,29 @@ } } } -} \ No newline at end of file + + // The main motivation for the post block is that if all stages were skipped + // (which happens when running a cron job and branch != develop) then we don't + // want to set the commit status in github (jenkins will implicitly set it to + // success).
+ post { + always { + node(null) { + script { + // If no tests were run then we should neither be updating commit + // status in github nor send any slack messages + if (currentBuild.result != null) { + if (currentBuild.getResult() == 'SUCCESS') { + updateGithubCommitStatus(env.GIT_COMMIT, 'Looks good', 'success') + } else { + updateGithubCommitStatus(env.GIT_COMMIT, 'Test failed', 'failure') + } + if (env.BRANCH_NAME == 'develop') { + notifySlackUponStateChange(currentBuild) + } + } + } + } + } + } +} diff --git a/README.md b/README.md index 7e5e39964..fda43a18b 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,10 @@ -# MayaStor ![CI-basic](https://github.com/openebs/Mayastor/workflows/CI-basic/badge.svg) [![Releases](https://img.shields.io/github/release/openebs/Mayastor/all.svg?style=flat-square)](https://github.com/openebs/Mayastor/releases) -[![built with nix](https://builtwithnix.org/badge.svg)](https://builtwithnix.org) -![CI-basic](https://github.com/openebs/Mayastor/workflows/CI-basic/badge.svg) -[![Slack](https://img.shields.io/badge/JOIN-SLACK-blue)]( https://openebs-community.slack.com) +# MayaStor + +[![Releases](https://img.shields.io/github/release/openebs/Mayastor/all.svg?style=flat-square)](https://github.com/openebs/Mayastor/releases) +[![CI-basic](https://mayastor-ci.mayadata.io/buildStatus/icon?job=Mayastor%2Fdevelop)](https://mayastor-ci.mayadata.io/blue/organizations/jenkins/Mayastor/activity/) +[![Slack](https://img.shields.io/badge/JOIN-SLACK-blue)](https://kubernetes.slack.com/messages/openebs) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fopenebs%2FMayaStor.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fopenebs%2FMayaStor?ref=badge_shield) +[![built with nix](https://builtwithnix.org/badge.svg)](https://builtwithnix.org) OpenEBS Logo diff --git a/bors.toml b/bors.toml new file mode 100644 index 000000000..ec00b9795 --- /dev/null +++ b/bors.toml @@ -0,0 +1,8 @@ +status = [ "continuous-integration/jenkins/branch" ] +timeout_sec = 10000 +required_approvals = 2 +delete_merged_branches = true +block_labels = [ "DO NOT MERGE", "wip" ] +cut_body_after = "---" +committer.name = "mayastor-bors" +committer.email = "mayastor-bors@noreply.github.com" diff --git a/csi/moac/finalizer_helper.ts b/csi/moac/finalizer_helper.ts new file mode 100644 index 000000000..4eb89c7da --- /dev/null +++ b/csi/moac/finalizer_helper.ts @@ -0,0 +1,173 @@ +// +'use strict'; + +const k8s = require('@kubernetes/client-node'); +const log = require('./logger').Logger('finalizer_helper'); + +export class FinalizerHelper { + private kubeConfig: any; + private k8sApi: any; + private namespace: String; + private groupname: String; + private version: String; + private plural: String; + + constructor (namespace: String, groupname:String, version:String, plural:String) { + this.namespace = namespace; + this.groupname = groupname; + this.version = version; + this.kubeConfig = new k8s.KubeConfig(); + this.kubeConfig.loadFromDefault(); + this.k8sApi = this.kubeConfig.makeApiClient(k8s.CustomObjectsApi); + this.plural = plural; + } + + addFinalizer(body: any, instancename: String, finalizer: String) { + if (body.metadata.deletionTimestamp != undefined) { + log.warn(`addFinalizer(${instancename},${finalizer}), deletionTimestamp is set`); + return; + } + + if (body.metadata.finalizers != undefined) { + const index = body.metadata.finalizers.indexOf(finalizer); + if ( index > -1) { + log.debug(`@addFinalizer(${instancename},${finalizer}), finalizer already present`); + 
return; + } + body.metadata.finalizers.push(finalizer); + } else { + body.metadata.finalizers = [finalizer]; + } + + // TODO: use patchNamespacedCustomObject + this.k8sApi.replaceNamespacedCustomObject( + this.groupname, + this.version, + this.namespace, + this.plural, + instancename, + body) + .then((res:any) => { + log.debug(`added finalizer:${finalizer} to ${this.plural}:${instancename}`); + }) + .catch((err:any) => { + log.error(`add finalizer:${finalizer} to ${this.plural}:${instancename}, update failed: code=${err.body.code}, reason=${err.body.reason}, ${err.body.message}`); + }); + } + + removeFinalizer(body: any, instancename: String, finalizer: String) { + if (body.metadata.finalizers == undefined) { + log.debug(`removeFinalizer(${instancename},${finalizer}), no finalizers defined.`); + return; + } + + const index = body.metadata.finalizers.indexOf(finalizer); + if ( index < 0) { + log.debug(`removeFinalizer(${instancename},${finalizer}), finalizer not found`); + return; + } + body.metadata.finalizers.splice(index, 1); + + // TODO: use patchNamespacedCustomObject + this.k8sApi.replaceNamespacedCustomObject( + this.groupname, + this.version, + this.namespace, + this.plural, + instancename, + body). + then((res:any) => { + log.debug(`removed finalizer:${finalizer} from ${this.plural}:${instancename}`); + }) + .catch((err: any) => { + log.error(`remove finalizer:${finalizer} from ${this.plural}:${instancename}, update failed: code=${err.body.code}, reason=${err.body.reason}, ${err.body.message}`); + }); + } + + addFinalizerToCR(instancename: String, finalizer: String) { + this.k8sApi.getNamespacedCustomObject( + this.groupname, + this.version, + this.namespace, + this.plural, + instancename) + .then((customresource:any) => { + let body = customresource.body; + + if (body.metadata.deletionTimestamp != undefined) { + log.warn(`addFinalizerToCR(${instancename},${finalizer}), deletionTimestamp is set`); + return; + } + + if (body.metadata.finalizers != undefined) { + const index = body.metadata.finalizers.indexOf(finalizer); + if ( index > -1) { + log.debug(`@addFinalizerToCR(${instancename},${finalizer}), finalizer already present`); + return; + } + body.metadata.finalizers.splice(-1, 0, finalizer); + } else { + body.metadata.finalizers = [finalizer]; + } + + // TODO: use patchNamespacedCustomObject + this.k8sApi.replaceNamespacedCustomObject( + this.groupname, + this.version, + this.namespace, + this.plural, + instancename, + body) + .then((res:any) => { + log.debug(`added finalizer:${finalizer} to ${this.plural}:${instancename}`); + }) + .catch((err:any) => { + log.error(`add finalizer:${finalizer} to ${this.plural}:${instancename}, update failed: code=${err.body.code}, reason=${err.body.reason}, ${err.body.message}`); + }); + }) + .catch((err: any) => { + log.error(`add finalizer:${finalizer} to ${this.plural}:${instancename}, get failed: code=${err.body.code}, reason=${err.body.reason}, ${err.body.message}`); + }); + } + + removeFinalizerFromCR(instancename: String, finalizer: String) { + this.k8sApi.getNamespacedCustomObject( + this.groupname, + this.version, + this.namespace, + this.plural, + instancename) + .then((customresource:any) => { + let body = customresource.body; + if (body.metadata.finalizers == undefined) { + log.debug(`removeFinalizerFromCR(${instancename},${finalizer}), no finalizers on pool`); + return; + } + + const index = body.metadata.finalizers.indexOf(finalizer); + if ( index < 0) { + log.debug(`removeFinalizerFromCR(${instancename},${finalizer}), finalizer 
not found`); + return; + } + body.metadata.finalizers.splice(index, 1); + + // TODO: use patchNamespacedCustomObject + this.k8sApi.replaceNamespacedCustomObject( + this.groupname, + this.version, + this.namespace, + this.plural, + instancename, + body). + then((res:any) => { + log.debug(`removed finalizer:${finalizer} from ${this.plural}:${instancename}`); + }) + .catch((err: any) => { + log.error(`remove finalizer:${finalizer} from ${this.plural}:${instancename}, update failed: code=${err.body.code}, reason=${err.body.reason}, ${err.body.message}`); + }); + }) + .catch((err: any) => { + log.error(`remove finalizer:${finalizer} from ${this.plural}:${instancename}, get failed: code=${err.body.code}, reason=${err.body.reason}, ${err.body.message}`); + }); + } +} diff --git a/csi/moac/pool_operator.js b/csi/moac/pool_operator.js index e69374174..3ea9d21cd 100644 --- a/csi/moac/pool_operator.js +++ b/csi/moac/pool_operator.js @@ -11,6 +11,8 @@ const log = require('./logger').Logger('pool-operator'); const Watcher = require('./watcher'); const EventStream = require('./event_stream'); const Workq = require('./workq'); +const { FinalizerHelper } = require('./finalizer_helper'); +const poolFinalizerValue = 'finalizer.mayastor.openebs.io'; // Load custom resource definition const crdPool = yaml.safeLoad( @@ -28,6 +30,12 @@ class PoolOperator { this.resource = {}; // List of storage pool resources indexed by name. this.watcher = null; // pool CRD watcher. this.workq = new Workq(); // for serializing pool operations + this.finalizerHelper = new FinalizerHelper( + this.namespace, + crdPool.spec.group, + crdPool.spec.version, + crdPool.spec.names.plural + ); } // Create pool CRD if it doesn't exist and augment client object so that CRD @@ -110,6 +118,8 @@ class PoolOperator { await self.workq.push(ev, self._onPoolEvent.bind(self)); } else if (ev.kind === 'node' && (ev.eventType === 'sync' || ev.eventType === 'mod')) { await self.workq.push(ev.object.name, self._onNodeSyncEvent.bind(self)); + } else if (ev.kind === 'replica' && (ev.eventType === 'new' || ev.eventType === 'del')) { + await self.workq.push(ev, self._onReplicaEvent.bind(self)); } }); } @@ -157,6 +167,35 @@ class PoolOperator { } } + // Handler for new/del replica events + // + // @param {object} ev Replica event as received from event stream. + // + async _onReplicaEvent (ev) { + const replica = ev.object; + + log.debug(`Received "${ev.eventType}" event for replica "${replica.name}"`); + + if (replica.pool === undefined) { + log.warn(`not processing for finalizers: pool not defined for replica ${replica.name}.`); + return; + } + + const pool = this.registry.getPool(replica.pool.name); + if (pool == null) { + log.warn(`not processing for finalizers: failed to retrieve pool ${replica.pool.name}`); + return; + } + + log.debug(`On "${ev.eventType}" event for replica "${replica.name}", replica count=${pool.replicas.length}`); + + if (pool.replicas.length > 0) { + this.finalizerHelper.addFinalizerToCR(replica.pool.name, poolFinalizerValue); + } else { + this.finalizerHelper.removeFinalizerFromCR(replica.pool.name, poolFinalizerValue); + } + } + // Stop the watcher, destroy event stream and reset resource cache. async stop () { this.watcher.removeAllListeners(); @@ -307,7 +346,8 @@ class PoolOperator { reason, pool.disks, pool.capacity, - pool.used + pool.used, + pool.replicas.length ); } @@ -324,8 +364,9 @@ class PoolOperator { // @param {string[]} [disks] Disk URIs. // @param {number} [capacity] Capacity of the pool in bytes. 
// @param {number} [used] Used bytes in the pool. + // @param {number} [replicacount] Count of replicas using the pool. // - async _updateResourceProps (name, state, reason, disks, capacity, used) { + async _updateResourceProps (name, state, reason, disks, capacity, used, replicacount) { // For the update of CRD status we need a real k8s pool object, change the // status in it and store it back. Another reason for grabbing the latest // version of CRD from watcher cache (even if this.resource contains an older @@ -362,8 +403,8 @@ class PoolOperator { if (used != null) { status.used = used; } - k8sPool.status = status; + k8sPool.status = status; try { await this.k8sClient.apis['openebs.io'].v1alpha1 .namespaces(this.namespace) @@ -372,6 +413,14 @@ class PoolOperator { } catch (err) { log.error(`Failed to update status of pool "${name}": ${err}`); } + + if (replicacount != null) { + if (replicacount === 0) { + this.finalizerHelper.removeFinalizer(k8sPool, name, poolFinalizerValue); + } else { + this.finalizerHelper.addFinalizer(k8sPool, name, poolFinalizerValue); + } + } } } diff --git a/csi/moac/test/mayastor_mock.js b/csi/moac/test/mayastor_mock.js index 9cbf700d3..7c2438851 100644 --- a/csi/moac/test/mayastor_mock.js +++ b/csi/moac/test/mayastor_mock.js @@ -69,8 +69,8 @@ class MayastorServer { const args = call.request; assertHasKeys( args, - ['name', 'disks', 'blockSize', 'ioIf'], - ['blockSize', 'ioIf'] + ['name', 'disks'], + [] ); var pool = self.pools.find((p) => p.name === args.name); if (!pool) { diff --git a/csi/moac/tsconfig.json b/csi/moac/tsconfig.json index e173192e5..d90c310b0 100644 --- a/csi/moac/tsconfig.json +++ b/csi/moac/tsconfig.json @@ -63,6 +63,7 @@ "files": [ "replica.ts", "pool.ts", - "nexus.ts" + "nexus.ts", + "finalizer_helper.ts" ] -} \ No newline at end of file +} diff --git a/csi/moac/volume.js b/csi/moac/volume.js index 11e3b37a2..d0a041b97 100644 --- a/csi/moac/volume.js +++ b/csi/moac/volume.js @@ -156,7 +156,7 @@ class Volume { } catch (err) { throw new GrpcError( GrpcCode.INTERNAL, - `Failed to set share pcol to ${share} for replica "${replica}": ${err}` + `Failed to set share protocol to ${share} for replica "${replica}": ${err}` ); } } @@ -529,7 +529,7 @@ class Volume { } catch (err) { throw new GrpcError( GrpcCode.INTERNAL, - `Failed to set share pcol to ${share} for replica "${replica}": ${err}` + `Failed to set share protocol to ${share} for replica "${replica}": ${err}` ); } } diff --git a/deploy/mayastor-daemonset-config.yaml b/deploy/mayastor-daemonset-config.yaml new file mode 100644 index 000000000..578a685c4 --- /dev/null +++ b/deploy/mayastor-daemonset-config.yaml @@ -0,0 +1,106 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + namespace: mayastor + name: mayastor + labels: + openebs/engine: mayastor +spec: + selector: + matchLabels: + app: mayastor + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + minReadySeconds: 10 + template: + metadata: + labels: + app: mayastor + spec: + hostNetwork: true + # To resolve services from mayastor namespace + dnsPolicy: ClusterFirstWithHostNet + nodeSelector: + openebs.io/engine: mayastor + kubernetes.io/arch: amd64 + # NOTE: Each container must have mem/cpu limits defined in order to + # belong to Guaranteed QoS class, hence can never get evicted in case of + # pressure unless they exceed those limits. limits and requests must be + # the same. 
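+      # The init container below blocks mayastor's startup until the NATS
+      # message bus is reachable: nc -vz opens (and immediately closes) a TCP
+      # connection to the 'nats' service on port 4222, retrying every second.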
+ initContainers: + - name: message-bus-probe + image: busybox:latest + command: ['sh', '-c', 'until nc -vz nats 4222; do echo "Waiting for message bus..."; sleep 1; done;'] + containers: + - name: mayastor + image: mayadata/mayastor:latest + imagePullPolicy: Always + env: + - name: MY_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: IMPORT_NEXUSES + value: "false" + args: + - "-N$(MY_NODE_NAME)" + - "-g$(MY_POD_IP)" + - "-nnats" + - "-y/var/local/mayastor/config.yaml" + - "-C/var/local/mayastor/child-status-config.yaml" + securityContext: + privileged: true + volumeMounts: + - name: device + mountPath: /dev + - name: dshm + mountPath: /dev/shm + - name: configlocation + mountPath: /var/local/mayastor/ + - name: config + mountPath: /var/local/mayastor/config.yaml + - name: child-status-config + mountPath: /var/local/mayastor/child-status-config.yaml + resources: + limits: + cpu: "1" + memory: "500Mi" + hugepages-2Mi: "1Gi" + requests: + cpu: "1" + memory: "500Mi" + hugepages-2Mi: "1Gi" + ports: + - containerPort: 10124 + protocol: TCP + name: mayastor + volumes: + - name: device + hostPath: + path: /dev + type: Directory + - name: dshm + emptyDir: + medium: Memory + sizeLimit: "1Gi" + - name: hugepage + emptyDir: + medium: HugePages + - name: configlocation + hostPath: + path: /var/local/mayastor/ + type: DirectoryOrCreate + - name: config + hostPath: + path: /var/local/mayastor/config.yaml + type: FileOrCreate + - name: child-status-config + hostPath: + path: /var/local/mayastor/child-status-config.yaml + type: FileOrCreate diff --git a/deploy/mayastor-daemonset.yaml b/deploy/mayastor-daemonset.yaml index eab2a3eb9..69e3c62a7 100644 --- a/deploy/mayastor-daemonset.yaml +++ b/deploy/mayastor-daemonset.yaml @@ -29,6 +29,10 @@ spec: # belong to Guaranteed QoS class, hence can never get evicted in case of # pressure unless they exceed those limits. limits and requests must be # the same. 
+ initContainers: + - name: message-bus-probe + image: busybox:latest + command: ['sh', '-c', 'until nc -vz nats 4222; do echo "Waiting for message bus..."; sleep 1; done;'] containers: - name: mayastor image: mayadata/mayastor:latest @@ -42,13 +46,10 @@ valueFrom: fieldRef: fieldPath: status.podIP - - name: IMPORT_NEXUSES - value: "false" args: - "-N$(MY_NODE_NAME)" - "-g$(MY_POD_IP)" - "-nnats" - - "-y/var/local/mayastor/config.yaml" securityContext: privileged: true volumeMounts: @@ -56,10 +57,6 @@ mountPath: /dev - name: dshm mountPath: /dev/shm - - name: configlocation - mountPath: /var/local/mayastor/ - - name: config - mountPath: /var/local/mayastor/config.yaml resources: limits: cpu: "1" @@ -85,11 +82,3 @@ - name: hugepage emptyDir: medium: HugePages - - name: configlocation - hostPath: - path: /var/local/mayastor/ - type: DirectoryOrCreate - - name: config - hostPath: - path: /var/local/mayastor/config.yaml - type: FileOrCreate diff --git a/deploy/moac-rbac.yaml b/deploy/moac-rbac.yaml index 6752a9d2e..464799af5 100644 --- a/deploy/moac-rbac.yaml +++ b/deploy/moac-rbac.yaml @@ -29,7 +29,7 @@ rules: # must read mayastor pools info - apiGroups: ["openebs.io"] resources: ["mayastorpools"] - verbs: ["get", "list", "watch", "update"] + verbs: ["get", "list", "watch", "update", "replace"] # must update mayastor pools status - apiGroups: ["openebs.io"] resources: ["mayastorpools/status"] diff --git a/doc/contribute.md b/doc/contribute.md index 5fdc76a67..f9f844399 100644 --- a/doc/contribute.md +++ b/doc/contribute.md @@ -20,3 +20,6 @@ nexus: add metadata for resuming rebuild Followed by a longer explanation. +## Bors + +We are using the bors bot to automate testing and merging of PRs in a scalable way. diff --git a/doc/jenkins.md b/doc/jenkins.md index 7b9426fd0..d8cc4f60f 100644 --- a/doc/jenkins.md +++ b/doc/jenkins.md @@ -109,8 +109,14 @@ for system configuration of nodes (as opposed to using ansible, salt, etc.). 5. Load initial Jenkins page. Create mayastor user and set a password. Don't install any plugins. -6. After initial configuration install "blue ocean", ssh agent and - "multibranch scan webhook trigger" Jenkins plugins. +6. After initial configuration install the following plugins: + * blue ocean + * ssh agent + * multibranch scan webhook trigger + * embeddable build status + * pipeline stage view + * slack + * disable GitHub Multibranch Status 7. Enable read-only access for unauthenticated clients.
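The bors note added to doc/contribute.md above is deliberately brief. For reviewers new to bors, the usual interaction is a PR comment; this is a sketch assuming the stock bors-ng command set, which the bors.toml added in this change does not override:

    bors try    # run the Jenkins pipeline on the "trying" branch without merging
    bors r+     # approve; bors batches the change onto "staging" and merges it on green
    bors r-     # retract a previous approval

With required_approvals = 2 in bors.toml, bors acts on r+ only after two approving GitHub reviews, and it gates merging on the continuous-integration/jenkins/branch status that the Jenkinsfile reports via updateGithubCommitStatus.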
diff --git a/mayastor-test/e2e/go.mod b/mayastor-test/e2e/go.mod new file mode 100644 index 000000000..b5a7c00ce --- /dev/null +++ b/mayastor-test/e2e/go.mod @@ -0,0 +1,19 @@ +module e2e-basic + +go 1.15 + +require ( + github.com/onsi/ginkgo v1.12.1 + github.com/onsi/gomega v1.10.1 + github.com/pkg/errors v0.9.1 // indirect + github.com/prometheus/client_golang v1.5.1 // indirect + github.com/stretchr/testify v1.5.1 // indirect + golang.org/x/net v0.0.0-20200625001655-4c5254603344 // indirect + golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae // indirect + google.golang.org/appengine v1.6.5 // indirect + google.golang.org/protobuf v1.25.0 // indirect + k8s.io/api v0.18.6 + k8s.io/apimachinery v0.18.6 + k8s.io/client-go v0.18.6 + sigs.k8s.io/controller-runtime v0.6.2 +) diff --git a/mayastor-test/e2e/go.sum b/mayastor-test/e2e/go.sum new file mode 100644 index 000000000..5029a9981 --- /dev/null +++ b/mayastor-test/e2e/go.sum @@ -0,0 +1,510 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod 
h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod 
h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= +github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54= +github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.17.0/go.mod 
h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf 
v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod 
h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk= +github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= +github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= +github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 
h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 
+github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= +github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI= +github.com/prometheus/procfs v0.0.11/go.mod 
h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver 
v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net 
v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0= +gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.18.6 h1:osqrAXbOQjkKIWDTjrqxWQ3w0GkKb1KA1XkUGHHYpeE= +k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI= +k8s.io/apiextensions-apiserver v0.18.6 h1:vDlk7cyFsDyfwn2rNAO2DbmUbvXy5yT5GE3rrqOzaMo= +k8s.io/apiextensions-apiserver v0.18.6/go.mod h1:lv89S7fUysXjLZO7ke783xOwVTm6lKizADfvUM/SS/M= +k8s.io/apimachinery v0.18.6 h1:RtFHnfGNfd1N0LeSrKCUznz5xtUP1elRGvHJbL3Ntag= +k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= +k8s.io/apiserver v0.18.6/go.mod h1:Zt2XvTHuaZjBz6EFYzpp+X4hTmgWGy8AthNVnTdm3Wg= +k8s.io/client-go v0.18.6 h1:I+oWqJbibLSGsZj8Xs8F0aWVXJVIoUHWaaJV3kUN/Zw= +k8s.io/client-go v0.18.6/go.mod h1:/fwtGLjYMS1MaM5oi+eXhKwG+1UHidUEXRh6cNsdO0Q= +k8s.io/code-generator v0.18.6/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c= +k8s.io/component-base v0.18.6/go.mod h1:knSVsibPR5K6EW2XOjEHik6sdU5nCvKMrzMt2D4In14= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0/go.mod 
h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/klog/v2 v2.0.0 h1:Foj74zO6RbjjP4hBEKjnYtjjAhGg4jNynUdYF6fJrok= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY= +k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20200603063816-c1c6865ac451 h1:v8ud2Up6QK1lNOKFgiIVrZdMg7MpmSnvtrOieolJKoE= +k8s.io/utils v0.0.0-20200603063816-c1c6865ac451/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= +sigs.k8s.io/controller-runtime v0.6.2 h1:jkAnfdTYBpFwlmBn3pS5HFO06SfxvnTZ1p5PeEF/zAA= +sigs.k8s.io/controller-runtime v0.6.2/go.mod h1:vhcq/rlnENJ09SIRp3EveTaZ0yqH526hjf9iJdbUJ/E= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/mayastor-test/e2e/install/README.md b/mayastor-test/e2e/install/README.md new file mode 100644 index 000000000..6309f98c8 --- /dev/null +++ b/mayastor-test/e2e/install/README.md @@ -0,0 +1,24 @@ +# Prerequisites + +The test doesn't yet manage the lifecycle of the cluster being tested, +therefore the test host's kubeconfig must point to a Kubernetes cluster. +You can verify that the kubeconfig is set up correctly simply with +`kubectl get nodes`. + +The cluster under test must meet the following requirements: +* Have 3 nodes +* Each node must be configured per the quick start: + * At least 512 2MiB hugepages available + * Each node must be labelled for use by mayastor (i.e. "openebs.io/engine=mayastor") + +The test host must have the following installed: +* go (>= v1.15) +* ginkgo (tested with v1.2) +* kubectl (tested with v1.18) + +# Running the tests + +```sh +cd mayastor-test/e2e/install +go test +``` \ No newline at end of file diff --git a/mayastor-test/e2e/install/install_test.go b/mayastor-test/e2e/install/install_test.go new file mode 100644 index 000000000..5deca5729 --- /dev/null +++ b/mayastor-test/e2e/install/install_test.go @@ -0,0 +1,131 @@ +package basic_test + +import ( + "context" + "fmt" + "os/exec" + "path" + "runtime" + "testing" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/deprecated/scheme" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var cfg *rest.Config +var k8sClient client.Client +var k8sManager ctrl.Manager +var testEnv *envtest.Environment + +// Encapsulate the logic to find where the deploy yamls are +func getDeployYamlDir() string { + _, filename, _, _ := runtime.Caller(0) + return path.Clean(filename + "/../../../../deploy") +} + +// Helper for passing yaml from the deploy directory to kubectl +func applyDeployYaml(filename string) { + cmd := exec.Command("kubectl", "apply", "-f", filename) + cmd.Dir = getDeployYamlDir() + _, err := cmd.CombinedOutput() + Expect(err).ToNot(HaveOccurred()) +} + +// We expect this to fail a few times before it succeeds, +// so no throwing errors from here. +func mayastorReadyPodCount() int { + var mayastorDaemonSet appsv1.DaemonSet + if k8sClient.Get(context.TODO(), types.NamespacedName{Name: "mayastor", Namespace: "mayastor"}, &mayastorDaemonSet) != nil { + fmt.Println("Failed to get mayastor DaemonSet") + return -1 + } + + return int(mayastorDaemonSet.Status.CurrentNumberScheduled) +} + +// Install mayastor on the cluster under test. +// We deliberately call out to kubectl, rather than constructing the client-go +// objects, so that we can verfiy the local deploy yamls are correct. +func installMayastor() { + applyDeployYaml("namespace.yaml") + applyDeployYaml("moac-rbac.yaml") + applyDeployYaml("mayastorpoolcrd.yaml") + applyDeployYaml("nats-deployment.yaml") + applyDeployYaml("csi-daemonset.yaml") + applyDeployYaml("moac-deployment.yaml") + applyDeployYaml("mayastor-daemonset.yaml") + + // Given the yamls and the environment described in the test readme, + // we expect mayastor to be running on exactly 2 nodes. + Eventually(mayastorReadyPodCount(), + "60s", // timeout + "1s", // polling interval + ).Should(Equal(2)) +} + +func TestInstallSuite(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Basic Install Suite") +} + +var _ = Describe("Mayastor setup", func() { + It("should install using yamls", func() { + installMayastor() + }) +}) + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + + By("bootstrapping test environment") + useCluster := true + testEnv = &envtest.Environment{ + UseExistingCluster: &useCluster, + AttachControlPlaneOutput: true, + } + + var err error + cfg, err = testEnv.Start() + Expect(err).ToNot(HaveOccurred()) + Expect(cfg).ToNot(BeNil()) + + k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme.Scheme, + }) + Expect(err).ToNot(HaveOccurred()) + + go func() { + err = k8sManager.Start(ctrl.SetupSignalHandler()) + Expect(err).ToNot(HaveOccurred()) + }() + + mgrSyncCtx, mgrSyncCtxCancel := context.WithTimeout(context.Background(), 30*time.Second) + defer mgrSyncCtxCancel() + if synced := k8sManager.GetCache().WaitForCacheSync(mgrSyncCtx.Done()); !synced { + fmt.Println("Failed to sync") + } + + k8sClient = k8sManager.GetClient() + Expect(k8sClient).ToNot(BeNil()) + + close(done) +}, 60) + +var _ = AfterSuite(func() { + // NB This only tears down the local structures for talking to the cluster, + // not the kubernetes cluster itself. 
+ By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).ToNot(HaveOccurred()) +}) diff --git a/mayastor-test/test_cli.js b/mayastor-test/test_cli.js index d993f78ba..dca3e1ee7 100644 --- a/mayastor-test/test_cli.js +++ b/mayastor-test/test_cli.js @@ -61,8 +61,7 @@ describe('cli', function () { method: 'CreatePool', input: { name: POOL, - disks: [DISK], - blockSize: 512 + disks: [DISK] }, output: { name: POOL, @@ -294,7 +293,7 @@ describe('cli', function () { it('should create a pool', function (done) { const cmd = util.format( - '%s pool create -b 512 %s %s', + '%s pool create %s %s', EGRESS_CMD, POOL, DISK diff --git a/mayastor-test/test_common.js b/mayastor-test/test_common.js index 3de9c8d8d..044db6f28 100644 --- a/mayastor-test/test_common.js +++ b/mayastor-test/test_common.js @@ -15,7 +15,6 @@ const sudo = require('./sudo'); const SOCK = '/tmp/mayastor_test.sock'; const MS_CONFIG_PATH = '/tmp/mayastor_test.cfg'; -const SPDK_CONFIG_PATH = '/tmp/spdk_test.cfg'; const GRPC_PORT = 10124; const CSI_ENDPOINT = '/tmp/mayastor_csi_test.sock'; const CSI_ID = 'test-node-id'; @@ -163,36 +162,8 @@ function startProcess (command, args, env, closeCb, psName, suffix) { procs[procsIndex] = proc; } -// Start spdk process and return immediately. -function startSpdk (config, args, env) { - args = args || ['-r', SOCK]; - env = env || {}; - - if (config) { - fs.writeFileSync(SPDK_CONFIG_PATH, config); - args = args.concat(['-c', SPDK_CONFIG_PATH]); - } - - startProcess( - 'spdk', - args, - _.assign( - { - MAYASTOR_DELAY: '1' - }, - env - ), - () => { - try { - fs.unlinkSync(SPDK_CONFIG_PATH); - } catch (err) {} - }, - 'reactor_0' - ); -} - // Start mayastor process and return immediately. -function startMayastor (config, args, env, yaml, suffix) { +function startMayastor (config, args, env, suffix) { args = args || ['-r', SOCK, '-g', grpcEndpoint]; env = env || {}; let configPath = MS_CONFIG_PATH; @@ -200,14 +171,9 @@ function startMayastor (config, args, env, yaml, suffix) { configPath += suffix; } - if (yaml) { - fs.writeFileSync(configPath, yaml); - args = args.concat(['-y', configPath]); - } - if (config) { fs.writeFileSync(configPath, config); - args = args.concat(['-c', configPath]); + args = args.concat(['-y', configPath]); } startProcess( @@ -475,7 +441,6 @@ module.exports = { CSI_ENDPOINT, CSI_ID, SOCK, - startSpdk, startMayastor, startMayastorCsi, stopAll, diff --git a/mayastor-test/test_csi.js b/mayastor-test/test_csi.js index 5f6b42973..bb26ac338 100644 --- a/mayastor-test/test_csi.js +++ b/mayastor-test/test_csi.js @@ -149,7 +149,7 @@ describe('csi', function () { // NOTE: Don't use mayastor in setup - we test CSI interface and we don't want // to depend on correct function of mayastor iface in order to test CSI. 
before((done) => { - common.startMayastor(null, null, null, CONFIG); + common.startMayastor(CONFIG); common.startMayastorCsi(); var client = common.createGrpcClient(); diff --git a/mayastor-test/test_nexus.js b/mayastor-test/test_nexus.js index f8bd70682..b32d3b863 100644 --- a/mayastor-test/test_nexus.js +++ b/mayastor-test/test_nexus.js @@ -19,6 +19,7 @@ const url = require('url'); // just some UUID used for nexus ID const UUID = 'dbe4d7eb-118a-4d15-b789-a18d9af6ff21'; const UUID2 = 'dbe4d7eb-118a-4d15-b789-a18d9af6ff22'; +const TGTUUID = 'dbe4d7eb-118a-4d15-b789-a18d9af6ff29'; // backend file for aio bdev const aioFile = '/tmp/aio-backend'; @@ -41,42 +42,28 @@ const doIscsiReplica = false; // test the nexus with implementation of replicas which are used in the // production. const configNexus = ` -[Malloc] - NumberOfLuns 1 - LunSizeInMB 64 - BlockSize 4096 +sync_disable: true +base_bdevs: + - uri: "malloc:///Malloc0?size_mb=64&blk_size=4096" `; // The config just for nvmf target which cannot run in the same process as // the nvmf initiator (SPDK limitation). const configNvmfTarget = ` -[Malloc] - NumberOfLuns 1 - LunSizeInMB 64 - BlockSize 4096 - -[Nvmf] - AcceptorPollRate 10000 - ConnectionScheduler RoundRobin - -[Transport] - Type TCP - # reduce memory requirements - NumSharedBuffers 64 - -[Subsystem1] - NQN nqn.2019-05.io.openebs:disk2 - Listen TCP 127.0.0.1:8420 - AllowAnyHost Yes - SN MAYASTOR0000000001 - MN NEXUSController1 - MaxNamespaces 1 - Namespace Malloc0 1 - +sync_disable: true +base_bdevs: + - uri: "malloc:///Malloc0?size_mb=64&blk_size=4096&uuid=${TGTUUID}" +nexus_opts: + nvmf_nexus_port: 4422 + nvmf_replica_port: 8420 + iscsi_enable: false +nvmf_tcp_tgt_conf: + max_namespaces: 2 # although not used we still have to reduce mem requirements for iSCSI -[iSCSI] - MaxSessions 1 - MaxConnectionsPerSession 1 +iscsi_tgt_conf: + max_sessions: 1 + max_connections_per_session: 1 +implicit_share_base: true `; var client; @@ -255,7 +242,7 @@ describe('nexus', function () { uuid: UUID, size: 131072, children: [ - 'nvmf://127.0.0.1:8420/nqn.2019-05.io.openebs:disk2', + `nvmf://127.0.0.1:8420/nqn.2019-05.io.openebs:${TGTUUID}`, `aio://${aioFile}?blk_size=4096` ] }; @@ -272,19 +259,23 @@ describe('nexus', function () { common.ensureNbdWritable, // start this as early as possible to avoid mayastor getting connection refused. (next) => { - // Start two spdk instances. The first one will hold the remote + // Start two Mayastor instances. The first one will hold the remote // nvmf target and the second one everything including nexus. // We must do this because if nvme initiator and target are in // the same instance, the SPDK will hang. // // In order not to exceed available memory in hugepages when running // two instances we use the -s option to limit allocated mem. 
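// (The -s value is a memory size in MB, the same units as SPDK's -s/--mem-size option.)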
- common.startSpdk(configNvmfTarget, [ + common.startMayastor(configNvmfTarget, [ '-r', '/tmp/target.sock', '-s', - '128' - ]); + '128', + '-g', + '127.0.0.1:10125' + ], + { MY_POD_IP: '127.0.0.1' }, + '_tgt'); common.waitFor((pingDone) => { // use harmless method to test if spdk is up and running common.jsonrpcCommand('/tmp/target.sock', 'bdev_get_bdevs', pingDone); @@ -355,7 +346,7 @@ describe('nexus', function () { children: [ 'bdev:///Malloc0', `aio://${aioFile}?blk_size=4096`, - 'nvmf://127.0.0.1:8420/nqn.2019-05.io.openebs:disk2' + `nvmf://127.0.0.1:8420/nqn.2019-05.io.openebs:${TGTUUID}` ] }; if (doIscsiReplica) args.children.push(`iscsi://iscsi://${externIp}:${iscsiReplicaPort}/iqn.2019-05.io.openebs:disk1`); @@ -378,7 +369,7 @@ describe('nexus', function () { assert.equal( nexus.children[2].uri, - 'nvmf://127.0.0.1:8420/nqn.2019-05.io.openebs:disk2' + `nvmf://127.0.0.1:8420/nqn.2019-05.io.openebs:${TGTUUID}` ); assert.equal(nexus.children[2].state, 'CHILD_ONLINE'); if (doIscsiReplica) { @@ -428,7 +419,7 @@ describe('nexus', function () { assert.equal( nexus.children[2].uri, - 'nvmf://127.0.0.1:8420/nqn.2019-05.io.openebs:disk2' + `nvmf://127.0.0.1:8420/nqn.2019-05.io.openebs:${TGTUUID}` ); assert.equal(nexus.children[2].state, 'CHILD_ONLINE'); if (doIscsiReplica) { @@ -454,7 +445,7 @@ describe('nexus', function () { it('should be able to remove one of its children', (done) => { const args = { uuid: UUID, - uri: 'nvmf://127.0.0.1:8420/nqn.2019-05.io.openebs:disk2' + uri: `nvmf://127.0.0.1:8420/nqn.2019-05.io.openebs:${TGTUUID}` }; client.removeChildNexus(args, (err) => { @@ -472,7 +463,7 @@ describe('nexus', function () { }); it('should be able to add the child back', (done) => { - const uri = 'nvmf://127.0.0.1:8420/nqn.2019-05.io.openebs:disk2'; + const uri = `nvmf://127.0.0.1:8420/nqn.2019-05.io.openebs:${TGTUUID}`; const args = { uuid: UUID, uri: uri, @@ -499,7 +490,7 @@ describe('nexus', function () { const args = { uuid: UUID2, size: 131072, - children: ['nvmf://127.0.0.1:8420/nqn.2019-05.io.openebs:disk2'] + children: [`nvmf://127.0.0.1:8420/nqn.2019-05.io.openebs:${TGTUUID}`] }; client.createNexus(args, (err) => { @@ -724,7 +715,7 @@ describe('nexus', function () { size: 2 * diskSize, children: [ `aio://${aioFile}?blk_size=4096`, - 'nvmf://127.0.0.1:8420/nqn.2019-05.io.openebs:disk2' + `nvmf://127.0.0.1:8420/nqn.2019-05.io.openebs:${TGTUUID}` ] }; diff --git a/mayastor-test/test_rebuild.js b/mayastor-test/test_rebuild.js index ad5c205bf..27bfb569d 100644 --- a/mayastor-test/test_rebuild.js +++ b/mayastor-test/test_rebuild.js @@ -19,49 +19,6 @@ const child2 = '/tmp/child2'; const diskSize = 100 * 1024 * 1024; // nexus UUID const UUID = 'dbe4d7eb-118a-4d15-b789-a18d9af6ff21'; -// external IP address detected by common lib -const externIp = common.getMyIp(); - -// Instead of using mayastor grpc methods to create replicas we use a config -// file to create them. Advantage is that we don't depend on bugs in replica -// code (the nexus tests are more independent). Disadvantage is that we don't -// test the nexus with implementation of replicas which are used in the -// production. -const configNexus = ` -[Malloc] - NumberOfLuns 2 - LunSizeInMB 64 - BlockSize 4096 - -[iSCSI] - NodeBase "iqn.2019-05.io.openebs" - # Socket I/O timeout sec. 
(0 is infinite) - Timeout 30 - DiscoveryAuthMethod None - DefaultTime2Wait 2 - DefaultTime2Retain 60 - ImmediateData Yes - ErrorRecoveryLevel 0 - # Reduce mem requirements for iSCSI - MaxSessions 1 - MaxConnectionsPerSession 1 - -[PortalGroup1] - Portal GR1 0.0.0.0:3261 - -[InitiatorGroup1] - InitiatorName Any - Netmask ${externIp}/24 - -[TargetNode0] - TargetName "iqn.2019-05.io.openebs:disk1" - TargetAlias "Backend Malloc1" - Mapping PortalGroup1 InitiatorGroup1 - AuthMethod None - UseDigest Auto - LUN0 Malloc1 - QueueDepth 1 -`; const nexusArgs = { uuid: UUID, @@ -190,7 +147,7 @@ describe('rebuild tests', function () { fs.truncate(child2, diskSize, next); }, (next) => { - common.startMayastor(configNexus, ['-r', common.SOCK, '-g', common.grpcEndpoint, '-s', 386]); + common.startMayastor(null, ['-r', common.SOCK, '-g', common.grpcEndpoint, '-s', 384]); common.waitFor((pingDone) => { pingMayastor(pingDone); }, next); diff --git a/mayastor-test/test_replica.js b/mayastor-test/test_replica.js index f46fc7325..f276496f0 100644 --- a/mayastor-test/test_replica.js +++ b/mayastor-test/test_replica.js @@ -13,7 +13,6 @@ const path = require('path'); const { exec } = require('child_process'); const grpc = require('grpc'); const common = require('./test_common'); -const enums = require('./grpc_enums'); const POOL = 'tpool'; const DISK_FILE = '/tmp/mayastor_test_disk'; @@ -161,9 +160,9 @@ describe('replica', function () { it('should not create a pool with invalid block size', (done) => { client.createPool( - { name: POOL, disks: disks, block_size: 1238513 }, + { name: POOL, disks: disks.map((d) => `${d}?blk_size=1238513`) }, (err) => { - assert.equal(err.code, grpc.status.INVALID_ARGUMENT); + assert.equal(err.code, grpc.status.INTERNAL); done(); } ); @@ -172,7 +171,7 @@ describe('replica', function () { it('should create a pool with aio bdevs', (done) => { // explicitly specify aio as that always works client.createPool( - { name: POOL, disks: disks, io_if: enums.POOL_IO_AIO }, + { name: POOL, disks: disks.map((d) => `aio://${d}`) }, (err, res) => { if (err) return done(err); assert.equal(res.name, POOL); @@ -452,9 +451,9 @@ describe('replica', function () { }); }); - it('should create a pool with uring io_if', (done) => { + it('should create a pool with uring bdevs', (done) => { client.createPool( - { name: POOL, disks: disks.map((d) => `uring://${d}`), io_if: enums.POOL_IO_URING }, + { name: POOL, disks: disks.map((d) => `uring://${d}`) }, (err, res) => { if (err) return done(err); assert.equal(res.name, POOL); diff --git a/mayastor-test/test_snapshot.js b/mayastor-test/test_snapshot.js index 52e54d451..586f08400 100644 --- a/mayastor-test/test_snapshot.js +++ b/mayastor-test/test_snapshot.js @@ -30,11 +30,19 @@ nexus_opts: iscsi_enable: false iscsi_nexus_port: 3260 iscsi_replica_port: 3262 +pools: + - name: pool0 + disks: + - aio:///tmp/pool-backend + replicas: [] `; var client, client2; var disks; +// URI of Nexus published over NVMf +var nexusUri; + describe('snapshot', function () { this.timeout(10000); // for network tests we need long timeouts @@ -47,10 +55,16 @@ describe('snapshot', function () { if (!client2) { return done(new Error('Failed to initialize grpc client for 2nd Mayastor instance')); } - disks = ['aio://' + poolFile]; + disks = [poolFile]; async.series( [ + (next) => { + fs.writeFile(poolFile, '', next); + }, + (next) => { + fs.truncate(poolFile, diskSize, next); + }, // start this as early as possible to avoid mayastor getting connection refused. 
(next) => { // Start another mayastor instance for the remote nvmf target of the @@ -58,7 +72,7 @@ // SPDK hangs if nvme initiator and target are in the same instance. // // Use -s option to limit hugepage allocation. - common.startMayastor(null, [ + common.startMayastor(config, [ '-r', '/tmp/target.sock', '-s', @@ -67,19 +81,12 @@ '127.0.0.1:10125' ], null, - config, '_tgt'); common.waitFor((pingDone) => { // use harmless method to test if the mayastor is up and running client2.listPools({}, pingDone); }, next); }, - (next) => { - fs.writeFile(poolFile, '', next); - }, - (next) => { - fs.truncate(poolFile, diskSize, next); - }, (next) => { common.startMayastor(null, ['-r', common.SOCK, '-g', common.grpcEndpoint, '-s', 384]); @@ -116,10 +123,20 @@ ); }); + it('should destroy the pool loaded from yaml', (done) => { + client2.destroyPool( + { name: poolName }, + (err, res) => { + if (err) return done(err); + done(); + } + ); + }); + it('should create a pool with aio bdevs', (done) => { // explicitly specify aio as that always works client2.createPool( - { name: poolName, disks: disks, io_if: enums.POOL_IO_AIO }, + { name: poolName, disks: disks.map((d) => `aio://${d}`) }, (err, res) => { if (err) return done(err); assert.equal(res.name, poolName); @@ -178,6 +195,21 @@ }); }); + it('should publish the nexus on nvmf', (done) => { + client.publishNexus( + { + uuid: UUID, + share: enums.NEXUS_NVMF + }, + (err, res) => { + if (err) return done(err); + assert(res.device_uri); + nexusUri = res.device_uri; + done(); + } + ); + }); + it('should create a snapshot on the nexus', (done) => { const args = { uuid: UUID }; client.createSnapshot(args, (err) => { @@ -197,6 +229,32 @@ assert.equal(res.uuid.startsWith(replicaUuid + '-snap-'), true); assert.equal(res.share, 'REPLICA_NONE'); assert.match(res.uri, /^bdev:\/\/\//); + // Wait 1 second so that the 2nd snapshot has a different name and can + // be created successfully + setTimeout(done, 1000); + }); + }); + + it('should take snapshot on nvmf-published nexus', (done) => { + common.execAsRoot( + common.getCmdPath('initiator'), + [nexusUri, 'create-snapshot'], + done + ); + }); + + it('should list the 2 snapshots as replicas', (done) => { + client2.listReplicas({}, (err, res) => { + if (err) return done(err); + + res = res.replicas.filter((ent) => ent.pool === poolName); + assert.lengthOf(res, 3); + var i; + for (i = 1; i < 3; i++) { + assert.equal(res[i].uuid.startsWith(replicaUuid + '-snap-'), true); + assert.equal(res[i].share, 'REPLICA_NONE'); + assert.match(res[i].uri, /^bdev:\/\/\//); + } done(); }); }); diff --git a/mayastor/Cargo.toml b/mayastor/Cargo.toml index 0d17f519a..8397c0a4d 100644 --- a/mayastor/Cargo.toml +++ b/mayastor/Cargo.toml @@ -56,6 +56,7 @@ log = "0.4" nix = "0.16" once_cell = "1.3.1" pin-utils = "0.1" +proc-mounts = "0.2" prost = "0.6" prost-derive = "0.6" prost-types = "0.6" @@ -65,14 +66,17 @@ serde_yaml = "0.8" signal-hook = "0.1" snafu = "0.6" structopt = "0.3.11" -nats = "0.7.4" +nats = "0.8" tonic = "0.1" tower = "0.3" tracing = "0.1" tracing-futures = "0.2.4" tracing-log = "0.1.1" tracing-subscriber = "0.2.0" +udev = "0.4" url = "2.1" +smol = "1.0.0" +dns-lookup = "1.0.4" [dependencies.rpc] path = "../rpc" diff --git a/mayastor/src/bdev/mod.rs b/mayastor/src/bdev/mod.rs index f7c2d560f..648ca8a9a 100644 --- a/mayastor/src/bdev/mod.rs +++ 
b/mayastor/src/bdev/mod.rs @@ -9,7 +9,7 @@ pub use nexus::{ NexusStatus, VerboseError, }, - nexus_child::ChildStatus, + nexus_child::{ChildState, Reason}, nexus_child_error_store::{ActionType, NexusErrStore, QueryType}, nexus_child_status_config, nexus_label::{GPTHeader, GptEntry}, diff --git a/mayastor/src/bdev/nexus/nexus_bdev.rs b/mayastor/src/bdev/nexus/nexus_bdev.rs index 285d56369..be1e3574f 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev.rs @@ -40,7 +40,7 @@ use crate::{ nexus::{ instances, nexus_channel::{DREvent, NexusChannel, NexusChannelInner}, - nexus_child::{ChildError, ChildState, ChildStatus, NexusChild}, + nexus_child::{ChildError, ChildState, NexusChild}, nexus_io::{io_status, nvme_admin_opc, Bio}, nexus_iscsi::{NexusIscsiError, NexusIscsiTarget}, nexus_label::LabelError, @@ -160,6 +160,12 @@ pub enum Error { name ))] RemoveLastChild { child: String, name: String }, + #[snafu(display( + "Cannot fault the last healthy child {} of nexus {}", + child, + name + ))] + FaultingLastHealthyChild { child: String, name: String }, #[snafu(display("Failed to destroy child {} of nexus {}", child, name))] DestroyChild { source: NexusBdevError, @@ -505,7 +511,7 @@ impl Nexus { trace!("{}: closing, from state: {:?} ", self.name, self.state); self.children.iter_mut().for_each(|c| { - if c.state == ChildState::Open { + if c.state() == ChildState::Open { c.close(); } }); @@ -567,6 +573,8 @@ impl Nexus { } if r.await.unwrap() { + // Update the child states to remove them from the config file. + NexusChild::save_state_change(); Ok(()) } else { Err(Error::NexusDestroy { @@ -900,14 +908,14 @@ impl Nexus { .children .iter() // All children are online, so the Nexus is also online - .all(|c| c.status() == ChildStatus::Online) + .all(|c| c.state() == ChildState::Open) { NexusStatus::Online } else if self .children .iter() // at least one child is online, so the Nexus is degraded - .any(|c| c.status() == ChildStatus::Online) + .any(|c| c.state() == ChildState::Open) { NexusStatus::Degraded } else { diff --git a/mayastor/src/bdev/nexus/nexus_bdev_children.rs b/mayastor/src/bdev/nexus/nexus_bdev_children.rs index 2aea95b2f..d1f5c99d1 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev_children.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev_children.rs @@ -39,7 +39,7 @@ use crate::{ OpenChild, }, nexus_channel::DREvent, - nexus_child::{ChildState, ChildStatus, NexusChild}, + nexus_child::{ChildState, NexusChild}, nexus_child_status_config::ChildStatusConfig, nexus_label::{ LabelError, @@ -48,6 +48,7 @@ use crate::{ NexusLabelStatus, }, }, + Reason, VerboseError, }, core::Bdev, @@ -115,7 +116,7 @@ impl Nexus { e.verbose() ); match self.get_child_by_name(uri) { - Ok(child) => child.fault(), + Ok(child) => child.fault(Reason::RebuildFailed), Err(e) => error!( "Failed to find newly added child {}, error: {}", uri, @@ -181,7 +182,7 @@ impl Nexus { // it can never take part in the IO path // of the nexus until it's rebuilt from a healthy child. 
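// (Faulted(Reason::OutOfSync) marks the child as a rebuild candidate rather than permanently failed; the rebuild logic only accepts children in exactly this state as destinations.)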
- child.out_of_sync(true); + child.fault(Reason::OutOfSync); if ChildStatusConfig::add(&child).is_err() { error!("Failed to add child status information"); } @@ -230,7 +231,7 @@ impl Nexus { }; self.children[idx].close(); - assert_eq!(self.children[idx].state, ChildState::Closed); + assert_eq!(self.children[idx].state(), ChildState::Closed); let mut child = self.children.remove(idx); self.child_count -= 1; @@ -274,7 +275,11 @@ impl Nexus { } /// fault a child device and reconfigure the IO channels - pub async fn fault_child(&mut self, name: &str) -> Result<(), Error> { + pub async fn fault_child( + &mut self, + name: &str, + reason: Reason, + ) -> Result<(), Error> { trace!("{}: fault child request for {}", self.name, name); if self.child_count < 2 { @@ -284,14 +289,32 @@ impl Nexus { }); } + let healthy_children = self + .children + .iter() + .filter(|c| c.state() == ChildState::Open) + .collect::<Vec<_>>(); + + if healthy_children.len() == 1 && healthy_children[0].name == name { + // the last healthy child cannot be faulted + return Err(Error::FaultingLastHealthyChild { + name: self.name.clone(), + child: name.to_owned(), + }); + } + let cancelled_rebuilding_children = self.cancel_child_rebuild_jobs(name).await; let result = match self.children.iter_mut().find(|c| c.name == name) { Some(child) => { - if child.status() != ChildStatus::Faulted { - child.fault(); - self.reconfigure(DREvent::ChildFault).await; + match child.state() { + ChildState::Faulted(_) => {} + _ => { + child.fault(reason); + NexusChild::save_state_change(); + self.reconfigure(DREvent::ChildFault).await; + } } Ok(()) } @@ -348,7 +371,7 @@ impl Nexus { pub fn examine_child(&mut self, name: &str) -> bool { self.children .iter_mut() - .filter(|c| c.state == ChildState::Init && c.name == name) + .filter(|c| c.state() == ChildState::Init && c.name == name) .any(|c| { if let Some(bdev) = Bdev::lookup_by_name(name) { c.bdev = Some(bdev); @@ -515,7 +538,7 @@ impl Nexus { let mut blockcnt = std::u64::MAX; self.children .iter() - .filter(|c| c.state == ChildState::Open) + .filter(|c| c.state() == ChildState::Open) .map(|c| c.bdev.as_ref().unwrap().num_blocks()) .collect::<Vec<u64>>() .iter() diff --git a/mayastor/src/bdev/nexus/nexus_bdev_rebuild.rs b/mayastor/src/bdev/nexus/nexus_bdev_rebuild.rs index b1a3b3700..345b071fa 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev_rebuild.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev_rebuild.rs @@ -16,7 +16,7 @@ use crate::{ RemoveRebuildJob, }, nexus_channel::DREvent, - nexus_child::{ChildState, ChildStatus}, + nexus_child::{ChildState, NexusChild, Reason}, }, VerboseError, }, @@ -36,7 +36,7 @@ impl Nexus { let src_child_name = match self .children .iter() - .find(|c| c.state == ChildState::Open && c.name != name) + .find(|c| c.state() == ChildState::Open && c.name != name) { Some(child) => Ok(child.name.clone()), None => Err(Error::NoRebuildSource { @@ -46,13 +46,15 @@ impl Nexus { let dst_child_name = match self.children.iter_mut().find(|c| c.name == name) { - Some(c) if c.status() == ChildStatus::Degraded => { + Some(c) + if c.state() == ChildState::Faulted(Reason::OutOfSync) => + { Ok(c.name.clone()) } Some(c) => Err(Error::ChildNotDegraded { child: name.to_owned(), name: self.name.clone(), - state: c.status().to_string(), + state: c.state().to_string(), }), None => Err(Error::ChildNotFound { child: name.to_owned(), @@ -236,13 +238,12 @@ impl Nexus { match job.state() { RebuildState::Completed => { - recovering_child.out_of_sync(false); + recovering_child.set_state(ChildState::Open); + 
NexusChild::save_state_change(); info!( "Child {} has been rebuilt successfully", recovering_child.name ); - - assert_eq!(recovering_child.status(), ChildStatus::Online); } RebuildState::Stopped => { info!( @@ -259,7 +260,7 @@ { // todo: retry rebuild using another child as source? } - recovering_child.fault(); + recovering_child.fault(Reason::RebuildFailed); error!( "Rebuild job for child {} of nexus {} failed, error: {}", &job.destination, @@ -268,7 +269,7 @@ ); } _ => { - recovering_child.fault(); + recovering_child.fault(Reason::RebuildFailed); error!( "Rebuild job for child {} of nexus {} failed with state {:?}", &job.destination, diff --git a/mayastor/src/bdev/nexus/nexus_bdev_snapshot.rs b/mayastor/src/bdev/nexus/nexus_bdev_snapshot.rs index 6f8b808d9..0bbf7973f 100644 --- a/mayastor/src/bdev/nexus/nexus_bdev_snapshot.rs +++ b/mayastor/src/bdev/nexus/nexus_bdev_snapshot.rs @@ -5,16 +5,16 @@ use rpc::mayastor::CreateSnapshotReply; use crate::{ bdev::nexus::nexus_bdev::{Error, Nexus}, core::BdevHandle, - replica::Replica, + lvs::Lvol, }; impl Nexus { /// Create a snapshot on all children pub async fn create_snapshot(&self) -> Result<CreateSnapshotReply, Error> { - if let Ok(h) = BdevHandle::open_with_bdev(&self.bdev, true) { + if let Ok(h) = BdevHandle::open_with_bdev(&self.bdev, false) { match h.create_snapshot().await { Ok(t) => Ok(CreateSnapshotReply { - name: Replica::format_snapshot_name(&self.bdev.name(), t), + name: Lvol::format_snapshot_name(&self.bdev.name(), t), }), Err(_e) => Err(Error::FailedCreateSnapshot), } diff --git a/mayastor/src/bdev/nexus/nexus_channel.rs b/mayastor/src/bdev/nexus/nexus_channel.rs index 15251a93d..cce09bfe1 100644 --- a/mayastor/src/bdev/nexus/nexus_channel.rs +++ b/mayastor/src/bdev/nexus/nexus_channel.rs @@ -12,7 +12,7 @@ use spdk_sys::{ }; use crate::{ - bdev::{nexus::nexus_child::ChildStatus, Nexus}, + bdev::{nexus::nexus_child::ChildState, Nexus}, core::BdevHandle, }; @@ -90,7 +90,7 @@ impl NexusChannelInner { nexus .children .iter_mut() - .filter(|c| c.status() == ChildStatus::Online) + .filter(|c| c.state() == ChildState::Open) .for_each(|c| { self.ch.push( BdevHandle::try_from(c.get_descriptor().unwrap()).unwrap(), @@ -143,7 +143,7 @@ impl NexusChannel { nexus .children .iter_mut() - .filter(|c| c.status() == ChildStatus::Online) + .filter(|c| c.state() == ChildState::Open) .map(|c| { channels.ch.push( BdevHandle::try_from(c.get_descriptor().unwrap()).unwrap(), diff --git a/mayastor/src/bdev/nexus/nexus_child.rs b/mayastor/src/bdev/nexus/nexus_child.rs index 8b5ec143e..2f6938b19 100644 --- a/mayastor/src/bdev/nexus/nexus_child.rs +++ b/mayastor/src/bdev/nexus/nexus_child.rs @@ -8,7 +8,10 @@ use spdk_sys::{spdk_bdev_module_release_bdev, spdk_io_channel}; use crate::{ bdev::{ - nexus::nexus_child_status_config::ChildStatusConfig, + nexus::{ + nexus_child::ChildState::Faulted, + nexus_child_status_config::ChildStatusConfig, + }, NexusErrStore, }, core::{Bdev, BdevHandle, CoreError, Descriptor, DmaBuf}, @@ -35,8 +38,8 @@ pub enum ChildError { OpenChild { source: CoreError }, #[snafu(display("Claim child"))] ClaimChild { source: Errno }, - #[snafu(display("Child is closed"))] - ChildClosed {}, + #[snafu(display("Child is inaccessible"))] + ChildInaccessible {}, #[snafu(display("Invalid state of child"))] ChildInvalid {}, #[snafu(display("Opening child bdev without bdev pointer"))] @@ -55,81 +58,63 @@ pub enum ChildIoError { InvalidDescriptor { name: String }, } -#[derive(Debug, Clone, Copy, Serialize, PartialEq)] -pub enum ChildStatus { - /// 
available for RW - Online, - /// temporarily unavailable for R, out of sync with nexus (needs rebuild) - Degraded, - /// permanently unavailable for RW - Faulted, -} - -#[derive(Debug, Serialize, Deserialize, Default, Copy, Clone)] -pub(crate) struct StatusReasons { - /// Degraded - /// +#[derive(Debug, Serialize, PartialEq, Deserialize, Copy, Clone)] +pub enum Reason { + /// no particular reason for the child to be in this state + /// this is typically the init state + Unknown, /// out of sync - needs to be rebuilt - out_of_sync: bool, - /// temporarily closed - offline: bool, - - /// Faulted - /// fatal error, cannot be recovered - fatal_error: bool, + OutOfSync, + /// cannot open + CantOpen, + /// the child failed to rebuild successfully + RebuildFailed, + /// the child has been faulted due to I/O error(s) + IoError, + /// the child has been explicitly faulted due to an RPC call + Rpc, } -impl StatusReasons { - /// a fault occurred, it is not recoverable - fn fatal_error(&mut self) { - self.fatal_error = true; - } - - /// set offline - fn offline(&mut self, offline: bool) { - self.offline = offline; - } - - /// out of sync with nexus, needs a rebuild - fn out_of_sync(&mut self, out_of_sync: bool) { - self.out_of_sync = out_of_sync; +impl Display for Reason { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + Self::Unknown => write!(f, "Unknown"), + Self::OutOfSync => { + write!(f, "The child is out of sync and requires a rebuild") + } + Self::CantOpen => write!(f, "The child bdev could not be opened"), + Self::RebuildFailed => { + write!(f, "The child failed to rebuild successfully") + } + Self::IoError => write!(f, "The child had too many I/O errors"), + Self::Rpc => write!(f, "The child is faulted due to an RPC call"), + } } } -#[derive(Debug, Clone, Copy, Serialize, PartialEq)] -pub(crate) enum ChildState { +#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)] +pub enum ChildState { /// child has not been opened, but we are in the process of opening it Init, /// cannot add this bdev to the parent as it's incompatible property-wise ConfigInvalid, /// the child is open for RW Open, - /// unusable by the nexus for RW + /// the child has been closed by the nexus Closed, + /// the child is faulted + Faulted(Reason), } -impl ToString for ChildState { - fn to_string(&self) -> String { - match *self { - ChildState::Init => "init", - ChildState::ConfigInvalid => "configInvalid", - ChildState::Open => "open", - ChildState::Closed => "closed", - } - .parse() - .unwrap() - } -} - -impl ToString for ChildStatus { - fn to_string(&self) -> String { - match *self { - ChildStatus::Degraded => "degraded", - ChildStatus::Faulted => "faulted", - ChildStatus::Online => "online", +impl Display for ChildState { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + Self::Faulted(r) => write!(f, "Faulted with reason {}", r), + Self::Init => write!(f, "Init"), + Self::ConfigInvalid => write!(f, "Config parameters are invalid"), + Self::Open => write!(f, "Child is open"), + Self::Closed => write!(f, "Closed"), } - .parse() - .unwrap() } } @@ -149,8 +134,8 @@ pub struct NexusChild { #[serde(skip_serializing)] pub(crate) desc: Option<Arc<Descriptor>>, /// current state of the child - pub(crate) state: ChildState, - pub(crate) status_reasons: StatusReasons, + #[serde(skip_serializing)] + state: ChildState, /// descriptor obtained after opening a device #[serde(skip_serializing)] pub(crate) bdev_handle: Option<BdevHandle>, @@ -165,46 +150,60 @@ impl Display for NexusChild { let bdev 
= self.bdev.as_ref().unwrap(); writeln!( f, - "{}: {:?}/{:?}, blk_cnt: {}, blk_size: {}", + "{}: {:?}, blk_cnt: {}, blk_size: {}", self.name, - self.state, - self.status(), + self.state(), bdev.num_blocks(), bdev.block_len(), ) } else { - writeln!( - f, - "{}: state {:?}/{:?}", - self.name, - self.state, - self.status() - ) + writeln!(f, "{}: state {:?}", self.name, self.state()) } } } impl NexusChild { + pub(crate) fn set_state(&mut self, state: ChildState) { + trace!( + "{}: child {}: state change from {} to {}", + self.parent, + self.name, + self.state.to_string(), + state.to_string(), + ); + + self.state = state; + } + /// Open the child in RW mode and claim the device to be ours. If the child /// is already opened by someone else (i.e. one of the targets) it will /// error out. /// /// only devices in the closed or Init state can be opened. + /// + /// A child can only be opened if: + /// - it's not faulted + /// - it's not already opened pub(crate) fn open( &mut self, parent_size: u64, ) -> Result<String, ChildError> { trace!("{}: Opening child device {}", self.parent, self.name); - if self.status() == ChildStatus::Faulted { - return Err(ChildError::ChildFaulted {}); - } - if self.state != ChildState::Closed && self.state != ChildState::Init { - return Err(ChildError::ChildNotClosed {}); - } - - if self.bdev.is_none() { - return Err(ChildError::OpenWithoutBdev {}); + // verify the state of the child before we open it + match self.state() { + ChildState::Faulted(reason) => { + error!( + "{}: cannot open child {}, reason: {}", + self.parent, self.name, reason + ); + return Err(ChildError::ChildFaulted {}); + } + ChildState::Open => { + // the child (should) already be open + assert_eq!(self.bdev.is_some(), true); + } + _ => {} } let bdev = self.bdev.as_ref().unwrap(); @@ -212,23 +211,28 @@ impl NexusChild { let child_size = bdev.size_in_bytes(); if parent_size > child_size { error!( - "{}: child too small, parent size: {} child size: {}", - self.name, parent_size, child_size + "{}: child {} too small, parent size: {} child size: {}", + self.parent, self.name, parent_size, child_size ); - self.state = ChildState::ConfigInvalid; + + self.set_state(ChildState::ConfigInvalid); return Err(ChildError::ChildTooSmall { parent_size, child_size, }); } - self.desc = Some(Arc::new( - Bdev::open_by_name(&bdev.name(), true).context(OpenChild {})?, - )); + let desc = Arc::new(Bdev::open_by_name(&bdev.name(), true).map_err( + |source| { + self.set_state(Faulted(Reason::CantOpen)); + ChildError::OpenChild { + source, + } + }, + )?); - self.bdev_handle = Some( - BdevHandle::try_from(self.desc.as_ref().unwrap().clone()).unwrap(), - ); + self.bdev_handle = Some(BdevHandle::try_from(desc.clone()).unwrap()); + self.desc = Some(desc); let cfg = Config::get(); if cfg.err_store_opts.enable_err_store { @@ -236,46 +240,46 @@ impl NexusChild { Some(NexusErrStore::new(cfg.err_store_opts.err_store_size)); }; - self.state = ChildState::Open; + self.set_state(ChildState::Open); debug!("{}: child {} opened successfully", self.parent, self.name); - Ok(self.name.clone()) } - /// Fault the child following an unrecoverable error - pub(crate) fn fault(&mut self) { - self.close(); - self.status_reasons.fatal_error(); - NexusChild::save_state_change(); - } - /// Set the child as out of sync with the nexus - /// It requires a full rebuild before it can service IO - /// and remains degraded until such time - pub(crate) fn out_of_sync(&mut self, out_of_sync: bool) { - self.status_reasons.out_of_sync(out_of_sync); + /// Fault the child with a 
specific reason. + /// We do not close the child if it is out-of-sync because it will + /// subsequently be rebuilt. + pub(crate) fn fault(&mut self, reason: Reason) { + match reason { + Reason::OutOfSync => { + self.set_state(ChildState::Faulted(reason)); + } + _ => { + self._close(); + self.set_state(ChildState::Faulted(reason)); + } + } NexusChild::save_state_change(); } + /// Set the child as temporarily offline + /// TODO: channels need to be updated when bdevs are closed pub(crate) fn offline(&mut self) { self.close(); - self.status_reasons.offline(true); NexusChild::save_state_change(); } - /// Online a previously offlined child + + /// Online a previously offlined child. + /// The child is set out-of-sync so that it will be rebuilt. + /// TODO: channels need to be updated when bdevs are opened pub(crate) fn online( &mut self, parent_size: u64, ) -> Result<String, ChildError> { - if !self.status_reasons.offline { - return Err(ChildError::ChildNotOffline {}); - } - self.open(parent_size).map(|s| { - self.status_reasons.offline(false); - self.status_reasons.out_of_sync(true); - NexusChild::save_state_change(); - s - }) + let result = self.open(parent_size); + self.set_state(ChildState::Faulted(Reason::OutOfSync)); + NexusChild::save_state_change(); + result } /// Save the state of the children to the config file @@ -285,50 +289,14 @@ impl NexusChild { } } - /// Status of the child - /// Init - /// Degraded as it cannot service IO, temporarily - /// - /// ConfigInvalid - /// Faulted as it cannot ever service IO - /// - /// Open - /// Degraded if temporarily out of sync - /// Online otherwise - /// - /// Closed - /// Degraded if offline - /// otherwise Faulted as it cannot ever service IO - /// todo: better cater for the online/offline "states" - pub fn status(&self) -> ChildStatus { - match self.state { - ChildState::Init => ChildStatus::Degraded, - ChildState::ConfigInvalid => ChildStatus::Faulted, - ChildState::Closed => { - if self.status_reasons.fatal_error { - ChildStatus::Faulted - } else { - ChildStatus::Degraded - } - } - ChildState::Open => { - if self.status_reasons.out_of_sync { - ChildStatus::Degraded - } else if self.status_reasons.fatal_error { - ChildStatus::Faulted - } else { - ChildStatus::Online - } - } - } + /// returns the state of the child + pub fn state(&self) -> ChildState { + self.state } pub(crate) fn rebuilding(&self) -> bool { match RebuildJob::lookup(&self.name) { - Ok(_) => { - self.state == ChildState::Open - && self.status_reasons.out_of_sync - } + Ok(_) => self.state() == ChildState::Faulted(Reason::OutOfSync), Err(_) => false, } } @@ -344,8 +312,8 @@ impl NexusChild { } } - /// close the bdev -- we have no means of determining if this succeeds - pub(crate) fn close(&mut self) -> ChildState { + /// closes the descriptor and handle; it does not destroy the bdev + fn _close(&mut self) { trace!("{}: Closing child {}", self.parent, self.name); if let Some(bdev) = self.bdev.as_ref() { unsafe { @@ -354,16 +322,18 @@ } } } - // just to be explicit let hdl = self.bdev_handle.take(); let desc = self.desc.take(); drop(hdl); drop(desc); + } - // we leave the child structure around for when we want to reopen it - self.state = ChildState::Closed; - self.state + /// close the bdev -- we have no means of determining if this succeeds + pub(crate) fn close(&mut self) -> ChildState { + self._close(); + self.set_state(ChildState::Closed); + ChildState::Closed } /// create a new nexus child @@ -375,7 +345,6 @@ desc: None, ch: std::ptr::null_mut(), state: 
ChildState::Init, - status_reasons: Default::default(), bdev_handle: None, err_store: None, } @@ -384,7 +353,7 @@ impl NexusChild { /// destroy the child bdev pub(crate) async fn destroy(&mut self) -> Result<(), NexusBdevError> { trace!("destroying child {:?}", self); - assert_eq!(self.state, ChildState::Closed); + assert_eq!(self.state(), ChildState::Closed); if let Some(_bdev) = &self.bdev { bdev_destroy(&self.name).await } else { @@ -393,17 +362,23 @@ impl NexusChild { } } - /// returns if a child can be written to - pub fn can_rw(&self) -> bool { - self.state == ChildState::Open && self.status() != ChildStatus::Faulted + /// Check if the child is in a state that can service I/O. + /// When out-of-sync, the child is still accessible (can accept I/O) + /// because: + /// 1. An added child starts in the out-of-sync state and may require its + /// label and metadata to be updated + /// 2. It needs to be rebuilt + fn is_accessible(&self) -> bool { + self.state() == ChildState::Open + || self.state() == ChildState::Faulted(Reason::OutOfSync) } /// return references to child's bdev and descriptor /// both must be present - otherwise it is considered an error pub fn get_dev(&self) -> Result<(&Bdev, &BdevHandle), ChildError> { - if !self.can_rw() { - info!("{}: Closed child: {}", self.parent, self.name); - return Err(ChildError::ChildClosed {}); + if !self.is_accessible() { + info!("{}: Child is inaccessible: {}", self.parent, self.name); + return Err(ChildError::ChildInaccessible {}); } if let Some(bdev) = &self.bdev { diff --git a/mayastor/src/bdev/nexus/nexus_child_error_store.rs b/mayastor/src/bdev/nexus/nexus_child_error_store.rs index 1a56bc894..a02113ff1 100644 --- a/mayastor/src/bdev/nexus/nexus_child_error_store.rs +++ b/mayastor/src/bdev/nexus/nexus_child_error_store.rs @@ -8,15 +8,18 @@ use serde::export::{fmt::Error, Formatter}; use spdk_sys::{spdk_bdev, spdk_bdev_io_type}; use crate::{ - bdev::nexus::{ - nexus_bdev, - nexus_bdev::{ - nexus_lookup, - Error::{ChildMissing, ChildMissingErrStore}, - Nexus, + bdev::{ + nexus::{ + nexus_bdev, + nexus_bdev::{ + nexus_lookup, + Error::{ChildMissing, ChildMissingErrStore}, + Nexus, + }, + nexus_child::{ChildState, NexusChild}, + nexus_io::{io_status, io_type}, }, - nexus_child::{ChildState, NexusChild}, - nexus_io::{io_status, io_type}, + Reason, }, core::{Cores, Reactors}, subsys::Config, @@ -279,7 +282,7 @@ impl Nexus { trace!("Adding error record {} bdev {:?}", io_op_type, bdev); for child in nexus.children.iter_mut() { if child.bdev.as_ref().unwrap().as_ptr() as *const _ == bdev { - if child.state == ChildState::Open { + if child.state() == ChildState::Open { if child.err_store.is_some() { child.err_store.as_mut().unwrap().add_record( io_op_type, @@ -299,7 +302,11 @@ impl Nexus { { let child_name = child.name.clone(); info!("Faulting child {}", child_name); - if nexus.fault_child(&child_name).await.is_err() { + if nexus + .fault_child(&child_name, Reason::IoError) + .await + .is_err() + { error!( "Failed to fault the child {}", child_name, @@ -316,7 +323,7 @@ impl Nexus { return; } let child_name = child.name.clone(); - trace!("Ignoring error response sent to non-open child {}, state {:?}", child_name, child.state); + trace!("Ignoring error response sent to non-open child {}, state {:?}", child_name, child.state()); return; } } diff --git a/mayastor/src/bdev/nexus/nexus_child_status_config.rs b/mayastor/src/bdev/nexus/nexus_child_status_config.rs index 57a72f204..f51e9dd72 100644 --- 
a/mayastor/src/bdev/nexus/nexus_child_status_config.rs +++ b/mayastor/src/bdev/nexus/nexus_child_status_config.rs @@ -15,7 +15,7 @@ use crate::bdev::nexus::{ instances, nexus_channel::DREvent, - nexus_child::{NexusChild, StatusReasons}, + nexus_child::{ChildState, NexusChild}, }; use once_cell::sync::OnceCell; use serde::{Deserialize, Serialize}; @@ -28,7 +28,7 @@ pub static STATUS_CONFIG: OnceCell<ChildStatusConfig> = OnceCell::new(); #[derive(Serialize, Deserialize, Debug)] pub struct ChildStatusConfig { - status: HashMap<String, StatusReasons>, + status: HashMap<String, ChildState>, } impl Default for ChildStatusConfig { @@ -90,7 +90,7 @@ "Apply state to child {}, reasons {:?}", child.name, status ); - child.status_reasons = *status; + child.set_state(*status); } }); nexus.reconfigure(DREvent::ChildStatusSync).await; @@ -126,9 +126,7 @@ instances().iter().for_each(|nexus| { nexus.children.iter().for_each(|child| { - status_cfg - .status - .insert(child.name.clone(), child.status_reasons); + status_cfg.status.insert(child.name.clone(), child.state()); }); }); @@ -154,7 +152,7 @@ let mut cfg = ChildStatusConfig { status: HashMap::new(), }; - cfg.status.insert(child.name.clone(), child.status_reasons); + cfg.status.insert(child.name.clone(), child.state()); ChildStatusConfig::do_save(Some(cfg)) } diff --git a/mayastor/src/bdev/nexus/nexus_module.rs b/mayastor/src/bdev/nexus/nexus_module.rs index 828cc271b..e786456a3 100644 --- a/mayastor/src/bdev/nexus/nexus_module.rs +++ b/mayastor/src/bdev/nexus/nexus_module.rs @@ -22,7 +22,7 @@ use crate::{ use super::instances; -const NEXUS_NAME: &str = "NEXUS_CAS_MODULE"; +pub const NEXUS_NAME: &str = "NEXUS_CAS_MODULE"; pub static NEXUS_MODULE: Lazy<NexusModule> = Lazy::new(NexusModule::new); diff --git a/mayastor/src/bin/cli/cli.rs b/mayastor/src/bin/cli/cli.rs index 93478ffc4..6ea82f30a 100644 --- a/mayastor/src/bin/cli/cli.rs +++ b/mayastor/src/bin/cli/cli.rs @@ -14,6 +14,7 @@ use crate::context::Context; mod bdev_cli; mod context; +mod device_cli; mod nexus_child_cli; mod nexus_cli; mod pool_cli; @@ -77,6 +78,7 @@ async fn main() -> Result<(), Status> { .subcommand(nexus_cli::subcommands()) .subcommand(replica_cli::subcommands()) .subcommand(bdev_cli::subcommands()) + .subcommand(device_cli::subcommands()) .subcommand(rebuild_cli::subcommands()) .subcommand(snapshot_cli::subcommands()) .get_matches(); @@ -85,6 +87,7 @@ async fn main() -> Result<(), Status> { match matches.subcommand() { ("bdev", Some(args)) => bdev_cli::handler(ctx, args).await?, + ("device", Some(args)) => device_cli::handler(ctx, args).await?, ("nexus", Some(args)) => nexus_cli::handler(ctx, args).await?, ("pool", Some(args)) => pool_cli::handler(ctx, args).await?, ("replica", Some(args)) => replica_cli::handler(ctx, args).await?, diff --git a/mayastor/src/bin/cli/device_cli.rs b/mayastor/src/bin/cli/device_cli.rs new file mode 100644 index 000000000..15b5943e0 --- /dev/null +++ b/mayastor/src/bin/cli/device_cli.rs @@ -0,0 +1,185 @@ +//! +//! methods to obtain information about block devices on the current host + +use super::context::Context; +use ::rpc::mayastor as rpc; +use clap::{App, AppSettings, Arg, ArgMatches, SubCommand}; +use colored_json::ToColoredJson; +use tonic::Status; + +pub fn subcommands<'a, 'b>() -> App<'a, 'b> { + let list = SubCommand::with_name("list") + .about("List available (i.e. unused) block devices") + .arg( + Arg::with_name("all") + .short("a") + .long("all") + .takes_value(false) + .help("List all block devices (i.e. 
also include devices currently in use)"), + ) + .arg( + Arg::with_name("raw") + .long("raw") + .takes_value(false) + .help("Display output as raw JSON"), + ); + + SubCommand::with_name("device") + .settings(&[ + AppSettings::SubcommandRequiredElseHelp, + AppSettings::ColoredHelp, + AppSettings::ColorAlways, + ]) + .about("Host devices") + .subcommand(list) +} + +pub async fn handler( + ctx: Context, + matches: &ArgMatches<'_>, +) -> Result<(), Status> { + match matches.subcommand() { + ("list", Some(args)) => list_block_devices(ctx, args).await, + (cmd, _) => { + Err(Status::not_found(format!("command {} does not exist", cmd))) + } + } +} + +fn get_partition_type(device: &rpc::BlockDevice) -> String { + if let Some(partition) = &device.partition { + format!("{}:{}", partition.scheme, partition.typeid) + } else { + String::from("") + } +} + +async fn list_block_devices( + mut ctx: Context, + matches: &ArgMatches<'_>, +) -> Result<(), Status> { + let all = matches.is_present("all"); + + ctx.v2(&format!( + "Requesting list of {} block devices", + if all { "all" } else { "available" } + )); + + let reply = ctx + .client + .list_block_devices(rpc::ListBlockDevicesRequest { + all, + }) + .await?; + + if matches.is_present("raw") { + println!( + "{}", + serde_json::to_string_pretty(&reply.into_inner()) + .unwrap() + .to_colored_json_auto() + .unwrap() + ); + return Ok(()); + } + + let devices: &Vec<rpc::BlockDevice> = &reply.get_ref().devices; + + if devices.is_empty() { + ctx.v1("No devices found"); + return Ok(()); + } + + if all { + let table = devices + .iter() + .map(|device| { + let fstype: String; + let uuid: String; + let mountpoint: String; + + if let Some(filesystem) = &device.filesystem { + fstype = filesystem.fstype.clone(); + uuid = filesystem.uuid.clone(); + mountpoint = filesystem.mountpoint.clone(); + } else { + fstype = String::from(""); + uuid = String::from(""); + mountpoint = String::from(""); + } + + vec![ + device.devname.clone(), + device.devtype.clone(), + device.devmajor.to_string(), + device.devminor.to_string(), + device.size.to_string(), + String::from(if device.available { "yes" } else { "no" }), + device.model.clone(), + get_partition_type(&device), + fstype, + uuid, + mountpoint, + device.devpath.clone(), + device + .devlinks + .iter() + .map(|s| format!("\"{}\"", s)) + .collect::<Vec<String>>() + .join(" "), + ] + }) + .collect(); + + ctx.print_list( + vec![ + "DEVNAME", + "DEVTYPE", + ">MAJOR", + "MINOR", + ">SIZE", + "AVAILABLE", + "MODEL", + "PARTTYPE", + "FSTYPE", + "FSUUID", + "MOUNTPOINT", + "DEVPATH", + "DEVLINKS", + ], + table, + ); + } else { + let table = devices + .iter() + .map(|device| { + vec![ + device.devname.clone(), + device.devtype.clone(), + device.devmajor.to_string(), + device.devminor.to_string(), + device.size.to_string(), + device.model.clone(), + get_partition_type(&device), + device.devpath.clone(), + device + .devlinks + .iter() + .map(|s| format!("\"{}\"", s)) + .collect::<Vec<String>>() + .join(" "), + ] + }) + .collect(); + + ctx.print_list( + vec![ + "DEVNAME", "DEVTYPE", ">MAJOR", "MINOR", ">SIZE", "MODEL", + "PARTTYPE", "DEVPATH", "DEVLINKS", + ], + table, + ); + } + + Ok(()) +} diff --git a/mayastor/src/bin/cli/pool_cli.rs b/mayastor/src/bin/cli/pool_cli.rs index cda25cd9b..e116185cf 100644 --- a/mayastor/src/bin/cli/pool_cli.rs +++ b/mayastor/src/bin/cli/pool_cli.rs @@ -2,25 +2,11 @@ use super::context::Context; use ::rpc::mayastor as rpc; use byte_unit::Byte; use clap::{App, AppSettings, Arg, ArgMatches, SubCommand}; -use tonic::{Code, Status}; +use tonic::Status; pub 
fn subcommands<'a, 'b>() -> App<'a, 'b> { let create = SubCommand::with_name("create") .about("Create storage pool") - .arg( - Arg::with_name("block-size") - .short("b") - .long("block-size") - .value_name("NUMBER") - .help("block size of the underlying devices"), - ) - .arg( - Arg::with_name("io-if") - .short("i") - .long("io-if") - .value_name("IF") - .help("I/O interface for the underlying devices"), - ) .arg( Arg::with_name("pool") .required(true) @@ -78,24 +64,12 @@ async fn create( .unwrap() .map(|dev| dev.to_owned()) .collect(); - let block_size = value_t!(matches.value_of("block-size"), u32).unwrap_or(0); - let io_if = match matches.value_of("io-if") { - None | Some("auto") => Ok(rpc::PoolIoIf::PoolIoAuto as i32), - Some("aio") => Ok(rpc::PoolIoIf::PoolIoAio as i32), - Some("uring") => Ok(rpc::PoolIoIf::PoolIoUring as i32), - Some(_) => Err(Status::new( - Code::Internal, - "Invalid value of I/O interface".to_owned(), - )), - }?; ctx.v2(&format!("Creating pool {}", name)); ctx.client .create_pool(rpc::CreatePoolRequest { name: name.clone(), disks, - block_size, - io_if, }) .await?; ctx.v1(&format!("Created pool {}", name)); diff --git a/mayastor/src/core/env.rs b/mayastor/src/core/env.rs index 467dd70d2..c5ab7ee7b 100644 --- a/mayastor/src/core/env.rs +++ b/mayastor/src/core/env.rs @@ -14,7 +14,7 @@ use std::{ use byte_unit::{Byte, ByteUnit}; use futures::{channel::oneshot, future}; -use once_cell::sync::Lazy; +use once_cell::sync::{Lazy, OnceCell}; use snafu::Snafu; use structopt::StructOpt; use tokio::{runtime::Builder, task}; @@ -48,8 +48,7 @@ use crate::{ }, grpc, logger, - nats, - subsys::Config, + subsys::{self, Config}, target::iscsi, }; @@ -73,19 +72,6 @@ fn parse_mb(src: &str) -> Result { } } -/// If endpoint is Some() and is missing a port number then add the provided -/// one. 
-fn add_default_port(endpoint: Option<String>, port: u16) -> Option<String> { - match endpoint { - Some(ep) => Some(if ep.contains(':') { - ep - } else { - format!("{}:{}", ep, port) - }), - None => None, - } -} - #[derive(Debug, StructOpt)] #[structopt( name = "Mayastor", @@ -97,9 +83,9 @@ pub struct MayastorCliArgs { #[structopt(short = "c")] /// Path to the configuration file if any pub config: Option<String>, - #[structopt(short = "g")] - /// IP address and port for gRPC server to listen on - pub grpc_endpoint: Option<String>, + #[structopt(short = "g", default_value = grpc::default_endpoint_str())] + /// IP address and port (optional) for the gRPC server to listen on + pub grpc_endpoint: String, #[structopt(short = "L")] /// Enable logging for sub components pub log_components: Vec<String>, @@ -110,8 +96,8 @@ /// Name of the node where mayastor is running (ID used by control plane) pub node_name: Option<String>, #[structopt(short = "n")] - /// IP address and port of the NATS server - pub nats_endpoint: Option<String>, + /// Hostname/IP and port (optional) of the message bus server + pub mbus_endpoint: Option<String>, /// The maximum amount of hugepage memory we are allowed to allocate in MiB /// (default: all) #[structopt( @@ -144,8 +130,8 @@ impl Default for MayastorCliArgs { fn default() -> Self { Self { - grpc_endpoint: None, - nats_endpoint: None, + grpc_endpoint: grpc::default_endpoint().to_string(), + mbus_endpoint: None, node_name: None, env_context: None, reactor_mask: "0x1".into(), @@ -208,9 +194,9 @@ type Result = std::result::Result; #[derive(Debug, Clone)] pub struct MayastorEnvironment { pub config: Option<String>, - node_name: String, - nats_endpoint: Option<String>, - grpc_endpoint: Option<String>, + pub node_name: String, + pub mbus_endpoint: Option<String>, + pub grpc_endpoint: Option<std::net::SocketAddr>, mayastor_config: Option<String>, child_status_config: Option<String>, delay_subsystem_init: bool, @@ -244,7 +230,7 @@ impl Default for MayastorEnvironment { Self { config: None, node_name: "mayastor-node".into(), - nats_endpoint: None, + mbus_endpoint: None, grpc_endpoint: None, mayastor_config: None, child_status_config: None, @@ -293,7 +279,6 @@ async fn do_shutdown(arg: *mut c_void) { warn!("Mayastor stopped non-zero: {}", rc); } - nats::message_bus_stop(); iscsi::fini(); unsafe { @@ -345,11 +330,12 @@ struct SubsystemCtx { sender: futures::channel::oneshot::Sender<bool>, } +static MAYASTOR_DEFAULT_ENV: OnceCell<MayastorEnvironment> = OnceCell::new(); impl MayastorEnvironment { pub fn new(args: MayastorCliArgs) -> Self { Self { - grpc_endpoint: add_default_port(args.grpc_endpoint, 10124), - nats_endpoint: add_default_port(args.nats_endpoint, 4222), + grpc_endpoint: Some(grpc::endpoint(args.grpc_endpoint)), + mbus_endpoint: subsys::mbus_endpoint(args.mbus_endpoint), node_name: args.node_name.unwrap_or_else(|| "mayastor-node".into()), config: args.config, mayastor_config: args.mayastor_config, @@ -363,6 +349,21 @@ env_context: args.env_context, ..Default::default() } + .setup_static() + } + + fn setup_static(self) -> Self { + MAYASTOR_DEFAULT_ENV.get_or_init(|| self.clone()); + self + } + + /// Get the global environment (first created on new) + /// or otherwise the default one (used by the tests) + pub fn global_or_default() -> Self { + match MAYASTOR_DEFAULT_ENV.get() { + Some(env) => env.clone(), + None => MayastorEnvironment::default(), + } } /// configure signal handling @@ -653,6 +654,9 @@ impl MayastorEnvironment { /// initialize the core, call this before all else pub fn init(mut self) -> Self { + // initialise the message bus + 
subsys::message_bus_init(); + // setup the logger as soon as possible self.init_logger().unwrap(); @@ -744,9 +748,7 @@ F: FnOnce() + 'static, { type FutureResult = Result<(), ()>; - let grpc_endpoint = self.grpc_endpoint.clone(); - let nats_endpoint = self.nats_endpoint.clone(); - let node_name = self.node_name.clone(); + let grpc_endpoint = self.grpc_endpoint; self.init(); let mut rt = Builder::new() @@ -764,16 +766,12 @@ let mut futures: Vec< Pin<Box<dyn Future<Output = FutureResult>>>, > = Vec::new(); - if let Some(grpc_ep) = grpc_endpoint.as_ref() { + if let Some(grpc_endpoint) = grpc_endpoint { futures.push(Box::pin(grpc::MayastorGrpcServer::run( - grpc_ep, + grpc_endpoint, ))); - if let Some(nats_ep) = nats_endpoint.as_ref() { - futures.push(Box::pin(nats::message_bus_run( - nats_ep, &node_name, grpc_ep, - ))); - } - }; + } + futures.push(Box::pin(subsys::Registration::run())); futures.push(Box::pin(master)); let _out = future::try_join_all(futures).await; info!("reactors stopped"); diff --git a/mayastor/src/core/handle.rs b/mayastor/src/core/handle.rs index 00fe25aec..01d3d0790 100644 --- a/mayastor/src/core/handle.rs +++ b/mayastor/src/core/handle.rs @@ -4,7 +4,6 @@ use std::{ mem::ManuallyDrop, os::raw::c_void, sync::Arc, - time::{SystemTime, UNIX_EPOCH}, }; use futures::channel::oneshot; @@ -15,7 +14,7 @@ use spdk_sys::{ spdk_bdev_desc, spdk_bdev_free_io, spdk_bdev_io, - spdk_bdev_nvme_admin_passthru, + spdk_bdev_nvme_admin_passthru_ro, spdk_bdev_read, spdk_bdev_reset, spdk_bdev_write, @@ -26,6 +25,7 @@ use crate::{ bdev::nexus::nexus_io::nvme_admin_opc, core::{Bdev, CoreError, Descriptor, DmaBuf, DmaError, IoChannel}, ffihelper::cb_arg, + subsys, }; /// A handle to a bdev, is an interface to submit IO. The ['Descriptor'] may be @@ -211,13 +211,7 @@ impl BdevHandle { pub async fn create_snapshot(&self) -> Result<u64, CoreError> { let mut cmd = spdk_sys::spdk_nvme_cmd::default(); cmd.set_opc(nvme_admin_opc::CREATE_SNAPSHOT.into()); - // encode snapshot time in cdw10/11 - let now = SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap() - .as_secs(); - cmd.__bindgen_anon_1.cdw10 = now as u32; - cmd.__bindgen_anon_2.cdw11 = (now >> 32) as u32; + let now = subsys::set_snapshot_time(&mut cmd); debug!("Creating snapshot at {}", now); self.nvme_admin(&cmd).await?; Ok(now as u64) @@ -240,8 +234,10 @@ ) -> Result { trace!("Sending nvme_admin {}", nvme_cmd.opc()); let (s, r) = oneshot::channel::<bool>(); + // Use the spdk-sys variant spdk_bdev_nvme_admin_passthru_ro, which + // assumes read-only commands let errno = unsafe { - spdk_bdev_nvme_admin_passthru( + spdk_bdev_nvme_admin_passthru_ro( self.desc.as_ptr(), self.channel.as_ptr(), &*nvme_cmd, diff --git a/mayastor/src/grpc/mayastor_grpc.rs b/mayastor/src/grpc/mayastor_grpc.rs index 37d74350a..74efd70cd 100644 --- a/mayastor/src/grpc/mayastor_grpc.rs +++ b/mayastor/src/grpc/mayastor_grpc.rs @@ -17,6 +17,7 @@ use crate::{ bdev::{ nexus::{instances, nexus_bdev}, nexus_create, + Reason, }, grpc::{ nexus_grpc::{ @@ -29,6 +30,7 @@ use crate::{ sync_config, GrpcResult, }, + host::blk_device, }; #[derive(Debug)] @@ -217,7 +219,7 @@ let uri = args.uri.clone(); debug!("Faulting child {} on nexus {}", uri, uuid); locally! 
{ async move { - nexus_lookup(&args.uuid)?.fault_child(&args.uri).await + nexus_lookup(&args.uuid)?.fault_child(&args.uri, Reason::Rpc).await }}; info!("Faulted child {} on nexus {}", uri, uuid); Ok(Response::new(Null {})) @@ -413,4 +415,17 @@ }) .await } + + #[instrument(level = "debug", err)] + async fn list_block_devices( + &self, + request: Request<ListBlockDevicesRequest>, + ) -> GrpcResult<ListBlockDevicesReply> { + let args = request.into_inner(); + let reply = ListBlockDevicesReply { + devices: blk_device::list_block_devices(args.all).await?, + }; + trace!("{:?}", reply); + Ok(Response::new(reply)) + } } diff --git a/mayastor/src/grpc/mod.rs b/mayastor/src/grpc/mod.rs index be98a8b5a..9c96e8cec 100644 --- a/mayastor/src/grpc/mod.rs +++ b/mayastor/src/grpc/mod.rs @@ -83,3 +83,42 @@ where } result } + +macro_rules! default_ip { + () => { + "0.0.0.0" + }; +} +macro_rules! default_port { + () => { + 10124 + }; +} + +/// Default server port +pub fn default_port() -> u16 { + default_port!() +} + +/// Default endpoint - ip:port +pub fn default_endpoint_str() -> &'static str { + concat!(default_ip!(), ":", default_port!()) +} + +/// Default endpoint - ip:port +pub fn default_endpoint() -> std::net::SocketAddr { + default_endpoint_str() + .parse() + .expect("Expected a valid endpoint") +} + +/// If endpoint is missing a port number then add the default one. +pub fn endpoint(endpoint: String) -> std::net::SocketAddr { + (if endpoint.contains(':') { + endpoint + } else { + format!("{}:{}", endpoint, default_port()) + }) + .parse() + .expect("Invalid gRPC endpoint") +} diff --git a/mayastor/src/grpc/nexus_grpc.rs b/mayastor/src/grpc/nexus_grpc.rs index 1ef000d92..8ffc361a1 100644 --- a/mayastor/src/grpc/nexus_grpc.rs +++ b/mayastor/src/grpc/nexus_grpc.rs @@ -8,17 +8,24 @@ use crate::{ bdev::nexus::{ instances, nexus_bdev::{Error, Nexus, NexusStatus}, - nexus_child::{ChildStatus, NexusChild}, + nexus_child::{ChildState, NexusChild, Reason}, }, rebuild::RebuildJob, }; -impl From<ChildStatus> for rpc::ChildState { - fn from(child: ChildStatus) -> Self { +/// Map the internal child states into rpc child states (i.e. 
the states that +/// the control plane sees) +impl From for rpc::ChildState { + fn from(child: ChildState) -> Self { match child { - ChildStatus::Faulted => rpc::ChildState::ChildFaulted, - ChildStatus::Degraded => rpc::ChildState::ChildDegraded, - ChildStatus::Online => rpc::ChildState::ChildOnline, + ChildState::Init => rpc::ChildState::ChildDegraded, + ChildState::ConfigInvalid => rpc::ChildState::ChildFaulted, + ChildState::Open => rpc::ChildState::ChildOnline, + ChildState::Closed => rpc::ChildState::ChildDegraded, + ChildState::Faulted(reason) => match reason { + Reason::OutOfSync => rpc::ChildState::ChildDegraded, + _ => rpc::ChildState::ChildFaulted, + }, } } } @@ -40,7 +47,7 @@ impl NexusChild { pub fn to_grpc(&self) -> rpc::Child { rpc::Child { uri: self.name.clone(), - state: rpc::ChildState::from(self.status()) as i32, + state: rpc::ChildState::from(self.state()) as i32, rebuild_progress: self.get_rebuild_progress(), } } diff --git a/mayastor/src/grpc/server.rs b/mayastor/src/grpc/server.rs index 6b5817e72..c793c4e84 100644 --- a/mayastor/src/grpc/server.rs +++ b/mayastor/src/grpc/server.rs @@ -9,12 +9,12 @@ use rpc::mayastor::{ pub struct MayastorGrpcServer {} impl MayastorGrpcServer { - pub async fn run(endpoint: &str) -> Result<(), ()> { + pub async fn run(endpoint: std::net::SocketAddr) -> Result<(), ()> { info!("gRPC server configured at address {}", endpoint); let svc = Server::builder() .add_service(MayastorRpcServer::new(MayastorSvc {})) .add_service(BdevRpcServer::new(BdevSvc {})) - .serve(endpoint.parse().unwrap()); + .serve(endpoint); match svc.await { Ok(_) => Ok(()), diff --git a/mayastor/src/host/blk_device.rs b/mayastor/src/host/blk_device.rs new file mode 100644 index 000000000..af22fcc09 --- /dev/null +++ b/mayastor/src/host/blk_device.rs @@ -0,0 +1,327 @@ +//! +//! This module implements the list_block_devices() gRPC method +//! for listing available disk devices on the current host. +//! +//! The relevant information is obtained via udev. +//! The method works by iterating through udev records and selecting block +//! (ie. SUBSYSTEM=block) devices that represent either disks or disk +//! partitions. For each such device, it is then determined as to whether the +//! device is available for use. +//! +//! A device is currently deemed to be "available" if it satisfies the following +//! criteria: +//! - the device has a non-zero size +//! - the device is of an acceptable type as determined by well known device +//! numbers (eg. SCSI disks) +//! - the device represents either a disk with no partitions or a disk +//! partition of an acceptable type (Linux filesystem partitions only at +//! present) +//! - the device currently contains no filesystem or volume id (although this +//! logically implies that the device is not currently mounted, for the sake +//! of consistency, the mount table is also checked to ENSURE that the device +//! is not mounted) + +use std::{ + collections::HashMap, + ffi::{OsStr, OsString}, + io::Error, +}; + +use proc_mounts::{MountInfo, MountIter}; +use rpc::mayastor::{ + block_device::{Filesystem, Partition}, + BlockDevice, +}; +use udev::{Device, Enumerator}; + +// Struct representing a property value in a udev::Device struct (and possibly +// elsewhere). It is used to provide conversions via various "From" trait +// implementations below. 
+struct Property<'a>(Option<&'a OsStr>); + +impl From<Property<'_>> for String { + fn from(property: Property) -> Self { + String::from(property.0.map(|s| s.to_str()).flatten().unwrap_or("")) + } +} + +impl From<Property<'_>> for Option<String> { + fn from(property: Property) -> Self { + property.0.map(|s| s.to_str()).flatten().map(String::from) + } +} + +impl From<Property<'_>> for Option<u32> { + fn from(property: Property) -> Self { + Option::<String>::from(property) + .map(|s| s.parse().ok()) + .flatten() + } +} + +impl From<Property<'_>> for u32 { + fn from(property: Property) -> Self { + Option::<u32>::from(property).unwrap_or(0) + } +} + +impl From<Property<'_>> for Option<u64> { + fn from(property: Property) -> Self { + Option::<String>::from(property) + .map(|s| s.parse().ok()) + .flatten() + } +} + +impl From<Property<'_>> for u64 { + fn from(property: Property) -> Self { + Option::<u64>::from(property).unwrap_or(0) + } +} + +// Determine the type of devices which may be potentially presented +// as "available" for use. +fn usable_device(devmajor: &u32) -> bool { + const DEVICE_TYPES: [u32; 4] = [ + 7, // Loopback devices + 8, // SCSI disk devices + 43, // Network block devices + 259, // Block Extended Major + ]; + + if DEVICE_TYPES.iter().any(|m| m == devmajor) { + return true; + } + + // TODO: add extra logic here as needed for devices with dynamically + // allocated major numbers + + false +} + +// Determine the type of partitions which may be potentially presented +// as "available" for use +fn usable_partition(partition: &Option<Partition>) -> bool { + const GPT_PARTITION_TYPES: [&str; 1] = [ + "0fc63daf-8483-4772-8e79-3d69d8477de4", // Linux + ]; + + const MBR_PARTITION_TYPES: [&str; 1] = [ + "0x83", // Linux + ]; + + if let Some(part) = partition { + if part.scheme == "gpt" { + return GPT_PARTITION_TYPES.iter().any(|&s| s == part.typeid); + } + if part.scheme == "dos" { + return MBR_PARTITION_TYPES.iter().any(|&s| s == part.typeid); + } + return false; + } + + true +} + +// Determine if device is provided internally via mayastor. +// At present this simply involves examining the value of +// the udev "ID_MODEL" property. +fn mayastor_device(device: &Device) -> bool { + match device + .property_value("ID_MODEL") + .map(|s| s.to_str()) + .flatten() + { + Some("Mayastor NVMe controller") => true, // NVMF + Some("Nexus_CAS_Driver") => true, // iSCSI + _ => false, + } +} + +// Create a new Partition object from udev::Device properties +fn new_partition(parent: Option<&str>, device: &Device) -> Option<Partition> { + if let Some(devtype) = device.property_value("DEVTYPE") { + if devtype.to_str() == Some("partition") { + return Some(Partition { + parent: String::from(parent.unwrap_or("")), + number: Property(device.property_value("PARTN")).into(), + name: Property(device.property_value("PARTNAME")).into(), + scheme: Property(device.property_value("ID_PART_ENTRY_SCHEME")) + .into(), + typeid: Property(device.property_value("ID_PART_ENTRY_TYPE")) + .into(), + uuid: Property(device.property_value("ID_PART_ENTRY_UUID")) + .into(), + }); + } + } + None +} + +// Create a new Filesystem object from udev::Device properties +// and the list of current filesystem mounts. +// Note that the result can be None if there is no filesystem +// associated with this Device. 
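+// For illustration only (hypothetical values, not from this patch): an +// unformatted disk typically yields None here, whereas a mounted ext4 +// partition yields Some(Filesystem { fstype: "ext4", .. }) with its +// mountpoint filled in from the mount table.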
+fn new_filesystem( + device: &Device, + mountinfo: Option<&MountInfo>, +) -> Option<Filesystem> { + let mut fstype: Option<String> = + Property(device.property_value("ID_FS_TYPE")).into(); + + if fstype.is_none() { + fstype = mountinfo.map(|m| m.fstype.clone()); + } + + let label: Option<String> = + Property(device.property_value("ID_FS_LABEL")).into(); + + let uuid: Option<String> = + Property(device.property_value("ID_FS_UUID")).into(); + + // Do not return an actual object if none of the fields therein have actual + // values. + if fstype.is_none() + && label.is_none() + && uuid.is_none() + && mountinfo.is_none() + { + return None; + } + + Some(Filesystem { + fstype: fstype.unwrap_or_else(|| String::from("")), + label: label.unwrap_or_else(|| String::from("")), + uuid: uuid.unwrap_or_else(|| String::from("")), + mountpoint: mountinfo + .map(|m| String::from(m.dest.to_string_lossy())) + .unwrap_or_else(|| String::from("")), + }) +} + +// Create a new BlockDevice object from collected information. +// This function also contains the logic for determining whether +// or not the device that this represents is "available" for use. +fn new_device( + parent: Option<&str>, + include: bool, + device: &Device, + mounts: &HashMap<OsString, MountInfo>, +) -> Option<BlockDevice> { + if let Some(devname) = device.property_value("DEVNAME") { + let partition = new_partition(parent, device); + let filesystem = new_filesystem(device, mounts.get(devname)); + let devmajor: u32 = Property(device.property_value("MAJOR")).into(); + let size: u64 = Property(device.attribute_value("size")).into(); + + let available = include + && size > 0 + && !mayastor_device(device) + && usable_device(&devmajor) + && (partition.is_none() || usable_partition(&partition)) + && filesystem.is_none(); + + return Some(BlockDevice { + devname: String::from(devname.to_str().unwrap_or("")), + devtype: Property(device.property_value("DEVTYPE")).into(), + devmajor, + devminor: Property(device.property_value("MINOR")).into(), + model: Property(device.property_value("ID_MODEL")).into(), + devpath: Property(device.property_value("DEVPATH")).into(), + devlinks: device + .property_value("DEVLINKS") + .map(|s| s.to_str()) + .flatten() + .unwrap_or("") + .split(' ') + .filter(|&s| s != "") + .map(String::from) + .collect(), + size, + partition, + filesystem, + available, + }); + } + None +} + +// Get the list of current filesystem mounts. +fn get_mounts() -> Result<HashMap<OsString, MountInfo>, Error> { + let mut table: HashMap<OsString, MountInfo> = HashMap::new(); + + for entry in MountIter::new()? { + if let Ok(mount) = entry { + table.insert(OsString::from(mount.source.clone()), mount); + } + } + + Ok(table) +} + +// Iterate through udev to generate a list of all (block) devices +// with DEVTYPE == "disk" +fn get_disks( + all: bool, + mounts: &HashMap<OsString, MountInfo>, +) -> Result<Vec<BlockDevice>, Error> { + let mut list: Vec<BlockDevice> = Vec::new(); + + let mut enumerator = Enumerator::new()?; + + enumerator.match_subsystem("block")?; + enumerator.match_property("DEVTYPE", "disk")?; + + for entry in enumerator.scan_devices()? 
{ + if let Some(devname) = entry.property_value("DEVNAME") { + let partitions = get_partitions(devname.to_str(), &entry, mounts)?; + + if let Some(device) = + new_device(None, partitions.is_empty(), &entry, &mounts) + { + if all || device.available { + list.push(device); + } + } + + for device in partitions { + if all || device.available { + list.push(device); + } + } + } + } + + Ok(list) +} + +// Iterate through udev to generate a list of all (block) devices +// associated with parent device +fn get_partitions( + parent: Option<&str>, + disk: &Device, + mounts: &HashMap<OsString, MountInfo>, +) -> Result<Vec<BlockDevice>, Error> { + let mut list: Vec<BlockDevice> = Vec::new(); + + let mut enumerator = Enumerator::new()?; + + enumerator.match_parent(disk)?; + enumerator.match_property("DEVTYPE", "partition")?; + + for entry in enumerator.scan_devices()? { + if let Some(device) = new_device(parent, true, &entry, &mounts) { + list.push(device); + } + } + + Ok(list) +} + +/// Return a list of block devices on the current host. +/// The parameter controls whether to return a list containing +/// all matching devices, or just those deemed to be available. +pub async fn list_block_devices(all: bool) -> Result<Vec<BlockDevice>, Error> { + let mounts = get_mounts()?; + get_disks(all, &mounts) +} diff --git a/mayastor/src/host/mod.rs b/mayastor/src/host/mod.rs new file mode 100644 index 000000000..13d238869 --- /dev/null +++ b/mayastor/src/host/mod.rs @@ -0,0 +1 @@ +pub mod blk_device; diff --git a/mayastor/src/lib.rs b/mayastor/src/lib.rs index dd8433121..8458120f8 100644 --- a/mayastor/src/lib.rs +++ b/mayastor/src/lib.rs @@ -14,10 +14,10 @@ pub mod core; pub mod delay; pub mod ffihelper; pub mod grpc; +pub mod host; pub mod jsonrpc; pub mod logger; pub mod lvs; -pub mod nats; pub mod nexus_uri; pub mod pool; pub mod rebuild; diff --git a/mayastor/src/logger.rs b/mayastor/src/logger.rs index 3b501de43..8840b07fb 100644 --- a/mayastor/src/logger.rs +++ b/mayastor/src/logger.rs @@ -1,7 +1,10 @@ use std::{ffi::CStr, os::raw::c_char, str::FromStr}; use tracing_log::format_trace; -use tracing_subscriber::fmt::{format::FmtSpan, time::FormatTime, Subscriber}; +use tracing_subscriber::{ + fmt::{format::FmtSpan, time::FormatTime, Subscriber}, + EnvFilter, +}; use spdk_sys::{spdk_log_get_print_level, spdk_log_level}; @@ -71,14 +74,18 @@ impl FormatTime for CustomTime<'_> { /// We might want to suppress certain messages, as some of them are redundant, /// in particular the NOTICE messages; as such, they are mapped to debug. 
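/// (Illustrative note, not part of the original patch: EnvFilter::try_from_default_env /// reads the RUST_LOG environment variable, so e.g. RUST_LOG=mayastor=trace /// takes precedence over the level argument passed to init() when it is set.)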
pub fn init(level: &str) { - let subscriber = Subscriber::builder() + let builder = Subscriber::builder() .with_timer(CustomTime("%FT%T%.9f%Z")) - .with_span_events(FmtSpan::FULL) - .with_max_level( - tracing::Level::from_str(level).unwrap_or(tracing::Level::TRACE), - ) - .finish(); + .with_span_events(FmtSpan::FULL); - tracing::subscriber::set_global_default(subscriber) - .expect("failed to set default subscriber"); + if let Ok(filter) = EnvFilter::try_from_default_env() { + let subscriber = builder.with_env_filter(filter).finish(); + tracing::subscriber::set_global_default(subscriber) + } else { + let max_level = + tracing::Level::from_str(level).unwrap_or(tracing::Level::INFO); + let subscriber = builder.with_max_level(max_level).finish(); + tracing::subscriber::set_global_default(subscriber) + } + .expect("failed to set default subscriber"); } diff --git a/mayastor/src/lvs/lvol.rs b/mayastor/src/lvs/lvol.rs index 778763680..b7f345e78 100644 --- a/mayastor/src/lvs/lvol.rs +++ b/mayastor/src/lvs/lvol.rs @@ -14,9 +14,12 @@ use tracing::instrument; use spdk_sys::{ spdk_blob_get_xattr_value, + spdk_blob_is_read_only, + spdk_blob_is_snapshot, spdk_blob_set_xattr, spdk_blob_sync_md, spdk_lvol, + vbdev_lvol_create_snapshot, vbdev_lvol_destroy, vbdev_lvol_get_from_bdev, }; @@ -32,6 +35,7 @@ use crate::{ IntoCString, }, lvs::{error::Error, lvs_pool::Lvs}, + subsys::NvmfReq, }; /// properties we allow for being set on the lvol, this information is stored on @@ -215,6 +219,16 @@ impl Lvol { unsafe { self.0.as_ref().thin_provision } } + /// returns a boolean indicating if the lvol is read-only + pub fn is_read_only(&self) -> bool { + unsafe { spdk_blob_is_read_only(self.0.as_ref().blob) } + } + + /// returns a boolean indicating if the lvol is a snapshot + pub fn is_snapshot(&self) -> bool { + unsafe { spdk_blob_is_snapshot(self.0.as_ref().blob) } + } + /// destroy the lvol #[instrument(level = "debug", err)] pub async fn destroy(self) -> Result<String, Error> { @@ -259,6 +273,13 @@ impl Lvol { let blob = unsafe { self.0.as_ref().blob }; assert_ne!(blob.is_null(), true); + if self.is_snapshot() { + warn!("ignoring set property on snapshot {}", self.name()); + return Ok(()); + } + if self.is_read_only() { + warn!("{} is read-only", self.name()); + } match prop { PropValue::Shared(val) => { + let name = PropName::from(prop).to_string().into_cstring(); @@ -330,4 +351,53 @@ impl Lvol { } } } + + /// Format snapshot name + /// base_name is the nexus or replica UUID + pub fn format_snapshot_name(base_name: &str, snapshot_time: u64) -> String { + format!("{}-snap-{}", base_name, snapshot_time) + } + + /// Create a snapshot + pub async fn create_snapshot( + &self, + nvmf_req: &NvmfReq, + snapshot_name: &str, + ) { + extern "C" fn snapshot_done_cb( + nvmf_req_ptr: *mut c_void, + _lvol_ptr: *mut spdk_lvol, + errno: i32, + ) { + let nvmf_req = NvmfReq::from(nvmf_req_ptr); + let mut rsp = nvmf_req.response(); + let nvme_status = rsp.status(); + + nvme_status.set_sct(0); // SPDK_NVME_SCT_GENERIC + nvme_status.set_sc(match errno { + 0 => 0, + _ => { + debug!("vbdev_lvol_create_snapshot errno {}", errno); + 0x06 // SPDK_NVME_SC_INTERNAL_DEVICE_ERROR + } + }); + + // From nvmf_bdev_ctrlr_complete_cmd + unsafe { + spdk_sys::spdk_nvmf_request_complete(nvmf_req.0.as_ptr()); + } + } + + let c_snapshot_name = snapshot_name.into_cstring(); + unsafe { + vbdev_lvol_create_snapshot( + self.0.as_ptr(), + c_snapshot_name.as_ptr(), + Some(snapshot_done_cb), + nvmf_req.0.as_ptr().cast(), + ) + }; + + info!("Creating snapshot {}", 
snapshot_name); + } } diff --git a/mayastor/src/lvs/lvs_pool.rs b/mayastor/src/lvs/lvs_pool.rs index 8fd32eabb..05d658a84 100644 --- a/mayastor/src/lvs/lvs_pool.rs +++ b/mayastor/src/lvs/lvs_pool.rs @@ -27,9 +27,10 @@ use spdk_sys::{ LVS_CLEAR_WITH_NONE, SPDK_BDEV_IO_TYPE_UNMAP, }; +use url::Url; use crate::{ - bdev::Uri, + bdev::{util::uring, Uri}, core::{Bdev, Share, Uuid}, ffihelper::{cb_arg, pair, AsStr, ErrnoResult, FfiResult, IntoCString}, lvs::{Error, Lvol, PropName, PropValue}, @@ -299,27 +300,21 @@ impl Lvs { }); } - // this is a legacy argument, should not be used typically - if args.block_size != 512 - && args.block_size != 4096 - && args.block_size != 0 - { - return Err(Error::Invalid { - source: Errno::EINVAL, - msg: format!( - "invalid block size specified {}", - args.block_size - ), - }); - } - - // fixup the device uri's to URL + // default to uring if kernel supports it let disks = args .disks .iter() .map(|d| { - if d.starts_with("/dev") { - format!("aio://{}", d) + if Url::parse(d).is_err() { + format!( + "{}://{}", + if uring::kernel_support() { + "uring" + } else { + "aio" + }, + d, + ) } else { d.clone() } @@ -368,7 +363,7 @@ name, }) } - // try to create the the pool + // try to create the pool Err(Error::Import { source, .. }) if source == Errno::EILSEQ => { diff --git a/mayastor/src/nats.rs b/mayastor/src/nats.rs deleted file mode 100644 index 610e587ff..000000000 --- a/mayastor/src/nats.rs +++ /dev/null @@ -1,235 +0,0 @@ -//! NATS message bus connecting mayastor to control plane (moac). -//! -//! It is designed to make sending events to control plane easy in the future. -//! That's the reason for global sender protected by the mutex, that normally -//! would not be needed and currently is used only to terminate the message bus. - -use std::{env, sync::Mutex, time::Duration}; - -use futures::{channel::mpsc, select, FutureExt, StreamExt}; -use nats::asynk::{connect, Connection}; -use once_cell::sync::Lazy; -use serde::{Deserialize, Serialize}; -use snafu::Snafu; -use tokio::time::delay_for; - -/// Mayastor sends registration messages in this interval (kind of heart-beat) -const HB_INTERVAL: u64 = 10; - -/// The end of channel used to send messages to or terminate the NATS client. -static SENDER: Lazy<Mutex<Option<mpsc::Sender<()>>>> = - Lazy::new(|| Mutex::new(None)); - -/// Errors for pool operations. -/// -/// Note: The types here that would be normally used as source for snafu errors -/// do not implement Error trait required by Snafu. So they are renamed to -/// "cause" attribute and we use .map_err() instead of .context() when creating -/// them. 
-#[derive(Debug, Snafu)] -enum Error { - #[snafu(display( - "Failed to connect to the NATS server {}: {:?}", - server, - cause - ))] - ConnectFailed { - cause: std::io::Error, - server: String, - }, - #[snafu(display( - "Cannot issue requests if message bus hasn't been started" - ))] - NotStarted {}, - #[snafu(display("Failed to queue register request: {:?}", cause))] - QueueRegister { cause: std::io::Error }, - #[snafu(display("Failed to queue deregister request: {:?}", cause))] - QueueDeregister { cause: std::io::Error }, -} - -/// Register message payload -#[derive(Serialize, Deserialize, Debug)] -struct RegisterArgs { - id: String, - #[serde(rename = "grpcEndpoint")] - grpc_endpoint: String, -} - -/// Deregister message payload -#[derive(Serialize, Deserialize, Debug)] -struct DeregisterArgs { - id: String, -} - -/// Message bus implementation -struct MessageBus { - /// NATS server endpoint - server: String, - /// Name of the node that mayastor is running on - node: String, - /// gRPC endpoint of the server provided by mayastor - grpc_endpoint: String, - /// NATS client - client: Option, - /// heartbeat interval (how often the register message is sent) - hb_interval: Duration, -} - -impl MessageBus { - /// Create message bus object with given parameters. - pub fn new(server: &str, node: &str, grpc_endpoint: &str) -> Self { - Self { - server: server.to_owned(), - node: node.to_owned(), - grpc_endpoint: grpc_endpoint.to_owned(), - client: None, - hb_interval: Duration::from_secs( - match env::var("MAYASTOR_HB_INTERVAL") { - Ok(val) => match val.parse::() { - Ok(num) => num, - Err(_) => HB_INTERVAL, - }, - Err(_) => HB_INTERVAL, - }, - ), - } - } - - /// Connect to the server and start emitting periodic register messages. - /// Runs until the sender side of mpsc channel is closed. - pub async fn run( - &mut self, - mut receiver: mpsc::Receiver<()>, - ) -> Result<(), Error> { - assert!(self.client.is_none()); - - // We retry connect in loop until successful. Once connected the nats - // library will handle reconnections for us. - while self.client.is_none() { - self.client = match self.connect().await { - Ok(client) => Some(client), - Err(err) => { - error!("{}", err); - delay_for(self.hb_interval).await; - continue; - } - }; - } - info!("Connected to the NATS server {}", self.server); - - info!( - "Registering '{}' and grpc server {} ...", - self.node, self.grpc_endpoint - ); - loop { - if let Err(err) = self.register().await { - error!("Registration failed: {:?}", err); - }; - let _res = select! { - () = delay_for(self.hb_interval).fuse() => (), - msg = receiver.next() => { - match msg { - Some(_) => warn!("Messages have not been implemented yet"), - None => { - info!("Terminating the NATS client"); - break; - } - } - } - }; - } - - if let Err(err) = self.deregister().await { - error!("Deregistration failed: {:?}", err); - }; - Ok(()) - } - - /// Try to connect to the NATS server including DNS resolution step if - /// needed. - async fn connect(&self) -> Result { - debug!("Connecting to the message bus..."); - connect(&self.server) - .await - .map_err(|err| Error::ConnectFailed { - server: self.server.clone(), - cause: err, - }) - } - - /// Send a register message to the NATS server. 
- async fn register(&mut self) -> Result<(), Error> { - let payload = RegisterArgs { - id: self.node.clone(), - grpc_endpoint: self.grpc_endpoint.clone(), - }; - match &mut self.client { - Some(client) => client - .publish("register", serde_json::to_vec(&payload).unwrap()) - .await - .map_err(|cause| Error::QueueRegister { - cause, - })?, - None => return Err(Error::NotStarted {}), - } - // Note that the message was only queued and we don't know if it was - // really sent to the NATS server (limitation of the nats lib) - debug!( - "Registered '{}' and grpc server {}", - self.node, self.grpc_endpoint - ); - Ok(()) - } - - /// Send a deregister message to the NATS server. - async fn deregister(&mut self) -> Result<(), Error> { - let payload = DeregisterArgs { - id: self.node.clone(), - }; - match &mut self.client { - Some(client) => client - .publish("deregister", serde_json::to_vec(&payload).unwrap()) - .await - .map_err(|cause| Error::QueueRegister { - cause, - })?, - None => return Err(Error::NotStarted {}), - } - info!( - "Deregistered '{}' and grpc server {}", - self.node, self.grpc_endpoint - ); - Ok(()) - } -} - -/// Connect to the NATS server and start emitting periodic register messages. -/// Runs until the message_bus_stop() is called. -pub async fn message_bus_run( - server: &str, - node: &str, - grpc_endpoint: &str, -) -> Result<(), ()> { - let (sender, receiver) = mpsc::channel::<()>(1); - { - let mut sender_maybe = SENDER.lock().unwrap(); - if sender_maybe.is_some() { - panic!("Double initialization of message bus"); - } - *sender_maybe = Some(sender); - } - let mut mbus = MessageBus::new(server, node, grpc_endpoint); - match mbus.run(receiver).await { - Err(err) => { - error!("{}", err); - Err(()) - } - Ok(_) => Ok(()), - } -} - -/// Causes the future created by message_bus_run() to resolve. -pub fn message_bus_stop() { - // this will free the sender and unblock the receiver waiting for a message - let _sender_maybe = SENDER.lock().unwrap().take(); -} diff --git a/mayastor/src/pool.rs b/mayastor/src/pool.rs index 3fa31490b..2f235326f 100644 --- a/mayastor/src/pool.rs +++ b/mayastor/src/pool.rs @@ -3,216 +3,20 @@ //! They provide abstraction on top of aio and uring bdev, lvol store, etc //! and export simple-to-use json-rpc methods for managing pools. -use std::{ - ffi::{c_void, CStr, CString}, - os::raw::c_char, -}; - -use futures::channel::oneshot; -use snafu::Snafu; -use url::Url; +use std::{ffi::CStr, os::raw::c_char}; use rpc::mayastor as rpc; use spdk_sys::{ - bdev_aio_delete, - create_aio_bdev, - create_uring_bdev, - delete_uring_bdev, lvol_store_bdev, spdk_bs_free_cluster_count, spdk_bs_get_cluster_size, spdk_bs_total_data_cluster_count, spdk_lvol_store, - vbdev_get_lvol_store_by_name, - vbdev_get_lvs_bdev_by_lvs, vbdev_lvol_store_first, vbdev_lvol_store_next, - vbdev_lvs_create, - vbdev_lvs_destruct, - vbdev_lvs_examine, - LVS_CLEAR_WITH_NONE, -}; - -use crate::{ - bdev::{util::uring, Uri}, - core::{Bdev, Share}, - ffihelper::{cb_arg, done_cb}, - nexus_uri::{bdev_destroy, NexusBdevError}, - replica::ReplicaIter, }; -/// Errors for pool operations. 
-#[derive(Debug, Snafu)] -#[snafu(visibility = "pub(crate)")] -pub enum Error { - #[snafu(display( - "Invalid number of disks specified: should be 1, got {}", - num - ))] - BadNumDisks { num: usize }, - #[snafu(display( - "{} bdev {} already exists or parameters are invalid", - bdev_if, - name - ))] - BadBdev { bdev_if: String, name: String }, - #[snafu(display("Uring not supported by kernel"))] - UringUnsupported, - #[snafu(display("Invalid I/O interface: {}", io_if))] - InvalidIoInterface { io_if: i32 }, - #[snafu(display("Base bdev {} already exists", name))] - AlreadyBdev { name: String }, - #[snafu(display("Base bdev {} does not exist", name))] - UnknownBdev { name: String }, - #[snafu(display("The pool {} already exists", name))] - AlreadyExists { name: String }, - #[snafu(display("The pool {} does not exist", name))] - UnknownPool { name: String }, - #[snafu(display("Could not create pool {}", name))] - BadCreate { name: String }, - #[snafu(display("Failed to create the pool {} (errno={})", name, errno))] - FailedCreate { name: String, errno: i32 }, - #[snafu(display("The pool {} disappeared", name))] - PoolGone { name: String }, - #[snafu(display("The device {} hosts another pool", name))] - DeviceAlreadyUsed { name: String }, - #[snafu(display("Failed to import the pool {} (errno={})", name, errno))] - FailedImport { name: String, errno: i32 }, - #[snafu(display("Failed to unshare replica: {}", msg))] - FailedUnshareReplica { msg: String }, - #[snafu(display("Failed to destroy pool {} (errno={})", name, errno))] - FailedDestroyPool { name: String, errno: i32 }, - #[snafu(display( - "Failed to destroy base bdev {} type {} for the pool {} (errno={})", - bdev, - bdev_type, - name, - errno - ))] - FailedDestroyBdev { - bdev: String, - bdev_type: String, - name: String, - errno: i32, - }, -} - -impl From for tonic::Status { - fn from(e: Error) -> Self { - match e { - Error::BadNumDisks { - .. - } => Self::invalid_argument(e.to_string()), - Error::BadBdev { - .. - } => Self::invalid_argument(e.to_string()), - Error::UringUnsupported { - .. - } => Self::invalid_argument(e.to_string()), - Error::InvalidIoInterface { - .. - } => Self::invalid_argument(e.to_string()), - Error::AlreadyBdev { - .. - } => Self::invalid_argument(e.to_string()), - Error::UnknownBdev { - .. - } => Self::not_found(e.to_string()), - Error::AlreadyExists { - .. - } => Self::already_exists(e.to_string()), - Error::UnknownPool { - .. - } => Self::not_found(e.to_string()), - Error::BadCreate { - .. - } => Self::invalid_argument(e.to_string()), - Error::FailedCreate { - .. - } => Self::invalid_argument(e.to_string()), - Error::PoolGone { - .. - } => Self::not_found(e.to_string()), - Error::DeviceAlreadyUsed { - .. - } => Self::unavailable(e.to_string()), - Error::FailedImport { - .. - } => Self::internal(e.to_string()), - Error::FailedUnshareReplica { - .. - } => Self::internal(e.to_string()), - Error::FailedDestroyPool { - .. - } => Self::internal(e.to_string()), - Error::FailedDestroyBdev { - .. 
- } => Self::internal(e.to_string()), - } - } -} - -type Result = std::result::Result; - -/// Wrapper for create aio or uring bdev C function -pub fn create_base_bdev( - file: &str, - block_size: u32, - io_if: rpc::PoolIoIf, -) -> Result<()> { - let (mut do_uring, must_uring) = match io_if { - rpc::PoolIoIf::PoolIoAuto => (true, false), - rpc::PoolIoIf::PoolIoAio => (false, false), - rpc::PoolIoIf::PoolIoUring => (true, true), - }; - if do_uring && !uring::kernel_support() { - if must_uring { - return Err(Error::UringUnsupported); - } else { - warn!("Uring not supported by kernel, falling back to aio for bdev {}", file); - do_uring = false; - } - } - let bdev_type = if !do_uring { - ("aio", "AIO") - } else { - ("uring", "Uring") - }; - debug!("Creating {} bdev {} ...", bdev_type.0, file); - let cstr_file = CString::new(file).unwrap(); - let rc = if !do_uring { - unsafe { - create_aio_bdev(cstr_file.as_ptr(), cstr_file.as_ptr(), block_size) - } - } else if unsafe { - create_uring_bdev(cstr_file.as_ptr(), cstr_file.as_ptr(), block_size) - .is_null() - } { - -1 - } else { - 0 - }; - if rc != 0 { - Err(Error::BadBdev { - bdev_if: bdev_type.1.to_string(), - name: String::from(file), - }) - } else { - info!("{} bdev {} was created", bdev_type.0, file); - Ok(()) - } -} - -/// Callback called from SPDK for pool create and import methods. -extern "C" fn pool_done_cb( - sender_ptr: *mut c_void, - _lvs: *mut spdk_lvol_store, - errno: i32, -) { - let sender = - unsafe { Box::from_raw(sender_ptr as *mut oneshot::Sender) }; - sender.send(errno).expect("Receiver is gone"); -} +use crate::core::Bdev; /// Structure representing a pool which comprises lvol store and /// underlying bdev. @@ -235,24 +39,6 @@ impl Pool { } } - /// Look up existing pool by name - pub fn lookup(name: &str) -> Option { - let name = CString::new(name).unwrap(); - let lvs_ptr = unsafe { vbdev_get_lvol_store_by_name(name.as_ptr()) }; - if lvs_ptr.is_null() { - return None; - } - let lvs_bdev_ptr = unsafe { vbdev_get_lvs_bdev_by_lvs(lvs_ptr) }; - if lvs_bdev_ptr.is_null() { - // can happen if lvs is being destroyed - return None; - } - Some(Self { - lvs_ptr, - lvs_bdev_ptr, - }) - } - /// Get name of the pool. pub fn get_name(&self) -> &str { unsafe { @@ -286,205 +72,6 @@ impl Pool { spdk_bs_free_cluster_count(lvs.blobstore) * cluster_size } } - /// Return raw pointer to spdk lvol store structure - pub fn as_ptr(&self) -> *mut spdk_lvol_store { - self.lvs_ptr - } - - /// Create a pool on base bdev - pub async fn create<'a>(name: &'a str, disk: &'a str) -> Result { - let base_bdev = match Bdev::lookup_by_name(disk) { - Some(bdev) => bdev, - None => { - return Err(Error::UnknownBdev { - name: String::from(disk), - }); - } - }; - let pool_name = CString::new(name).unwrap(); - let (sender, receiver) = oneshot::channel::(); - let rc = unsafe { - vbdev_lvs_create( - base_bdev.as_ptr(), - pool_name.as_ptr(), - 0, - // We used to clear a pool with UNMAP but that takes awfully - // long time on large SSDs (~ can take an hour). Clearing the - // pool is not necessary. Clearing the lvol must be done, but - // lvols tend to be small so there the overhead is acceptable. 
- LVS_CLEAR_WITH_NONE, - Some(pool_done_cb), - cb_arg(sender), - ) - }; - // TODO: free sender - if rc < 0 { - return Err(Error::BadCreate { - name: String::from(name), - }); - } - - let lvs_errno = receiver.await.expect("Cancellation is not supported"); - if lvs_errno != 0 { - return Err(Error::FailedCreate { - name: String::from(name), - errno: lvs_errno, - }); - } - - match Pool::lookup(&name) { - Some(pool) => { - info!("The pool {} has been created", name); - Ok(pool) - } - None => Err(Error::PoolGone { - name: String::from(name), - }), - } - } - - /// Import the pool from a disk - pub async fn import<'a>(name: &'a str, disk: &'a str) -> Result { - let base_bdev = match Bdev::lookup_by_name(disk) { - Some(bdev) => bdev, - None => { - return Err(Error::UnknownBdev { - name: String::from(disk), - }); - } - }; - - let (sender, receiver) = oneshot::channel::(); - - debug!("Trying to import pool {}", name); - - unsafe { - vbdev_lvs_examine( - base_bdev.as_ptr(), - Some(pool_done_cb), - cb_arg(sender), - ); - } - let lvs_errno = receiver.await.expect("Cancellation is not supported"); - if lvs_errno == 0 { - // could be that a pool with a different name was imported - match Pool::lookup(&name) { - Some(pool) => { - info!("The pool {} has been imported", name); - Ok(pool) - } - None => Err(Error::DeviceAlreadyUsed { - name: String::from(disk), - }), - } - } else { - Err(Error::FailedImport { - name: String::from(name), - errno: lvs_errno, - }) - } - } - - /// Destroy the pool - pub async fn destroy(self) -> Result<()> { - let name = self.get_name().to_string(); - let base_bdev_name = self.get_base_bdev().name(); - - debug!("Destroying the pool {}", name); - - // unshare all replicas on the pool at first - for replica in ReplicaIter::new() { - if replica.get_pool_name() == name { - // XXX temporary - replica.unshare().await.map_err(|err| { - Error::FailedUnshareReplica { - msg: err.to_string(), - } - })?; - } - } - - // we will destroy lvol store now - let (sender, receiver) = oneshot::channel::(); - unsafe { - vbdev_lvs_destruct(self.lvs_ptr, Some(done_cb), cb_arg(sender)); - } - let lvs_errno = receiver.await.expect("Cancellation is not supported"); - if lvs_errno != 0 { - return Err(Error::FailedDestroyPool { - name, - errno: lvs_errno, - }); - } - - // we will destroy base bdev now - let base_bdev = match Bdev::lookup_by_name(&base_bdev_name) { - Some(bdev) => bdev, - None => { - // it's not an error if the base bdev disappeared but it is - // weird - warn!( - "Base bdev {} disappeared while destroying the pool {}", - base_bdev_name, name - ); - return Ok(()); - } - }; - if let Some(uri) = base_bdev.bdev_uri() { - debug!("destroying bdev {}", uri); - bdev_destroy(&uri) - .await - .map_err(|_e| Error::FailedDestroyBdev { - bdev: base_bdev.name(), - bdev_type: base_bdev.driver(), - name, - errno: -1, - }) - .map(|_| Ok(()))? 
- } else { - let base_bdev_type = base_bdev.driver(); - debug!( - "Destroying bdev {} type {}", - base_bdev.name(), - base_bdev_type - ); - - let (sender, receiver) = oneshot::channel::(); - if base_bdev_type == "aio" { - unsafe { - bdev_aio_delete( - base_bdev.as_ptr(), - Some(done_cb), - cb_arg(sender), - ); - } - } else { - unsafe { - delete_uring_bdev( - base_bdev.as_ptr(), - Some(done_cb), - cb_arg(sender), - ); - } - } - let bdev_errno = - receiver.await.expect("Cancellation is not supported"); - if bdev_errno != 0 { - Err(Error::FailedDestroyBdev { - bdev: base_bdev_name, - bdev_type: base_bdev_type, - name, - errno: bdev_errno, - }) - } else { - info!( - "The pool {} and base bdev {} type {} have been destroyed", - name, base_bdev_name, base_bdev_type - ); - Ok(()) - } - } - } } /// Iterator over available storage pools. @@ -538,114 +125,3 @@ impl From for rpc::Pool { } } } - -async fn create_pool_legacy(args: rpc::CreatePoolRequest) -> Result { - // TODO: support RAID-0 devices - if args.disks.len() != 1 { - return Err(Error::BadNumDisks { - num: args.disks.len(), - }); - } - - if let Some(pool) = Pool::lookup(&args.name) { - return if pool.get_base_bdev().name() == args.disks[0] { - Ok(pool.into()) - } else { - Err(Error::AlreadyExists { - name: args.name, - }) - }; - } - - // TODO: We would like to check if the disk is in use, but there - // is no easy way how to get this info using available api. - let disk = &args.disks[0]; - if Bdev::lookup_by_name(disk).is_some() { - return Err(Error::AlreadyBdev { - name: disk.clone(), - }); - } - // The block size may be missing or explicitly set to zero. In - // both cases we want to provide our own default value instead - // of SPDK's default which is 512. - // - // NOTE: Keep this in sync with nexus block size. - // Block sizes greater than 512 currently break the iscsi target, - // so for now we default size to 512. - let mut block_size = args.block_size; //.unwrap_or(0); - if block_size == 0 { - block_size = 512; - } - let io_if = match rpc::PoolIoIf::from_i32(args.io_if) { - Some(val) => val, - None => { - return Err(Error::InvalidIoInterface { - io_if: args.io_if, - }); - } - }; - create_base_bdev(disk, block_size, io_if)?; - - if let Ok(pool) = Pool::import(&args.name, disk).await { - return Ok(pool.into()); - }; - let pool = Pool::create(&args.name, disk).await?; - Ok(pool.into()) -} - -fn is_uri_scheme(disks: &[String]) -> bool { - !disks.iter().any(|d| Url::parse(d).is_err()) -} - -async fn create_pool_uri(args: rpc::CreatePoolRequest) -> Result { - if args.disks.len() != 1 { - return Err(Error::BadNumDisks { - num: args.disks.len(), - }); - } - - let parsed = Uri::parse(&args.disks[0]).map_err(|e| Error::BadBdev { - bdev_if: e.to_string(), - name: args.disks[0].clone(), - })?; - - if let Some(pool) = Pool::lookup(&args.name) { - return if pool.get_base_bdev().name() == parsed.get_name() { - Ok(pool.into()) - } else { - Err(Error::AlreadyExists { - name: args.name, - }) - }; - } - - let bdev = match parsed.create().await { - Err(e) => match e { - NexusBdevError::BdevExists { - .. 
- } => Ok(parsed.get_name()), - _ => Err(Error::BadBdev { - bdev_if: "".to_string(), - name: parsed.get_name(), - }), - }, - Ok(name) => Ok(name), - }?; - - if let Ok(pool) = Pool::import(&args.name, &bdev).await { - return Ok(pool.into()); - } - - let pool = Pool::create(&args.name, &bdev).await?; - Ok(pool.into()) -} - -pub async fn create_pool(args: rpc::CreatePoolRequest) -> Result { - if is_uri_scheme(&args.disks) { - debug!("pool creation with URI scheme"); - create_pool_uri(args).await - } else { - debug!("pool creation with legacy scheme"); - create_pool_legacy(args).await - } -} diff --git a/mayastor/src/replica.rs b/mayastor/src/replica.rs index 91c952038..6ae101436 100644 --- a/mayastor/src/replica.rs +++ b/mayastor/src/replica.rs @@ -3,48 +3,18 @@ //! Replica is a logical data volume exported over nvmf (in SPDK terminology //! an lvol). Here we define methods for easy management of replicas. #![allow(dead_code)] -use std::ffi::{c_void, CStr, CString}; +use std::ffi::CStr; -use futures::channel::oneshot; -use nix::errno::Errno; use rpc::mayastor as rpc; use snafu::{ResultExt, Snafu}; -use spdk_sys::{ - spdk_lvol, - spdk_nvme_cpl, - spdk_nvme_status, - spdk_nvmf_request, - vbdev_lvol_create, - vbdev_lvol_create_snapshot, - vbdev_lvol_destroy, - vbdev_lvol_get_from_bdev, - LVOL_CLEAR_WITH_UNMAP, - LVOL_CLEAR_WITH_WRITE_ZEROES, - SPDK_BDEV_IO_TYPE_UNMAP, -}; - -use crate::{ - core::Bdev, - ffihelper::{ - cb_arg, - done_errno_cb, - errno_result_from_i32, - ErrnoResult, - IntoCString, - }, - pool::Pool, - subsys::NvmfSubsystem, - target, -}; +use spdk_sys::{spdk_lvol, vbdev_lvol_get_from_bdev}; + +use crate::{core::Bdev, subsys::NvmfSubsystem, target}; /// These are high-level context errors one for each rpc method. #[derive(Debug, Snafu)] pub enum RpcError { - #[snafu(display("Failed to create replica {}", uuid))] - CreateReplica { source: Error, uuid: String }, - #[snafu(display("Failed to destroy replica {}", uuid))] - DestroyReplica { source: Error, uuid: String }, #[snafu(display("Failed to (un)share replica {}", uuid))] ShareReplica { source: Error, uuid: String }, } @@ -52,12 +22,6 @@ pub enum RpcError { impl From for tonic::Status { fn from(e: RpcError) -> Self { match e { - RpcError::CreateReplica { - source, .. - } => Self::from(source), - RpcError::DestroyReplica { - source, .. - } => Self::from(source), RpcError::ShareReplica { source, .. } => Self::from(source), @@ -68,16 +32,6 @@ impl From for tonic::Status { // Replica errors. #[derive(Debug, Snafu)] pub enum Error { - #[snafu(display("The pool \"{}\" does not exist", pool))] - PoolNotFound { pool: String }, - #[snafu(display("Replica already exists"))] - ReplicaExists {}, - #[snafu(display("Invalid parameters"))] - InvalidParams {}, - #[snafu(display("Failed to create lvol"))] - CreateLvol { source: Errno }, - #[snafu(display("Failed to destroy lvol"))] - DestroyLvol { source: Errno }, #[snafu(display("Replica has been already shared"))] ReplicaShared {}, #[snafu(display("share nvmf"))] @@ -97,21 +51,6 @@ pub enum Error { impl From for tonic::Status { fn from(e: Error) -> Self { match e { - Error::PoolNotFound { - .. - } => Self::not_found(e.to_string()), - Error::ReplicaExists { - .. - } => Self::already_exists(e.to_string()), - Error::InvalidParams { - .. - } => Self::invalid_argument(e.to_string()), - Error::CreateLvol { - .. - } => Self::invalid_argument(e.to_string()), - Error::DestroyLvol { - .. - } => Self::internal(e.to_string()), Error::ReplicaShared { .. 
} => Self::internal(e.to_string()), @@ -175,63 +114,6 @@ fn detect_share(uuid: &str) -> Option<(ShareType, String)> { } impl Replica { - /// Create replica on storage pool. - pub async fn create( - uuid: &str, - pool: &str, - size: u64, - thin: bool, - ) -> Result { - let pool = match Pool::lookup(pool) { - Some(p) => p, - None => { - return Err(Error::PoolNotFound { - pool: pool.to_owned(), - }) - } - }; - let clear_method = if pool - .get_base_bdev() - .io_type_supported(SPDK_BDEV_IO_TYPE_UNMAP) - { - LVOL_CLEAR_WITH_UNMAP - } else { - LVOL_CLEAR_WITH_WRITE_ZEROES - }; - - if Self::lookup(uuid).is_some() { - return Err(Error::ReplicaExists {}); - } - let c_uuid = CString::new(uuid).unwrap(); - let (sender, receiver) = - oneshot::channel::>(); - let rc = unsafe { - vbdev_lvol_create( - pool.as_ptr(), - c_uuid.as_ptr(), - size, - thin, - clear_method, - Some(Self::replica_done_cb), - cb_arg(sender), - ) - }; - if rc != 0 { - // XXX sender is leaked - return Err(Error::InvalidParams {}); - } - - let lvol_ptr = receiver - .await - .expect("Cancellation is not supported") - .context(CreateLvol {})?; - - info!("Created replica {} on pool {}", uuid, pool.get_name()); - Ok(Self { - lvol_ptr, - }) - } - /// Lookup replica by uuid (=name). pub fn lookup(uuid: &str) -> Option { match Bdev::lookup_by_name(uuid) { @@ -252,89 +134,6 @@ impl Replica { } } - /// Destroy replica. Consumes the "self" so after calling this method self - /// can't be used anymore. If the replica is shared, it is unshared before - /// the destruction. - // - // TODO: Error value should contain self so that it can be used when - // destroy fails. - pub async fn destroy(self) -> Result<()> { - self.unshare().await?; - - let uuid = self.get_uuid(); - let (sender, receiver) = oneshot::channel::>(); - unsafe { - vbdev_lvol_destroy( - self.lvol_ptr, - Some(done_errno_cb), - cb_arg(sender), - ); - } - - receiver - .await - .expect("Cancellation is not supported") - .context(DestroyLvol {})?; - - info!("Destroyed replica {}", uuid); - Ok(()) - } - - /// Format snapshot name - /// base_name is the nexus or replica UUID - pub fn format_snapshot_name(base_name: &str, snapshot_time: u64) -> String { - format!("{}-snap-{}", base_name, snapshot_time) - } - - /// Create a snapshot - pub async fn create_snapshot( - self, - nvmf_req: *mut spdk_nvmf_request, - snapshot_name: &str, - ) { - extern "C" fn snapshot_done_cb( - nvmf_req_ptr: *mut c_void, - _lvol_ptr: *mut spdk_lvol, - errno: i32, - ) { - let rsp: &mut spdk_nvme_cpl = unsafe { - &mut *spdk_sys::spdk_nvmf_request_get_response( - nvmf_req_ptr as *mut spdk_nvmf_request, - ) - }; - let nvme_status: &mut spdk_nvme_status = - unsafe { &mut rsp.__bindgen_anon_1.status }; - - nvme_status.set_sct(0); // SPDK_NVME_SCT_GENERIC - nvme_status.set_sc(match errno { - 0 => 0, - _ => { - debug!("vbdev_lvol_create_snapshot errno {}", errno); - 0x06 // SPDK_NVME_SC_INTERNAL_DEVICE_ERROR - } - }); - - // From nvmf_bdev_ctrlr_complete_cmd - unsafe { - spdk_sys::spdk_nvmf_request_complete( - nvmf_req_ptr as *mut spdk_nvmf_request, - ); - } - } - - let c_snapshot_name = snapshot_name.into_cstring(); - unsafe { - vbdev_lvol_create_snapshot( - self.as_ptr(), - c_snapshot_name.as_ptr(), - Some(snapshot_done_cb), - nvmf_req as *mut c_void, - ) - }; - - info!("Creating snapshot {}", snapshot_name); - } - /// Expose replica over supported remote access storage protocols (nvmf /// and iscsi). 
pub async fn share(&self, kind: ShareType) -> Result<()> { @@ -416,27 +215,6 @@ impl Replica { pub fn is_thin(&self) -> bool { unsafe { (*self.lvol_ptr).thin_provision } } - - /// Return raw pointer to lvol (C struct spdk_lvol). - pub fn as_ptr(&self) -> *mut spdk_lvol { - self.lvol_ptr - } - - /// Callback called from SPDK for replica create method. - extern "C" fn replica_done_cb( - sender_ptr: *mut c_void, - lvol_ptr: *mut spdk_lvol, - errno: i32, - ) { - let sender = unsafe { - Box::from_raw( - sender_ptr as *mut oneshot::Sender>, - ) - }; - sender - .send(errno_result_from_i32(lvol_ptr, errno)) - .expect("Receiver is gone"); - } } /// Iterator over replicas @@ -525,107 +303,6 @@ impl From for rpc::Replica { } } -pub(crate) async fn create_replica( - args: rpc::CreateReplicaRequest, -) -> Result { - let want_share = match rpc::ShareProtocolReplica::from_i32(args.share) { - Some(val) => val, - None => Err(Error::InvalidProtocol { - protocol: args.share, - }) - .context(CreateReplica { - uuid: args.uuid.clone(), - })?, - }; - let replica = match Replica::lookup(&args.uuid) { - Some(r) => r, - None => Replica::create(&args.uuid, &args.pool, args.size, args.thin) - .await - .context(CreateReplica { - uuid: args.uuid.clone(), - })?, - }; - - // TODO: destroy replica if the share operation fails - match want_share { - rpc::ShareProtocolReplica::ReplicaNvmf => replica - .share(ShareType::Nvmf) - .await - .context(CreateReplica { - uuid: args.uuid.clone(), - })?, - rpc::ShareProtocolReplica::ReplicaIscsi => replica - .share(ShareType::Iscsi) - .await - .context(CreateReplica { - uuid: args.uuid.clone(), - })?, - rpc::ShareProtocolReplica::ReplicaNone => (), - } - Ok(replica.into()) -} - -pub(crate) async fn destroy_replica( - args: rpc::DestroyReplicaRequest, -) -> Result<(), RpcError> { - match Replica::lookup(&args.uuid) { - Some(replica) => replica.destroy().await.context(DestroyReplica { - uuid: args.uuid, - }), - None => Ok(()), - } -} - -pub(crate) fn list_replicas() -> rpc::ListReplicasReply { - rpc::ListReplicasReply { - replicas: ReplicaIter::new() - .map(|r| r.into()) - .collect::>(), - } -} - -pub(crate) async fn stat_replicas() -> Result -{ - let mut stats = Vec::new(); - - // XXX is it safe to hold bdev pointer in iterator across context - // switch!? 
-    for r in ReplicaIter::new() {
-        let lvol = r.as_ptr();
-        let uuid = r.get_uuid().to_owned();
-        let pool = r.get_pool_name().to_owned();
-        let bdev: Bdev = unsafe { (*lvol).bdev.into() };
-
-        // cancellation point here
-        let st = bdev.stats().await;
-
-        match st {
-            Ok(st) => {
-                stats.push(rpc::ReplicaStats {
-                    uuid,
-                    pool,
-                    stats: Some(rpc::Stats {
-                        num_read_ops: st.num_read_ops,
-                        num_write_ops: st.num_write_ops,
-                        bytes_read: st.bytes_read,
-                        bytes_written: st.bytes_written,
-                    }),
-                });
-            }
-            Err(errno) => {
-                warn!(
-                    "Failed to get stats for {} (errno={})",
-                    bdev.name(),
-                    errno
-                );
-            }
-        }
-    }
-    Ok(rpc::StatReplicasReply {
-        replicas: stats,
-    })
-}
-
 pub(crate) async fn share_replica(
     args: rpc::ShareReplicaRequest,
 ) -> Result<rpc::ShareReplicaReply, RpcError> {
diff --git a/mayastor/src/subsys/config/mod.rs b/mayastor/src/subsys/config/mod.rs
index 866af0c85..f917cedd4 100644
--- a/mayastor/src/subsys/config/mod.rs
+++ b/mayastor/src/subsys/config/mod.rs
@@ -32,17 +32,17 @@ use crate::{
     bdev::{
         nexus::{
             instances,
-            nexus_child::NexusChild,
+            nexus_child::{ChildState, NexusChild, Reason},
             nexus_child_status_config::ChildStatusConfig,
         },
         nexus_create,
-        ChildStatus,
         VerboseError,
     },
     core::{Bdev, Cores, Reactor, Share},
     jsonrpc::{jsonrpc_register, Code, RpcErrorCode},
+    lvs::Lvs,
     nexus_uri::bdev_create,
-    pool::{create_pool, PoolsIter},
+    pool::PoolsIter,
     replica::{self, ReplicaIter, ShareType},
     subsys::{
         config::opts::{
@@ -298,8 +298,6 @@ impl Config {
                 Pool {
                     name: p.get_name().into(),
                     disks: vec![base.bdev_uri().unwrap_or_else(|| base.name())],
-                    blk_size: base.block_len(),
-                    io_if: 0, // AIO
                     replicas: ReplicaIter::new()
                         .map(|p| Replica {
                             name: p.get_uuid().to_string(),
@@ -399,7 +397,10 @@ impl Config {
                 let degraded_children: Vec<&NexusChild> = nexus_instance
                     .children
                     .iter()
-                    .filter(|child| child.status() == ChildStatus::Degraded)
+                    .filter(|child| {
+                        child.state()
+                            == ChildState::Faulted(Reason::OutOfSync)
+                    })
                     .collect::<Vec<_>>();

                 // Get a mutable reference to the nexus instance. We can't
@@ -469,7 +470,7 @@ impl Config {
         if let Some(pools) = self.pools.as_ref() {
             for pool in pools {
                 info!("creating pool {}", pool.name);
-                if let Err(e) = create_pool(pool.into()).await {
+                if let Err(e) = Lvs::create_or_import(pool.into()).await {
                     error!(
                         "Failed to create pool {}. {}",
                         pool.name,
@@ -592,10 +593,6 @@ pub struct Pool {
     pub name: String,
     /// bdevs to create outside of the nexus control
     pub disks: Vec<String>,
-    /// the block_size the pool should use
-    pub blk_size: u32,
-    /// use AIO, uring or auto detect
-    pub io_if: i32,
     /// list of replicas to share on load
     pub replicas: Vec<Replica>,
 }
@@ -606,8 +603,6 @@ impl From<&Pool> for rpc::mayastor::CreatePoolRequest {
     fn from(o: &Pool) -> Self {
         Self {
             name: o.name.clone(),
             disks: o.disks.clone(),
-            block_size: o.blk_size,
-            io_if: o.io_if,
         }
     }
 }
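With `blk_size` and `io_if` gone from the YAML pool definition, the `From<&Pool>` conversion above forwards only the pool name and its disk URIs. A minimal sketch of what the conversion now produces (the pool literal below is illustrative, not taken from a real config):

    // Hypothetical pool entry; `Pool` and `CreatePoolRequest` are the types
    // shown in the diff above.
    let pool = Pool {
        name: "tpool".to_string(),
        disks: vec!["aio:///tmp/disk1.img".to_string()],
        replicas: vec![],
    };
    let req: rpc::mayastor::CreatePoolRequest = (&pool).into();
    assert_eq!(req.name, "tpool");
    assert_eq!(req.disks, pool.disks); // block_size/io_if fields no longer exist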
diff --git a/mayastor/src/subsys/config/opts.rs b/mayastor/src/subsys/config/opts.rs
index e664e1a21..bb206425f 100644
--- a/mayastor/src/subsys/config/opts.rs
+++ b/mayastor/src/subsys/config/opts.rs
@@ -211,9 +211,9 @@ pub struct NvmeBdevOpts {
     arbitration_burst: u32,
     /// max number of low priority cmds a controller may launch at one time
     low_priority_weight: u32,
-    /// max number of medium priority cmds a controller my launch at one time
+    /// max number of medium priority cmds a controller may launch at one time
     medium_priority_weight: u32,
-    /// max number of high priority cmds a controller my launch at one time
+    /// max number of high priority cmds a controller may launch at one time
     high_priority_weight: u32,
     /// admin queue polling period
     nvme_adminq_poll_period_us: u64,
@@ -372,7 +372,7 @@ pub struct IscsiTgtOpts {
     mutual_chap: bool,
     /// chap group
     chap_group: i32,
-    /// max number of sessions in th host
+    /// max number of sessions in the host
     max_sessions: u32,
     /// max connections per session
     max_connections_per_session: u32,
@@ -465,7 +465,7 @@ impl GetOpts for IscsiTgtOpts {
         unsafe {
             // spdk_iscsi_opts_copy copies our struct to a new portion of
             // memory and returns a pointer to it which we store into the
-            // defined global. Later one, when iscsi initializes those options
+            // defined global. Later on, when iscsi initializes, those options
             // are verified and then -- copied to g_spdk_iscsi. Once they
             // are copied g_spdk_iscsi_opts is freed.
             g_spdk_iscsi_opts = iscsi_opts_copy(&mut self.into());
diff --git a/mayastor/src/subsys/mbus/mbus_nats.rs b/mayastor/src/subsys/mbus/mbus_nats.rs
new file mode 100644
index 000000000..4ef904164
--- /dev/null
+++ b/mayastor/src/subsys/mbus/mbus_nats.rs
@@ -0,0 +1,104 @@
+//! NATS implementation of the `MessageBus` connecting mayastor to the control
+//! plane components.
+
+use super::{Channel, MessageBus};
+use async_trait::async_trait;
+use nats::asynk::Connection;
+use once_cell::sync::OnceCell;
+use serde::Serialize;
+use smol::io;
+
+pub(super) static NATS_MSG_BUS: OnceCell<NatsMessageBus> = OnceCell::new();
+pub(super) fn message_bus_init(server: String) {
+    NATS_MSG_BUS.get_or_init(|| {
+        // Waits for the message bus to become ready
+        tokio::runtime::Runtime::new()
+            .unwrap()
+            .block_on(async { NatsMessageBus::new(&server).await })
+    });
+}
+
+// Would we want to have both sync and async clients?
+pub struct NatsMessageBus {
+    connection: Connection,
+}
+impl NatsMessageBus {
+    pub async fn connect(server: &str) -> Connection {
+        info!("Connecting to the nats server {}...", server);
+        // We retry in a loop until successful. Once connected the nats
+        // library will handle reconnections for us.
+        let interval = std::time::Duration::from_millis(500);
+        let mut log_error = true;
+        loop {
+            match nats::asynk::connect(server).await {
+                Ok(connection) => {
+                    info!(
+                        "Successfully connected to the nats server {}",
+                        server
+                    );
+                    return connection;
+                }
+                Err(error) => {
+                    if log_error {
+                        warn!(
+                            "Connection error: {}. Quietly retrying...",
+                            error
+                        );
+                        log_error = false;
+                    }
+                    smol::Timer::after(interval).await;
+                    continue;
+                }
+            }
+        }
+    }
+
+    async fn new(server: &str) -> Self {
+        Self {
+            connection: Self::connect(server).await,
+        }
+    }
+}
+
+#[async_trait]
+impl MessageBus for NatsMessageBus {
+    async fn publish(
+        &self,
+        channel: Channel,
+        message: impl Serialize
+            + std::marker::Send
+            + std::marker::Sync
+            + 'async_trait,
+    ) -> std::io::Result<()> {
+        let payload = serde_json::to_vec(&message)?;
+        self.connection
+            .publish(&channel.to_string(), &payload)
+            .await
+    }
+
+    async fn send(
+        &self,
+        _channel: Channel,
+        _message: impl Serialize
+            + std::marker::Send
+            + std::marker::Sync
+            + 'async_trait,
+    ) -> Result<(), ()> {
+        unimplemented!()
+    }
+
+    async fn request(
+        &self,
+        _channel: Channel,
+        _message: impl Serialize
+            + std::marker::Send
+            + std::marker::Sync
+            + 'async_trait,
+    ) -> Result<Vec<u8>, ()> {
+        unimplemented!()
+    }
+
+    async fn flush(&self) -> io::Result<()> {
+        self.connection.flush().await
+    }
+}
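`message_bus_init` blocks on purpose: it spins up a throwaway Tokio runtime solely to drive the async connect to completion, so that by the time `get_or_init` returns, `NATS_MSG_BUS` is usable from synchronous code. A minimal sketch of that same pattern in isolation (the names here are illustrative, not part of the patch):

    use once_cell::sync::OnceCell;

    static BUS: OnceCell<String> = OnceCell::new();

    fn init_blocking(server: String) {
        BUS.get_or_init(|| {
            // Block the calling thread until the async setup completes;
            // afterwards every reader can assume the value is ready.
            tokio::runtime::Runtime::new()
                .unwrap()
                .block_on(async move { format!("connected to {}", server) })
        });
    }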
diff --git a/mayastor/src/subsys/mbus/mod.rs b/mayastor/src/subsys/mbus/mod.rs
new file mode 100644
index 000000000..8be611238
--- /dev/null
+++ b/mayastor/src/subsys/mbus/mod.rs
@@ -0,0 +1,159 @@
+//! Message bus connecting mayastor to the control plane components.
+//!
+//! It is designed to make sending events to control plane easy in the future.
+//!
+//! A Registration subsystem is used to keep moac in the loop
+//! about the lifecycle of mayastor instances.
+
+pub mod mbus_nats;
+pub mod registration;
+
+use crate::core::MayastorEnvironment;
+use async_trait::async_trait;
+use dns_lookup::{lookup_addr, lookup_host};
+use mbus_nats::NATS_MSG_BUS;
+use registration::Registration;
+use serde::Serialize;
+use smol::io;
+use spdk_sys::{
+    spdk_add_subsystem,
+    spdk_subsystem,
+    spdk_subsystem_fini_next,
+    spdk_subsystem_init_next,
+};
+use std::net::{IpAddr, Ipv4Addr};
+
+pub fn mbus_endpoint(endpoint: Option<String>) -> Option<String> {
+    match endpoint {
+        Some(endpoint) => {
+            let (address_or_ip, port) = if endpoint.contains(':') {
+                let mut s = endpoint.split(':');
+                (
+                    s.next().unwrap(),
+                    s.next().unwrap().parse::<u16>().expect("Invalid Port"),
+                )
+            } else {
+                (endpoint.as_str(), 4222)
+            };
+
+            if let Ok(ipv4) = address_or_ip.parse::<Ipv4Addr>() {
+                lookup_addr(&IpAddr::V4(ipv4)).expect("Invalid Ipv4 Address");
+            } else {
+                lookup_host(&address_or_ip).expect("Invalid Host Name");
+            }
+
+            Some(format!("{}:{}", address_or_ip, port))
+        }
+        _ => None,
+    }
+}
+
+// wrapper around our MBUS subsystem used for registration
+pub struct MessageBusSubsystem(*mut spdk_subsystem);
+
+impl Default for MessageBusSubsystem {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl MessageBusSubsystem {
+    /// initialise a new subsystem that handles the control plane
+    /// message bus registration process
+    extern "C" fn init() {
+        debug!("mayastor mbus subsystem init");
+        let args = MayastorEnvironment::global_or_default();
+        if let (Some(_), Some(grpc)) = (args.mbus_endpoint, args.grpc_endpoint)
+        {
+            Registration::init(&args.node_name, &grpc.to_string());
+        }
+        unsafe { spdk_subsystem_init_next(0) }
+    }
+
+    extern "C" fn fini() {
+        debug!("mayastor mbus subsystem fini");
+        let args = MayastorEnvironment::global_or_default();
+        if args.mbus_endpoint.is_some() && args.grpc_endpoint.is_some() {
+            Registration::get().fini();
+        }
+        unsafe { spdk_subsystem_fini_next() }
+    }
+
+    fn new() -> Self {
+        info!("creating Mayastor mbus subsystem...");
+        let mut ss = Box::new(spdk_subsystem::default());
+        ss.name = b"mayastor_mbus\x00" as *const u8 as *const libc::c_char;
+        ss.init = Some(Self::init);
+        ss.fini = Some(Self::fini);
+        ss.write_config_json = None;
+        Self(Box::into_raw(ss))
+    }
+
+    /// register the subsystem with spdk
+    pub(super) fn register() {
+        unsafe { spdk_add_subsystem(MessageBusSubsystem::new().0) }
+    }
+}
+
+/// Available Message Bus channels
+pub enum Channel {
+    /// Registration of mayastor with the control plane
+    Register,
+    /// DeRegistration of mayastor with the control plane
+    DeRegister,
+}
+
+impl std::fmt::Display for Channel {
+    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+        match *self {
+            Channel::Register => write!(f, "register"),
+            Channel::DeRegister => write!(f, "deregister"),
+        }
+    }
+}
+
+#[async_trait]
+pub trait MessageBus {
+    /// publish a message - not guaranteed to be sent or received (fire and
+    /// forget)
+    async fn publish(
+        &self,
+        channel: Channel,
+        message: impl Serialize
+            + std::marker::Send
+            + std::marker::Sync
+            + 'async_trait,
+    ) -> std::io::Result<()>;
+    /// Send a message and wait for it to be received by the target component
+    async fn send(
+        &self,
+        channel: Channel,
+        message: impl Serialize
+            + std::marker::Send
+            + std::marker::Sync
+            + 'async_trait,
+    ) -> Result<(), ()>;
+    /// Send a message and request a reply from the target component
+    async fn request(
+        &self,
+        channel: Channel,
+        message: impl Serialize
+            + std::marker::Send
+            + std::marker::Sync
+            + 'async_trait,
+    ) -> Result<Vec<u8>, ()>;
+    /// Flush queued messages to the server
+    async fn flush(&self) -> io::Result<()>;
+}
+
+pub fn message_bus_init() {
+    if let Some(nats) = MayastorEnvironment::global_or_default().mbus_endpoint {
+        mbus_nats::message_bus_init(nats);
+    }
+}
+
+pub fn message_bus() -> &'static impl MessageBus {
+    NATS_MSG_BUS
+        .get()
+        .expect("Should be initialised before use")
+}
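`mbus_endpoint` normalizes whatever endpoint string it is given into a `host:port` pair, defaulting to the standard NATS port 4222 and resolving the host up front so that a bad endpoint fails fast at startup. Assuming that behaviour, the mapping looks like this (host names are illustrative):

    // "nats-host"      -> Some("nats-host:4222")   port defaulted
    // "nats-host:4223" -> Some("nats-host:4223")   port preserved
    // "127.0.0.1"      -> Some("127.0.0.1:4222")   IP reverse-resolved first
    // None             -> None                     message bus disabled
    assert_eq!(mbus_endpoint(None), None);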
diff --git a/mayastor/src/subsys/mbus/registration.rs b/mayastor/src/subsys/mbus/registration.rs
new file mode 100644
index 000000000..213384c7c
--- /dev/null
+++ b/mayastor/src/subsys/mbus/registration.rs
@@ -0,0 +1,202 @@
+//! Registration subsystem connecting mayastor to control plane (moac).
+//! A registration message is used to let the control plane know about a
+//! mayastor instance. A deregistration message is used to let the control
+//! plane know that a mayastor instance is going down.
+//!
+//! Registration messages, containing the node name and the grpc endpoint,
+//! are sent every `HB_INTERVAL` by default; the interval can be overridden
+//! with the `MAYASTOR_HB_INTERVAL` environment variable.
+
+use super::MessageBus;
+use crate::subsys::mbus::Channel;
+use futures::{select, FutureExt, StreamExt};
+use once_cell::sync::OnceCell;
+use serde::{Deserialize, Serialize};
+use snafu::Snafu;
+use std::{env, time::Duration};
+
+/// Mayastor sends registration messages in this interval (kind of heart-beat)
+const HB_INTERVAL: Duration = Duration::from_secs(10);
+
+/// Errors for registration operations.
+///
+/// Note: The types here that would normally be used as the source of snafu
+/// errors do not implement the Error trait required by Snafu. So they are
+/// renamed to a "cause" attribute and we use .map_err() instead of .context()
+/// when creating them.
+#[derive(Debug, Snafu)]
+pub enum Error {
+    #[snafu(display(
+        "Failed to connect to the MessageBus server {}: {:?}",
+        server,
+        cause
+    ))]
+    ConnectFailed {
+        cause: std::io::Error,
+        server: String,
+    },
+    #[snafu(display(
+        "Cannot issue requests if message bus hasn't been started"
+    ))]
+    NotStarted {},
+    #[snafu(display("Failed to queue register request: {:?}", cause))]
+    QueueRegister { cause: std::io::Error },
+    #[snafu(display("Failed to queue deregister request: {:?}", cause))]
+    QueueDeregister { cause: std::io::Error },
+}
+
+/// Register message payload
+#[derive(Serialize, Deserialize, Debug)]
+struct RegisterArgs {
+    id: String,
+    #[serde(rename = "grpcEndpoint")]
+    grpc_endpoint: String,
+}
+
+/// Deregister message payload
+#[derive(Serialize, Deserialize, Debug)]
+struct DeregisterArgs {
+    id: String,
+}
+
+#[derive(Clone)]
+struct Configuration {
+    /// Name of the node that mayastor is running on
+    node: String,
+    /// gRPC endpoint of the server provided by mayastor
+    grpc_endpoint: String,
+    /// heartbeat interval (how often the register message is sent)
+    hb_interval: Duration,
+}
+
+#[derive(Clone)]
+pub struct Registration {
+    /// Configuration of the registration
+    config: Configuration,
+    /// Receive channel for messages and termination
+    rcv_chan: smol::channel::Receiver<()>,
+    /// Termination channel
+    fini_chan: smol::channel::Sender<()>,
+}
+
+static MESSAGE_BUS_REG: OnceCell<Registration> = OnceCell::new();
+impl Registration {
+    /// initialise the global registration instance
+    pub(super) fn init(node: &str, grpc_endpoint: &str) {
+        MESSAGE_BUS_REG.get_or_init(|| Registration::new(node, grpc_endpoint));
+    }
+
+    /// terminate the registration loop, which triggers a deregistration
+    pub(super) fn fini(&self) {
+        self.fini_chan.close();
+    }
+
+    pub(super) fn get() -> &'static Registration {
+        MESSAGE_BUS_REG.get().unwrap()
+    }
+
+    /// runner responsible for registering and
+    /// de-registering the mayastor instance on shutdown
+    pub async fn run() -> Result<(), ()> {
+        if let Some(registration) = MESSAGE_BUS_REG.get() {
+            registration.clone().run_loop().await;
+        }
+        Ok(())
+    }
+
+    fn new(node: &str, grpc_endpoint: &str) -> Registration {
+        let (msg_sender, msg_receiver) = smol::channel::unbounded::<()>();
+        let config = Configuration {
+            node: node.to_owned(),
+            grpc_endpoint: grpc_endpoint.to_owned(),
+            hb_interval: match env::var("MAYASTOR_HB_INTERVAL")
+                .map(|v| v.parse::<u64>())
+            {
+                Ok(Ok(num)) => Duration::from_secs(num),
+                _ => HB_INTERVAL,
+            },
+        };
+        Self {
+            config,
+            rcv_chan: msg_receiver,
+            fini_chan: msg_sender,
+        }
+    }
+
+    /// Connect to the server and start emitting periodic register
+    /// messages.
+    /// Runs until the sender side of the message channel is closed
+    pub async fn run_loop(&mut self) {
+        info!(
+            "Registering '{}' and grpc server {} ...",
+            self.config.node, self.config.grpc_endpoint
+        );
+        loop {
+            if let Err(err) = self.register().await {
+                error!("Registration failed: {:?}", err);
+            };
+
+            select! {
+                _ = tokio::time::delay_for(self.config.hb_interval).fuse() => continue,
+                msg = self.rcv_chan.next().fuse() => {
+                    match msg {
+                        Some(_) => log::info!("Messages have not been implemented yet"),
+                        _ => {
+                            log::info!("Terminating the registration handler");
+                            break;
+                        }
+                    }
+                }
+            };
+        }
+        if let Err(err) = self.deregister().await {
+            error!("Deregistration failed: {:?}", err);
+        };
+    }
+
+    /// Send a register message to the MessageBus.
+    async fn register(&self) -> Result<(), Error> {
+        let payload = RegisterArgs {
+            id: self.config.node.clone(),
+            grpc_endpoint: self.config.grpc_endpoint.clone(),
+        };
+        super::message_bus()
+            .publish(Channel::Register, &payload)
+            .await
+            .map_err(|cause| Error::QueueRegister {
+                cause,
+            })?;
+
+        // Note that the message was only queued and we don't know if it was
+        // really sent to the message server
+        // We could explicitly flush to make sure it reaches the server or
+        // use request/reply to guarantee that it was delivered
+        debug!(
+            "Registered '{}' and grpc server {}",
+            self.config.node, self.config.grpc_endpoint
+        );
+        Ok(())
+    }
+
+    /// Send a deregister message to the MessageBus.
+    async fn deregister(&self) -> Result<(), Error> {
+        let payload = DeregisterArgs {
+            id: self.config.node.clone(),
+        };
+        super::message_bus()
+            .publish(Channel::DeRegister, &payload)
+            .await
+            .map_err(|cause| Error::QueueDeregister {
+                cause,
+            })?;
+        if let Err(e) = super::message_bus().flush().await {
+            error!("Failed to explicitly flush: {}", e);
+        }
+
+        info!(
+            "Deregistered '{}' and grpc server {}",
+            self.config.node, self.config.grpc_endpoint
+        );
+        Ok(())
+    }
+}
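The heart of the subsystem is `run_loop`: register, sleep for the heartbeat interval, and drop into a final deregister as soon as the termination channel closes. The same select-over-timer-and-channel shape, reduced to its essentials (a sketch assuming the tokio 0.2 `delay_for` used above):

    use futures::{select, FutureExt, StreamExt};

    async fn heartbeat_loop(
        mut term: smol::channel::Receiver<()>,
        interval: std::time::Duration,
    ) {
        loop {
            // send the register message here
            select! {
                _ = tokio::time::delay_for(interval).fuse() => continue,
                msg = term.next().fuse() => if msg.is_none() { break },
            }
        }
        // send the deregister message here
    }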
diff --git a/mayastor/src/subsys/mod.rs b/mayastor/src/subsys/mod.rs
index c647f673d..7210dd270 100644
--- a/mayastor/src/subsys/mod.rs
+++ b/mayastor/src/subsys/mod.rs
@@ -10,7 +10,10 @@ pub use config::{
     Pool,
 };
 pub use nvmf::{
+    set_snapshot_time,
     Error as NvmfError,
+    NvmeCpl,
+    NvmfReq,
     NvmfSubsystem,
     SubType,
     Target as NvmfTarget,
@@ -21,11 +24,21 @@ use spdk_sys::{
     spdk_subsystem_depend,
 };

+pub use mbus::{
+    mbus_endpoint,
+    message_bus_init,
+    registration::Registration,
+    MessageBus,
+    MessageBusSubsystem,
+};
+
 use crate::subsys::nvmf::Nvmf;

 mod config;
+mod mbus;
 mod nvmf;

+/// Register initial subsystems
 pub(crate) fn register_subsystem() {
     unsafe { spdk_add_subsystem(ConfigSubsystem::new().0) }
     unsafe {
@@ -35,4 +48,5 @@ pub(crate) fn register_subsystem() {
         spdk_add_subsystem(Nvmf::new().0);
         spdk_add_subsystem_depend(Box::into_raw(depend));
     }
+    MessageBusSubsystem::register();
 }
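Every subsystem hooked into SPDK's init chain must hand control back explicitly, otherwise application startup or shutdown stalls; `MessageBusSubsystem` above follows that contract. The skeleton of such a hook pair, stripped of the mbus specifics (a sketch, not the full wiring):

    // Each hook runs once during SPDK startup/shutdown and must call the
    // corresponding *_next function exactly once to advance the chain.
    extern "C" fn init() {
        // subsystem-specific setup goes here
        unsafe { spdk_sys::spdk_subsystem_init_next(0) } // 0 = success
    }

    extern "C" fn fini() {
        // subsystem-specific teardown goes here
        unsafe { spdk_sys::spdk_subsystem_fini_next() }
    }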
diff --git a/mayastor/src/subsys/nvmf/admin_cmd.rs b/mayastor/src/subsys/nvmf/admin_cmd.rs
index c7f3702bc..19930ee27 100644
--- a/mayastor/src/subsys/nvmf/admin_cmd.rs
+++ b/mayastor/src/subsys/nvmf/admin_cmd.rs
@@ -1,13 +1,75 @@
 //! Handlers for custom NVMe Admin commands
-use spdk_sys::{spdk_bdev, spdk_bdev_desc, spdk_io_channel, spdk_nvmf_request};
+use std::{
+    convert::TryFrom,
+    ffi::c_void,
+    ptr::NonNull,
+    time::{SystemTime, UNIX_EPOCH},
+};
+
+use spdk_sys::{
+    spdk_bdev,
+    spdk_bdev_desc,
+    spdk_io_channel,
+    spdk_nvme_cmd,
+    spdk_nvme_cpl,
+    spdk_nvme_status,
+    spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
+    spdk_nvmf_request,
+};

 use crate::{
-    bdev::nexus::nexus_io::nvme_admin_opc,
+    bdev::nexus::{nexus_io::nvme_admin_opc, nexus_module},
     core::{Bdev, Reactors},
-    replica::Replica,
+    lvs::Lvol,
 };

+#[derive(Clone)]
+pub struct NvmeCpl(pub(crate) NonNull<spdk_nvme_cpl>);
+
+impl NvmeCpl {
+    /// Returns the NVMe status
+    pub(crate) fn status(&mut self) -> &mut spdk_nvme_status {
+        unsafe { &mut *spdk_sys::nvme_status_get(self.0.as_mut()) }
+    }
+}
+
+#[derive(Clone)]
+pub struct NvmfReq(pub(crate) NonNull<spdk_nvmf_request>);
+
+impl NvmfReq {
+    /// Returns the NVMe completion
+    pub(crate) fn response(&self) -> NvmeCpl {
+        NvmeCpl(
+            NonNull::new(unsafe {
+                &mut *spdk_sys::spdk_nvmf_request_get_response(self.0.as_ptr())
+            })
+            .unwrap(),
+        )
+    }
+}
+
+impl From<*mut c_void> for NvmfReq {
+    fn from(ptr: *mut c_void) -> Self {
+        NvmfReq(NonNull::new(ptr as *mut spdk_nvmf_request).unwrap())
+    }
+}
+
+/// Set the snapshot time in an spdk_nvme_cmd struct to the current time
+/// Returns seconds since Unix epoch
+pub fn set_snapshot_time(cmd: &mut spdk_nvme_cmd) -> u64 {
+    // encode snapshot time in cdw10/11
+    let now = SystemTime::now()
+        .duration_since(UNIX_EPOCH)
+        .unwrap()
+        .as_secs();
+    unsafe {
+        *spdk_sys::nvme_cmd_cdw10_get(&mut *cmd) = now as u32;
+        *spdk_sys::nvme_cmd_cdw11_get(&mut *cmd) = (now >> 32) as u32;
+    }
+    now as u64
+}
+
 /// NVMf custom command handler for opcode c0h
 /// Called from nvmf_ctrlr_process_admin_cmd
 /// Return: <0 for any error, caller handles it as unsupported opcode
@@ -42,20 +104,32 @@ extern "C" fn nvmf_create_snapshot_hdlr(req: *mut spdk_nvmf_request) -> i32 {
     }

     let bd = Bdev::from(bdev);
-    if let Some(replica) = Replica::from_bdev(&bd) {
-        let cmd = unsafe { &*spdk_sys::spdk_nvmf_request_get_cmd(req) };
+    let base_name = bd.name();
+    if bd.driver() == nexus_module::NEXUS_NAME {
+        // Received command on a published Nexus
+        set_snapshot_time(unsafe {
+            &mut *spdk_sys::spdk_nvmf_request_get_cmd(req)
+        });
+        unsafe {
+            spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(bdev, desc, ch, req, None)
+        }
+    } else if let Ok(lvol) = Lvol::try_from(bd) {
+        // Received command on a shared replica (lvol)
+        let cmd = unsafe { spdk_sys::spdk_nvmf_request_get_cmd(req) };
         let snapshot_time = unsafe {
-            cmd.__bindgen_anon_1.cdw10 as u64
-                | (cmd.__bindgen_anon_2.cdw11 as u64) << 32
+            *spdk_sys::nvme_cmd_cdw10_get(cmd) as u64
+                | (*spdk_sys::nvme_cmd_cdw11_get(cmd) as u64) << 32
         };
         let snapshot_name =
-            Replica::format_snapshot_name(&bd.name(), snapshot_time);
+            Lvol::format_snapshot_name(&base_name, snapshot_time);
+        let nvmf_req = NvmfReq(NonNull::new(req).unwrap());
         // Blobfs operations must be on md_thread
         Reactors::master().send_future(async move {
-            replica.create_snapshot(req, &snapshot_name).await;
+            lvol.create_snapshot(&nvmf_req, &snapshot_name).await;
         });
         1 // SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS
     } else {
+        debug!("unsupported bdev driver");
         -1
     }
 }
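`set_snapshot_time` and the handler above are two halves of one convention: the 64-bit Unix timestamp is split across the 32-bit cdw10/cdw11 fields of the NVMe command on the way out, and reassembled on the way in to derive a deterministic snapshot name. The round trip, in plain arithmetic (the timestamp value is illustrative):

    // Encode: low half in cdw10, high half in cdw11 (as in set_snapshot_time).
    let now: u64 = 1_600_000_000;
    let cdw10 = now as u32;
    let cdw11 = (now >> 32) as u32;

    // Decode: what nvmf_create_snapshot_hdlr does before naming the snapshot.
    let snapshot_time = cdw10 as u64 | (cdw11 as u64) << 32;
    assert_eq!(snapshot_time, now);
    // Lvol::format_snapshot_name(&base_name, snapshot_time) then yields
    // "<base_name>-snap-1600000000".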
diff --git a/mayastor/src/subsys/nvmf/mod.rs b/mayastor/src/subsys/nvmf/mod.rs
index 0299c9240..b964e9353 100644
--- a/mayastor/src/subsys/nvmf/mod.rs
+++ b/mayastor/src/subsys/nvmf/mod.rs
@@ -13,6 +13,7 @@ use std::cell::RefCell;
 use nix::errno::Errno;
 use snafu::Snafu;

+pub use admin_cmd::{set_snapshot_time, NvmeCpl, NvmfReq};
 use poll_groups::PollGroup;
 use spdk_sys::{
     spdk_subsystem,
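Re-exporting `NvmfReq` and `NvmeCpl` lets other modules in the crate complete NVMf requests without handling raw SPDK pointers; the lvol snapshot callback earlier in this diff is the first user. In outline, the completion path looks like this (a sketch assuming an `nvmf_req: NvmfReq` already in scope):

    // Fill in the completion status, then hand the request back to SPDK.
    let mut cpl = nvmf_req.response();
    let status = cpl.status();
    status.set_sct(0); // SPDK_NVME_SCT_GENERIC
    status.set_sc(0); // success
    unsafe { spdk_sys::spdk_nvmf_request_complete(nvmf_req.0.as_ptr()) };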
diff --git a/mayastor/tests/add_child.rs b/mayastor/tests/add_child.rs
index 57780ac10..61ed18bcb 100644
--- a/mayastor/tests/add_child.rs
+++ b/mayastor/tests/add_child.rs
@@ -1,5 +1,8 @@
+#[macro_use]
+extern crate assert_matches;
+
 use mayastor::{
-    bdev::{nexus_create, nexus_lookup, ChildStatus},
+    bdev::{nexus_create, nexus_lookup, ChildState, Reason},
     core::{mayastor_env_stop, MayastorCliArgs, MayastorEnvironment, Reactor},
 };

@@ -48,9 +51,12 @@ fn add_child() {
             .await
             .expect("Failed to add child");
         assert_eq!(nexus.children.len(), 2);
-        // A faulted state indicates the child was added but something
-        // went wrong i.e. the rebuild failed to start
-        assert_ne!(nexus.children[1].status(), ChildStatus::Faulted);
+
+        // Expect the added child to be in the out-of-sync state
+        assert_matches!(
+            nexus.children[1].state(),
+            ChildState::Faulted(Reason::OutOfSync)
+        );
     });

     // Test removing a child from an unshared nexus
@@ -80,9 +86,12 @@ fn add_child() {
             .await
             .expect("Failed to add child");
         assert_eq!(nexus.children.len(), 2);
-        // A faulted state indicates the child was added but something
-        // went wrong i.e. the rebuild failed to start
-        assert_ne!(nexus.children[1].status(), ChildStatus::Faulted);
+
+        // Expect the added child to be in the out-of-sync state
+        assert_matches!(
+            nexus.children[1].state(),
+            ChildState::Faulted(Reason::OutOfSync)
+        );
     });

     // Test removing a child from a shared nexus
diff --git a/mayastor/tests/common/bdev_io.rs b/mayastor/tests/common/bdev_io.rs
index f8a3ecea9..f2fd0c1e1 100644
--- a/mayastor/tests/common/bdev_io.rs
+++ b/mayastor/tests/common/bdev_io.rs
@@ -1,33 +1,41 @@
 use mayastor::core::{BdevHandle, CoreError};

-pub async fn write_some(nexus_name: &str) -> Result<(), CoreError> {
+pub async fn write_some(
+    nexus_name: &str,
+    offset: u64,
+    fill: u8,
+) -> Result<(), CoreError> {
     let h = BdevHandle::open(nexus_name, true, false).unwrap();
     let mut buf = h.dma_malloc(512).expect("failed to allocate buffer");
-    buf.fill(0xff);
+    buf.fill(fill);

     let s = buf.as_slice();
-    assert_eq!(s[0], 0xff);
+    assert_eq!(s[0], fill);

-    h.write_at(0, &buf).await?;
+    h.write_at(offset, &buf).await?;
     Ok(())
 }

-pub async fn read_some(nexus_name: &str) -> Result<(), CoreError> {
+pub async fn read_some(
+    nexus_name: &str,
+    offset: u64,
+    fill: u8,
+) -> Result<(), CoreError> {
     let h = BdevHandle::open(nexus_name, true, false).unwrap();
     let mut buf = h.dma_malloc(1024).expect("failed to allocate buffer");
     let slice = buf.as_mut_slice();

     assert_eq!(slice[0], 0);
-    slice[512] = 0xff;
-    assert_eq!(slice[512], 0xff);
+    slice[512] = fill;
+    assert_eq!(slice[512], fill);

-    let len = h.read_at(0, &mut buf).await?;
+    let len = h.read_at(offset, &mut buf).await?;
     assert_eq!(len, 1024);

     let slice = buf.as_slice();

     for &it in slice.iter().take(512) {
-        assert_eq!(it, 0xff);
+        assert_eq!(it, fill);
     }
     assert_eq!(slice[512], 0);
     Ok(())
diff --git a/mayastor/tests/fault_child.rs b/mayastor/tests/fault_child.rs
new file mode 100644
index 000000000..3e127590e
--- /dev/null
+++ b/mayastor/tests/fault_child.rs
@@ -0,0 +1,35 @@
+use mayastor::{
+    bdev::{nexus_create, nexus_lookup, Reason},
+    core::{mayastor_env_stop, MayastorCliArgs, MayastorEnvironment, Reactor},
+};
+
+pub mod common;
+
+static NEXUS_NAME: &str = "FaultChildNexus";
+static NEXUS_SIZE: u64 = 10 * 1024 * 1024;
+static CHILD_1: &str = "malloc:///malloc0?blk_size=512&size_mb=10";
+static CHILD_2: &str =
"malloc:///malloc1?blk_size=512&size_mb=10"; + +#[test] +fn fault_child() { + common::mayastor_test_init(); + let ms = MayastorEnvironment::new(MayastorCliArgs::default()); + ms.start(|| { + Reactor::block_on(async { + nexus_create(NEXUS_NAME, NEXUS_SIZE, None, &[CHILD_1.to_string()]) + .await + .unwrap(); + let nexus = nexus_lookup(NEXUS_NAME).unwrap(); + // child will stay in a degraded state because we are not rebuilding + nexus.add_child(CHILD_2, true).await.unwrap(); + + // it should not be possible to fault the only healthy child + assert!(nexus.fault_child(CHILD_1, Reason::Unknown).await.is_err()); + // it should be possible to fault an unhealthy child + assert!(nexus.fault_child(CHILD_2, Reason::Unknown).await.is_ok()); + + mayastor_env_stop(0); + }); + }) + .unwrap(); +} diff --git a/mayastor/tests/io.rs b/mayastor/tests/io.rs index 02ca38f62..6719d39b4 100644 --- a/mayastor/tests/io.rs +++ b/mayastor/tests/io.rs @@ -41,7 +41,7 @@ fn io_test() { // only execute one future per reactor loop. async fn start() { bdev_create(BDEVNAME).await.expect("failed to create bdev"); - bdev_io::write_some(BDEVNAME).await.unwrap(); - bdev_io::read_some(BDEVNAME).await.unwrap(); + bdev_io::write_some(BDEVNAME, 0, 0xff).await.unwrap(); + bdev_io::read_some(BDEVNAME, 0, 0xff).await.unwrap(); mayastor_env_stop(0); } diff --git a/mayastor/tests/lvs_pool.rs b/mayastor/tests/lvs_pool.rs index f67069e4f..8f65c3969 100644 --- a/mayastor/tests/lvs_pool.rs +++ b/mayastor/tests/lvs_pool.rs @@ -46,8 +46,6 @@ fn lvs_pool_test() { Lvs::create_or_import(CreatePoolRequest { name: "tpool".into(), disks: vec!["aio:///tmp/disk1.img".into()], - block_size: 0, - io_if: 0, }) .await .unwrap(); @@ -60,8 +58,6 @@ fn lvs_pool_test() { Lvs::create_or_import(CreatePoolRequest { name: "tpool".into(), disks: vec!["aio:///tmp/disk1.img".into()], - block_size: 0, - io_if: 0, }) .await .is_ok(), @@ -173,8 +169,6 @@ fn lvs_pool_test() { let pool2 = Lvs::create_or_import(CreatePoolRequest { name: "tpool2".to_string(), disks: vec!["malloc:///malloc0?size_mb=64".to_string()], - block_size: 0, - io_if: 0, }) .await .unwrap(); @@ -206,8 +200,6 @@ fn lvs_pool_test() { let pool = Lvs::create_or_import(CreatePoolRequest { name: "tpool".to_string(), disks: vec!["aio:///tmp/disk1.img".to_string()], - block_size: 0, - io_if: 0, }) .await .unwrap(); @@ -336,8 +328,6 @@ fn lvs_pool_test() { let pool = Lvs::create_or_import(CreatePoolRequest { name: "tpool".into(), disks: vec!["aio:///tmp/disk1.img".into()], - block_size: 0, - io_if: 0, }) .await .unwrap(); @@ -371,8 +361,6 @@ fn lvs_pool_test() { Lvs::create_or_import(CreatePoolRequest { name: "jpool".into(), disks: vec!["aio:///tmp/disk1.img".into()], - block_size: 0, - io_if: 0, }) .await .err() diff --git a/mayastor/tests/lvs_pool_rpc.rs b/mayastor/tests/lvs_pool_rpc.rs index 9865ac447..71840e002 100644 --- a/mayastor/tests/lvs_pool_rpc.rs +++ b/mayastor/tests/lvs_pool_rpc.rs @@ -33,8 +33,6 @@ fn lvs_pool_rpc() { pool_grpc::create(CreatePoolRequest { name: "tpool".to_string(), disks: vec!["aio:///tmp/disk1.img".into()], - block_size: 0, - io_if: 0, }) .await .unwrap(); @@ -43,8 +41,6 @@ fn lvs_pool_rpc() { pool_grpc::create(CreatePoolRequest { name: "tpool".to_string(), disks: vec!["aio:///tmp/disk1.img".into()], - block_size: 0, - io_if: 0, }) .await .unwrap(); diff --git a/mayastor/tests/nexus_rebuild.rs b/mayastor/tests/nexus_rebuild.rs index a4aa3d491..ddf1a2e6b 100644 --- a/mayastor/tests/nexus_rebuild.rs +++ b/mayastor/tests/nexus_rebuild.rs @@ -6,7 +6,7 @@ use tracing::error; use 
common::error_bdev; use mayastor::{ - bdev::{nexus_lookup, ChildStatus, VerboseError}, + bdev::{nexus_lookup, ChildState, Reason, VerboseError}, core::{MayastorCliArgs, MayastorEnvironment, Mthread, Reactor}, rebuild::{RebuildJob, RebuildState, SEGMENT_SIZE}, }; @@ -635,7 +635,10 @@ fn rebuild_fault_src() { .unwrap(); // allow the nexus futures to run reactor_poll!(10); - assert_eq!(nexus.children[1].status(), ChildStatus::Faulted); + assert_eq!( + nexus.children[1].state(), + ChildState::Faulted(Reason::RebuildFailed) + ); nexus_lookup(nexus_name()).unwrap().destroy().await.unwrap(); }); @@ -669,7 +672,10 @@ fn rebuild_fault_dst() { .unwrap(); // allow the nexus futures to run reactor_poll!(10); - assert_eq!(nexus.children[1].status(), ChildStatus::Faulted); + assert_eq!( + nexus.children[1].state(), + ChildState::Faulted(Reason::RebuildFailed) + ); nexus_lookup(nexus_name()).unwrap().destroy().await.unwrap(); }); diff --git a/mayastor/tests/pool.rs b/mayastor/tests/pool.rs deleted file mode 100644 index 137d78474..000000000 --- a/mayastor/tests/pool.rs +++ /dev/null @@ -1,145 +0,0 @@ -use std::panic::catch_unwind; - -use mayastor::{ - core::{ - mayastor_env_stop, - MayastorCliArgs, - MayastorEnvironment, - Reactor, - Share, - }, - pool::{create_pool, Pool, PoolsIter}, -}; -use rpc::mayastor::CreatePoolRequest; - -pub mod common; - -static DISKNAME1: &str = "/tmp/disk1.img"; - -#[test] -fn create_pool_legacy() { - common::delete_file(&[DISKNAME1.into()]); - common::truncate_file(DISKNAME1, 64 * 1024); - common::mayastor_test_init(); - let mut args = MayastorCliArgs::default(); - args.reactor_mask = "0x3".into(); - - let result = catch_unwind(|| { - MayastorEnvironment::new(args) - .start(|| { - // create a pool with legacy device names - Reactor::block_on(async { - create_pool(CreatePoolRequest { - name: "legacy".into(), - disks: vec![DISKNAME1.to_string()], - block_size: 0, - io_if: 0, - }) - .await - .unwrap(); - }); - - // create a pool using uri's - Reactor::block_on(async { - create_pool(CreatePoolRequest { - name: "uri".into(), - disks: vec!["malloc:///malloc0?size_mb=64".to_string()], - block_size: 0, - io_if: 0, - }) - .await - .unwrap(); - }); - - // should succeed to create the same pool with the same name and - // with the same bdev (idempotent) - - Reactor::block_on(async { - let pool = create_pool(CreatePoolRequest { - name: "uri".into(), - disks: vec!["malloc:///malloc0?size_mb=64".to_string()], - block_size: 0, - io_if: 0, - }) - .await; - - assert_eq!(pool.is_ok(), true); - }); - - // should fail to create the pool with same name and different - // bdev - Reactor::block_on(async { - let pool = create_pool(CreatePoolRequest { - name: "uri".into(), - disks: vec!["malloc:///malloc1?size_mb=64".to_string()], - block_size: 0, - io_if: 0, - }) - .await; - assert_eq!(pool.is_err(), true) - }); - - // validate some properties from the pool(s) - Reactor::block_on(async { - let pool = Pool::lookup("uri").unwrap(); - assert_eq!(pool.get_name(), "uri"); - let bdev = pool.get_base_bdev(); - assert_eq!(bdev.name(), "malloc0"); - assert_eq!( - bdev.bdev_uri().unwrap(), - format!( - "malloc:///malloc0?size_mb=64&uuid={}", - bdev.uuid_as_string() - ) - ); - }); - - // destroy the pool - Reactor::block_on(async { - let pool = Pool::lookup("uri").unwrap(); - pool.destroy().await.unwrap(); - }); - - // destroy the legacy pool - Reactor::block_on(async { - let pool = Pool::lookup("legacy").unwrap(); - pool.destroy().await.unwrap(); - }); - - // create the pools again - Reactor::block_on(async 
diff --git a/mayastor/tests/pool.rs b/mayastor/tests/pool.rs
deleted file mode 100644
index 137d78474..000000000
--- a/mayastor/tests/pool.rs
+++ /dev/null
@@ -1,145 +0,0 @@
-use std::panic::catch_unwind;
-
-use mayastor::{
-    core::{
-        mayastor_env_stop,
-        MayastorCliArgs,
-        MayastorEnvironment,
-        Reactor,
-        Share,
-    },
-    pool::{create_pool, Pool, PoolsIter},
-};
-use rpc::mayastor::CreatePoolRequest;
-
-pub mod common;
-
-static DISKNAME1: &str = "/tmp/disk1.img";
-
-#[test]
-fn create_pool_legacy() {
-    common::delete_file(&[DISKNAME1.into()]);
-    common::truncate_file(DISKNAME1, 64 * 1024);
-    common::mayastor_test_init();
-    let mut args = MayastorCliArgs::default();
-    args.reactor_mask = "0x3".into();
-
-    let result = catch_unwind(|| {
-        MayastorEnvironment::new(args)
-            .start(|| {
-                // create a pool with legacy device names
-                Reactor::block_on(async {
-                    create_pool(CreatePoolRequest {
-                        name: "legacy".into(),
-                        disks: vec![DISKNAME1.to_string()],
-                        block_size: 0,
-                        io_if: 0,
-                    })
-                    .await
-                    .unwrap();
-                });
-
-                // create a pool using uri's
-                Reactor::block_on(async {
-                    create_pool(CreatePoolRequest {
-                        name: "uri".into(),
-                        disks: vec!["malloc:///malloc0?size_mb=64".to_string()],
-                        block_size: 0,
-                        io_if: 0,
-                    })
-                    .await
-                    .unwrap();
-                });
-
-                // should succeed to create the same pool with the same name and
-                // with the same bdev (idempotent)
-
-                Reactor::block_on(async {
-                    let pool = create_pool(CreatePoolRequest {
-                        name: "uri".into(),
-                        disks: vec!["malloc:///malloc0?size_mb=64".to_string()],
-                        block_size: 0,
-                        io_if: 0,
-                    })
-                    .await;
-
-                    assert_eq!(pool.is_ok(), true);
-                });
-
-                // should fail to create the pool with same name and different
-                // bdev
-                Reactor::block_on(async {
-                    let pool = create_pool(CreatePoolRequest {
-                        name: "uri".into(),
-                        disks: vec!["malloc:///malloc1?size_mb=64".to_string()],
-                        block_size: 0,
-                        io_if: 0,
-                    })
-                    .await;
-                    assert_eq!(pool.is_err(), true)
-                });
-
-                // validate some properties from the pool(s)
-                Reactor::block_on(async {
-                    let pool = Pool::lookup("uri").unwrap();
-                    assert_eq!(pool.get_name(), "uri");
-                    let bdev = pool.get_base_bdev();
-                    assert_eq!(bdev.name(), "malloc0");
-                    assert_eq!(
-                        bdev.bdev_uri().unwrap(),
-                        format!(
-                            "malloc:///malloc0?size_mb=64&uuid={}",
-                            bdev.uuid_as_string()
-                        )
-                    );
-                });
-
-                // destroy the pool
-                Reactor::block_on(async {
-                    let pool = Pool::lookup("uri").unwrap();
-                    pool.destroy().await.unwrap();
-                });
-
-                // destroy the legacy pool
-                Reactor::block_on(async {
-                    let pool = Pool::lookup("legacy").unwrap();
-                    pool.destroy().await.unwrap();
-                });
-
-                // create the pools again
-                Reactor::block_on(async {
-                    create_pool(CreatePoolRequest {
-                        name: "uri".into(),
-                        disks: vec!["malloc:///malloc0?size_mb=64".to_string()],
-                        block_size: 0,
-                        io_if: 0,
-                    })
-                    .await
-                    .unwrap();
-
-                    create_pool(CreatePoolRequest {
-                        name: "legacy".into(),
-                        disks: vec![DISKNAME1.to_string()],
-                        block_size: 0,
-                        io_if: 0,
-                    })
-                    .await
-                    .unwrap();
-                });
-
-                // validate they are there again and then destroy them
-                Reactor::block_on(async {
-                    assert_eq!(PoolsIter::new().count(), 2);
-                    for p in PoolsIter::new() {
-                        p.destroy().await.unwrap();
-                    }
-                });
-
-                mayastor_env_stop(0);
-            })
-            .unwrap();
-    });
-
-    common::delete_file(&[DISKNAME1.into()]);
-    result.unwrap();
-}
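The deleted test above exercised the old CreatePoolRequest with explicit block_size and io_if fields. After this series those fields are gone from the message (see the mayastor.proto hunk later in this diff) and live in the disk URI instead. A hedged sketch of what a request looks like afterwards, assuming the regenerated rpc::mayastor::CreatePoolRequest carries only name and disks:

    use rpc::mayastor::CreatePoolRequest;

    fn aio_pool_request() -> CreatePoolRequest {
        CreatePoolRequest {
            name: "pool0".into(),
            // the URI scheme selects the I/O interface (aio, uring, malloc)
            // and the query string carries what used to be block_size
            disks: vec!["aio:///tmp/disk1.img?blk_size=512".into()],
        }
    }

The same URI form shows up in the YAML configs and the replica_snapshot test below.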
diff --git a/mayastor/tests/replica_snapshot.rs b/mayastor/tests/replica_snapshot.rs
index 2a586be10..408ea4a52 100644
--- a/mayastor/tests/replica_snapshot.rs
+++ b/mayastor/tests/replica_snapshot.rs
@@ -11,6 +11,7 @@ use mayastor::{
         MayastorEnvironment,
         Reactor,
     },
+    lvs::Lvol,
     subsys,
     subsys::Config,
 };
@@ -25,6 +26,7 @@ static CFGNAME1: &str = "/tmp/child1.yaml";
 static UUID1: &str = "00000000-76b6-4fcf-864d-1027d4038756";
 static NXNAME: &str = "replica_snapshot_test";
+static NXNAME_SNAP: &str = "replica_snapshot_test-snap";
 
 fn generate_config() {
     let mut config = Config::default();
@@ -35,9 +37,7 @@ fn generate_config() {
     config.nexus_opts.nvmf_nexus_port = 8440;
     let pool = subsys::Pool {
         name: "pool0".to_string(),
-        disks: vec![DISKNAME1.to_string()],
-        blk_size: 512,
-        io_if: 1, // AIO
+        disks: vec!["aio://".to_string() + &DISKNAME1.to_string()],
         replicas: Default::default(),
     };
     config.pools = Some(vec![pool]);
@@ -82,6 +82,26 @@ fn conf_mayastor() {
     }
 }
 
+fn share_snapshot(t: u64) {
+    let msc = "../target/debug/mayastor-client";
+    let output = Command::new(msc)
+        .args(&[
+            "-p",
+            "10125",
+            "replica",
+            "share",
+            &Lvol::format_snapshot_name(UUID1, t),
+            "nvmf",
+        ])
+        .output()
+        .expect("could not exec mayastor-client");
+
+    if !output.status.success() {
+        io::stderr().write_all(&output.stderr).unwrap();
+        panic!("failed to configure mayastor");
+    }
+}
+
 #[test]
 fn replica_snapshot() {
     generate_config();
@@ -99,41 +119,60 @@ fn replica_snapshot() {
     test_init!();
 
     Reactor::block_on(async {
-        create_nexus().await;
-        bdev_io::write_some(NXNAME).await.unwrap();
+        create_nexus(0).await;
+        bdev_io::write_some(NXNAME, 0, 0xff).await.unwrap();
+        // Issue an unimplemented vendor command
         custom_nvme_admin(0xc1)
            .await
            .expect_err("unexpectedly succeeded invalid nvme admin command");
-        bdev_io::read_some(NXNAME).await.unwrap();
-        create_snapshot().await.unwrap();
+        bdev_io::read_some(NXNAME, 0, 0xff).await.unwrap();
+        let t = create_snapshot().await.unwrap();
         // Check that IO to the replica still works after creating a snapshot
-        // Checking the snapshot itself is tbd
-        bdev_io::read_some(NXNAME).await.unwrap();
-        bdev_io::write_some(NXNAME).await.unwrap();
-        bdev_io::read_some(NXNAME).await.unwrap();
+        bdev_io::read_some(NXNAME, 0, 0xff).await.unwrap();
+        bdev_io::write_some(NXNAME, 0, 0xff).await.unwrap();
+        bdev_io::read_some(NXNAME, 0, 0xff).await.unwrap();
+        bdev_io::write_some(NXNAME, 1024, 0xaa).await.unwrap();
+        bdev_io::read_some(NXNAME, 1024, 0xaa).await.unwrap();
+        // Share the snapshot and create a new nexus
+        share_snapshot(t);
+        create_nexus(t).await;
+        bdev_io::write_some(NXNAME_SNAP, 0, 0xff)
+            .await
+            .expect_err("writing to snapshot should fail");
+        // Verify that data read from snapshot remains unchanged
+        bdev_io::write_some(NXNAME, 0, 0x55).await.unwrap();
+        bdev_io::read_some(NXNAME, 0, 0x55).await.unwrap();
+        bdev_io::read_some(NXNAME_SNAP, 0, 0xff).await.unwrap();
+        bdev_io::read_some(NXNAME_SNAP, 1024, 0).await.unwrap();
     });
     mayastor_env_stop(0);
 
     common::delete_file(&[DISKNAME1.to_string()]);
 }
 
-async fn create_nexus() {
-    let ch = vec![
-        "nvmf://127.0.0.1:8430/nqn.2019-05.io.openebs:".to_string()
-            + &UUID1.to_string(),
-    ];
+async fn create_nexus(t: u64) {
+    let mut child_name = "nvmf://127.0.0.1:8430/nqn.2019-05.io.openebs:"
+        .to_string()
+        + &UUID1.to_string();
+    let mut nexus_name = NXNAME;
+    if t > 0 {
+        child_name = Lvol::format_snapshot_name(&child_name, t);
+        nexus_name = NXNAME_SNAP;
+    }
+    let ch = vec![child_name];
 
-    nexus_create(NXNAME, 64 * 1024 * 1024, None, &ch)
+    nexus_create(&nexus_name, 64 * 1024 * 1024, None, &ch)
         .await
         .unwrap();
 }
 
-async fn create_snapshot() -> Result<(), CoreError> {
+async fn create_snapshot() -> Result<u64, CoreError> {
     let h = BdevHandle::open(NXNAME, true, false).unwrap();
-    h.create_snapshot()
+    let t = h
+        .create_snapshot()
         .await
         .expect("failed to create snapshot");
-    Ok(())
+    Ok(t)
 }
 
 async fn custom_nvme_admin(opc: u8) -> Result<(), CoreError> {
diff --git a/mayastor/tests/replica_timeout.rs b/mayastor/tests/replica_timeout.rs
index e9df5aa46..7be08ad5f 100644
--- a/mayastor/tests/replica_timeout.rs
+++ b/mayastor/tests/replica_timeout.rs
@@ -75,8 +75,8 @@ fn replica_stop_cont() {
     Reactor::block_on(async {
         create_nexus(true).await;
-        bdev_io::write_some(NXNAME).await.unwrap();
-        bdev_io::read_some(NXNAME).await.unwrap();
+        bdev_io::write_some(NXNAME, 0, 0xff).await.unwrap();
+        bdev_io::read_some(NXNAME, 0, 0xff).await.unwrap();
         ms.sig_stop();
         let handle = thread::spawn(move || {
             // Sufficiently long to cause a controller reset
@@ -85,11 +85,11 @@ fn replica_stop_cont() {
             ms.sig_cont();
             ms
         });
-        bdev_io::read_some(NXNAME)
+        bdev_io::read_some(NXNAME, 0, 0xff)
             .await
             .expect_err("should fail read after controller reset");
         ms = handle.join().unwrap();
-        bdev_io::read_some(NXNAME)
+        bdev_io::read_some(NXNAME, 0, 0xff)
             .await
             .expect("should read again after Nexus child continued");
         nexus_lookup(NXNAME).unwrap().destroy().await.unwrap();
@@ -116,20 +116,20 @@ fn replica_term() {
     Reactor::block_on(async {
         create_nexus(false).await;
-        bdev_io::write_some(NXNAME).await.unwrap();
-        bdev_io::read_some(NXNAME).await.unwrap();
+        bdev_io::write_some(NXNAME, 0, 0xff).await.unwrap();
+        bdev_io::read_some(NXNAME, 0, 0xff).await.unwrap();
     });
     ms1.sig_term();
     thread::sleep(time::Duration::from_secs(1));
     Reactor::block_on(async {
-        bdev_io::read_some(NXNAME)
+        bdev_io::read_some(NXNAME, 0, 0xff)
             .await
             .expect("should read with 1 Nexus child terminated");
     });
     ms2.sig_term();
     thread::sleep(time::Duration::from_secs(1));
     Reactor::block_on(async {
-        bdev_io::read_some(NXNAME)
+        bdev_io::read_some(NXNAME, 0, 0xff)
             .await
             .expect_err("should fail read with 2 Nexus children terminated");
     });
diff --git a/mayastor/tests/reset.rs b/mayastor/tests/reset.rs
index 4e2020fcd..a28a15a09 100644
--- a/mayastor/tests/reset.rs
+++ b/mayastor/tests/reset.rs
@@ -59,6 +59,8 @@ fn nexus_reset_mirror() {
         "128".to_string(),
         "-y".to_string(),
         "/tmp/child1.yaml".to_string(),
+        "-g".to_string(),
+        "127.0.0.1:10124".to_string(),
     ];
 
     let _ms1 = MayastorProcess::new(Box::from(args)).unwrap();
@@ -68,6 +70,8 @@ fn nexus_reset_mirror() {
         "128".to_string(),
         "-y".to_string(),
         "/tmp/child2.yaml".to_string(),
+        "-g".to_string(),
+        "127.0.0.1:10125".to_string(),
     ];
 
     let _ms2 = MayastorProcess::new(Box::from(args)).unwrap();
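A small sketch of the naming convention the new share_snapshot() and create_nexus(t) rely on. Only the inputs are visible in this diff (a base name plus the snapshot timestamp returned by create_snapshot()); the helper below is hypothetical and simply wraps mayastor::lvs::Lvol::format_snapshot_name, so it assumes the mayastor crate is available:

    use mayastor::lvs::Lvol;

    // t is the timestamp returned by BdevHandle::create_snapshot();
    // by the convention in the test above, t == 0 means "the live
    // replica", anything else selects the snapshot taken at t.
    fn snapshot_child_uri(base: &str, t: u64) -> String {
        if t > 0 {
            Lvol::format_snapshot_name(base, t)
        } else {
            base.to_string()
        }
    }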
diff --git a/mayastor/tests/yaml_config.rs b/mayastor/tests/yaml_config.rs
index 0d7d0b38d..8afbfae37 100644
--- a/mayastor/tests/yaml_config.rs
+++ b/mayastor/tests/yaml_config.rs
@@ -122,8 +122,6 @@ fn yaml_pool_tests() {
     let pool = subsys::Pool {
         name: "tpool".to_string(),
         disks: vec!["/tmp/disk1.img".into()],
-        blk_size: 512,
-        io_if: 1,
         replicas: Default::default(),
     };
 
@@ -236,6 +234,8 @@ fn yaml_multi_maya() {
         "128".into(),
         "-y".into(),
         "/tmp/first.yaml".into(),
+        "-g".to_string(),
+        "127.0.0.1:10124".to_string(),
     ];
 
     let second_args = vec![
@@ -243,6 +243,8 @@ fn yaml_multi_maya() {
         "128".into(),
         "-y".into(),
         "/tmp/second.yaml".into(),
+        "-g".to_string(),
+        "127.0.0.1:10125".to_string(),
     ];
 
     run_test(Box::from(first_args), |ms1| {
diff --git a/nix/pkgs/libspdk/default.nix b/nix/pkgs/libspdk/default.nix
index 8907b781f..f8bde2f56 100644
--- a/nix/pkgs/libspdk/default.nix
+++ b/nix/pkgs/libspdk/default.nix
@@ -74,6 +74,7 @@ let
   buildPhase = ''
     make -j`nproc`
+    find . -type f -name 'libspdk_event_nvmf.a' -delete
     find . -type f -name 'libspdk_ut_mock.a' -delete
     #find . -type f -name 'librte_vhost.a' -delete
diff --git a/nix/pkgs/mayastor/default.nix b/nix/pkgs/mayastor/default.nix
index dbb63bff7..867915a89 100644
--- a/nix/pkgs/mayastor/default.nix
+++ b/nix/pkgs/mayastor/default.nix
@@ -41,7 +41,7 @@ let
   buildProps = rec {
     name = "mayastor";
     #cargoSha256 = "0000000000000000000000000000000000000000000000000000";
-    cargoSha256 = "1p5fng76iifcy5qpbfqqrpwj3dmbi66kr1nl7j0bw6zb8f5a6src";
+    cargoSha256 = "1m8097h48zz4d20gk9q1aw25548m2aqfxjlr6nck7chrqccvwr54";
     inherit version;
     src = whitelistSource ../../../. [
       "Cargo.lock"
diff --git a/nix/test/rebuild/node1-mayastor-config.yaml b/nix/test/rebuild/node1-mayastor-config.yaml
index 5e2cf72fe..0c2ca156f 100644
--- a/nix/test/rebuild/node1-mayastor-config.yaml
+++ b/nix/test/rebuild/node1-mayastor-config.yaml
@@ -12,8 +12,6 @@ err_store_opts:
 pools:
   - name: "pool1"
     disks:
-      - "/dev/vdb"
-    blk_size: 4096
-    io_if: 1
+      - "aio:///dev/vdb?blk_size=4096"
     replicas: []
 implicit_share_base: true
diff --git a/nix/test/rebuild/node2-mayastor-config.yaml b/nix/test/rebuild/node2-mayastor-config.yaml
index 24d56911c..297ce1a48 100644
--- a/nix/test/rebuild/node2-mayastor-config.yaml
+++ b/nix/test/rebuild/node2-mayastor-config.yaml
@@ -12,8 +12,6 @@ err_store_opts:
 pools:
   - name: "pool2"
     disks:
-      - "/dev/vdb"
-    blk_size: 4096
-    io_if: 1
+      - "aio:///dev/vdb?blk_size=4096"
     replicas: []
 implicit_share_base: true
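The YAML hunks above fold blk_size and io_if into the disk URI itself, e.g. "aio:///dev/vdb?blk_size=4096". A minimal, std-only sketch of recovering the block size from such a URI; the real parser inside mayastor is not shown in this diff and may well differ:

    // Pull blk_size back out of a disk URI query string.
    fn blk_size_from_uri(uri: &str) -> Option<u32> {
        // split "aio:///dev/vdb?blk_size=4096" into path and query
        let (_, query) = uri.split_once('?')?;
        query
            .split('&')
            .find_map(|kv| kv.strip_prefix("blk_size="))
            .and_then(|v| v.parse().ok())
    }

    fn main() {
        assert_eq!(blk_size_from_uri("aio:///dev/vdb?blk_size=4096"), Some(4096));
        assert_eq!(blk_size_from_uri("aio:///dev/vdb"), None);
    }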
diff --git a/nvmeadm/tests/discovery_test.rs b/nvmeadm/tests/discovery_test.rs
index 1d25108e7..2efd1b805 100644
--- a/nvmeadm/tests/discovery_test.rs
+++ b/nvmeadm/tests/discovery_test.rs
@@ -11,39 +11,30 @@ use std::{
     time::Duration,
 };
 
-static CONFIG_TEXT: &str = "[Malloc]
-  NumberOfLuns 1
-  LunSizeInMB 64
-  BlockSize 4096
-[Nvmf]
-  AcceptorPollRate 10000
-  ConnectionScheduler RoundRobin
-[Transport]
-  Type TCP
-  # reduce memory requirements
-  NumSharedBuffers 64
-[Subsystem1]
-  NQN nqn.2019-05.io.openebs:disk2
-  Listen TCP 127.0.0.1:NVMF_PORT
-  AllowAnyHost Yes
-  SN MAYASTOR0000000001
-  MN NEXUSController1
-  MaxNamespaces 1
-  Namespace Malloc0 1
+static CONFIG_TEXT: &str = "sync_disable: true
+base_bdevs:
+  - uri: \"malloc:///Malloc0?size_mb=64&blk_size=4096&uuid=dbe4d7eb-118a-4d15-b789-a18d9af6ff29\"
+nexus_opts:
+  nvmf_nexus_port: 4422
+  nvmf_replica_port: NVMF_PORT
+  iscsi_enable: false
+nvmf_tcp_tgt_conf:
+  max_namespaces: 2
 # although not used we still have to reduce mem requirements for iSCSI
-[iSCSI]
-  MaxSessions 1
-  MaxConnectionsPerSession 1
+iscsi_tgt_conf:
+  max_sessions: 1
+  max_connections_per_session: 1
+implicit_share_base: true
 ";
 
-const CONFIG_FILE: &str = "/tmp/nvmeadm_nvmf_target.config";
+const CONFIG_FILE: &str = "/tmp/nvmeadm_nvmf_target.yaml";
 
-const SERVED_DISK_NQN: &str = "nqn.2019-05.io.openebs:disk2";
+const SERVED_DISK_NQN: &str =
+    "nqn.2019-05.io.openebs:dbe4d7eb-118a-4d15-b789-a18d9af6ff29";
 
 const TARGET_PORT: u32 = 9523;
 
-// Writes out a config file for spdk, but with the specified port for nvmf to
-// use
+/// Write out a config file for Mayastor, but with the specified port for nvmf
 fn create_config_file(config_file: &str, nvmf_port: &str) {
     let path = Path::new(config_file);
     let mut config = match File::create(&path) {
@@ -67,8 +58,8 @@ fn create_config_file(config_file: &str, nvmf_port: &str) {
     }
 }
 
-// Waits for spdk to start up and accept connections on the specified port
-fn wait_for_spdk_ready(listening_port: u32) -> Result<(), String> {
+/// Wait for Mayastor to start up and accept connections on the specified port
+fn wait_for_mayastor_ready(listening_port: u32) -> Result<(), String> {
     let dest = format!("127.0.0.1:{}", listening_port);
     let socket_addr: SocketAddr = dest.parse().expect("Badly formed address");
 
@@ -96,20 +87,20 @@ fn wait_for_spdk_ready(listening_port: u32) -> Result<(), String> {
 }
 
 pub struct NvmfTarget {
-    /// The std::process::Child for the process running spdk
+    /// The std::process::Child for the process running Mayastor
     pub spdk_proc: std::process::Child,
 }
 
 impl NvmfTarget {
     pub fn new(config_file: &str, nvmf_port: &str) -> Result {
         create_config_file(config_file, nvmf_port);
-        let spdk_proc = Command::new("../target/debug/spdk")
-            .arg("-c")
+        let spdk_proc = Command::new("../target/debug/mayastor")
+            .arg("-y")
             .arg(CONFIG_FILE)
             .spawn()
             .expect("Failed to start spdk!");
 
-        wait_for_spdk_ready(TARGET_PORT).expect("spdk not ready");
+        wait_for_mayastor_ready(TARGET_PORT).expect("mayastor not ready");
 
         let _ = DiscoveryBuilder::default()
             .transport("tcp".to_string())
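For context, a sketch of how the reworked harness is meant to be driven from a test. NvmfTarget's cleanup side and its exact error type fall outside this excerpt, so treat this as a shape rather than the actual test body:

    #[test]
    fn target_starts_and_serves_nqn() {
        // spawns ../target/debug/mayastor with the generated YAML config
        // and blocks until TARGET_PORT accepts connections
        let _target = NvmfTarget::new(CONFIG_FILE, "9523")
            .expect("failed to start nvmf target");
        // at this point SERVED_DISK_NQN should be discoverable over
        // nvmf at 127.0.0.1:9523
    }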
diff --git a/rpc/proto/mayastor.proto b/rpc/proto/mayastor.proto
index bf61d8d04..614dc50ef 100644
--- a/rpc/proto/mayastor.proto
+++ b/rpc/proto/mayastor.proto
@@ -64,25 +64,19 @@ service Mayastor {
 
   // Snapshot operations
   rpc CreateSnapshot (CreateSnapshotRequest) returns (CreateSnapshotReply) {}
+
+  // Enumerate block devices on current host
+  rpc ListBlockDevices (ListBlockDevicesRequest) returns (ListBlockDevicesReply) {}
 }
 
 // Means no arguments or no return value.
 message Null {}
 
-// I/O interface used for underlying disks in a pool
-enum PoolIoIf {
-  POOL_IO_AUTO = 0;   // prefer uring if supported, falling back to aio
-  POOL_IO_AIO = 1;    // Linux AIO
-  POOL_IO_URING = 2;  // io_uring, requires Linux 5.1
-}
-
 // Create pool arguments.
 // Currently we support only concatenation of disks (RAID-0).
 message CreatePoolRequest {
   string name = 1;           // name of the pool
   repeated string disks = 2; // disk device paths or URIs to be claimed by the pool
-  uint32 block_size = 3;     // when using files, we need to specify the block_size
-  PoolIoIf io_if = 4;        // I/O interface
 }
 
 // State of the storage pool (terminology comes from ZFS).
@@ -377,3 +371,39 @@ message BdevUri {
 message CreateReply {
   string name = 1;
 }
+
+message BlockDevice {
+  message Partition {
+    string parent = 1; // devname of parent device to which this partition belongs
+    uint32 number = 2; // partition number
+    string name = 3;   // partition name
+    string scheme = 4; // partition scheme: gpt, dos, ...
+    string typeid = 5; // partition type identifier
+    string uuid = 6;   // UUID identifying partition
+  }
+  message Filesystem {
+    string fstype = 1;     // filesystem type: ext3, ntfs, ...
+    string label = 2;      // volume label
+    string uuid = 3;       // UUID identifying the volume (filesystem)
+    string mountpoint = 4; // path where filesystem is currently mounted
+  }
+  string devname = 1;           // entry in /dev associated with device
+  string devtype = 2;           // currently "disk" or "partition"
+  uint32 devmajor = 3;          // major device number
+  uint32 devminor = 4;          // minor device number
+  string model = 5;             // device model - useful for identifying mayastor devices
+  string devpath = 6;           // official device path
+  repeated string devlinks = 7; // list of udev generated symlinks by which device may be identified
+  uint64 size = 8;              // size of device in (512 byte) blocks
+  Partition partition = 9;      // partition information in case where device represents a partition
+  Filesystem filesystem = 10;   // filesystem information in case where a filesystem is present
+  bool available = 11;          // identifies if device is available for use (ie. is not "currently" in use)
+}
+
+message ListBlockDevicesRequest {
+  bool all = 1; // list "all" block devices found (not just "available" ones)
+}
+
+message ListBlockDevicesReply {
+  repeated BlockDevice devices = 1;
+}
diff --git a/spdk-sys/build.rs b/spdk-sys/build.rs
index 6c2a34ebd..0d90f995b 100644
--- a/spdk-sys/build.rs
+++ b/spdk-sys/build.rs
@@ -31,6 +31,10 @@ fn build_wrapper() {
         .include("spdk/include")
         .file("logwrapper.c")
         .compile("logwrapper");
+    cc::Build::new()
+        .include("spdk/include")
+        .file("nvme_helper.c")
+        .compile("nvme_helper");
 }
 
 fn main() {
@@ -77,6 +81,8 @@ fn main() {
         .whitelist_function("^bdev.*")
         .whitelist_function("^nbd_.*")
         .whitelist_function("^vbdev_.*")
+        .whitelist_function("^nvme_cmd_.*")
+        .whitelist_function("^nvme_status_.*")
         .blacklist_type("^longfunc")
         .whitelist_var("^NVMF.*")
         .whitelist_var("^SPDK.*")
@@ -117,4 +123,5 @@ fn main() {
     println!("cargo:rerun-if-changed=build.rs");
     println!("cargo:rerun-if-changed=wrapper.h");
     println!("cargo:rerun-if-changed=logwrapper.c");
+    println!("cargo:rerun-if-changed=nvme_helper.c");
 }
diff --git a/spdk-sys/build.sh b/spdk-sys/build.sh
index 7821b9abc..d3fa6eceb 100755
--- a/spdk-sys/build.sh
+++ b/spdk-sys/build.sh
@@ -22,6 +22,7 @@ pushd spdk || { echo "Can not find spdk directory"; exit; }
 make -j $(nproc)
 
 # delete things we for sure do not want link
+find . -type f -name 'libspdk_event_nvmf.a' -delete
 find . -type f -name 'libspdk_ut_mock.a' -delete
 #find . -type f -name 'librte_vhost.a' -delete
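A hedged sketch of calling the new ListBlockDevices RPC from Rust. Only the messages come from the proto above; the module path rpc::mayastor::mayastor_client::MayastorClient, the tonic/tokio plumbing, and the address are assumptions about how the client is generated and deployed:

    use rpc::mayastor::{mayastor_client::MayastorClient, ListBlockDevicesRequest};

    #[tokio::main]
    async fn main() -> Result<(), Box<dyn std::error::Error>> {
        // hypothetical gRPC endpoint of a running mayastor instance
        let mut client = MayastorClient::connect("http://127.0.0.1:10124").await?;
        let reply = client
            // all: false asks only for devices not currently in use
            .list_block_devices(ListBlockDevicesRequest { all: false })
            .await?
            .into_inner();
        for dev in reply.devices {
            println!("{} available={}", dev.devname, dev.available);
        }
        Ok(())
    }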
diff --git a/spdk-sys/nvme_helper.c b/spdk-sys/nvme_helper.c
new file mode 100644
index 000000000..4e97c8ee1
--- /dev/null
+++ b/spdk-sys/nvme_helper.c
@@ -0,0 +1,60 @@
+#include "nvme_helper.h"
+
+#include
+#include
+#include
+#include
+
+uint32_t *
+nvme_cmd_cdw10_get(struct spdk_nvme_cmd *cmd) {
+	return &cmd->cdw10;
+}
+
+uint32_t *
+nvme_cmd_cdw11_get(struct spdk_nvme_cmd *cmd) {
+	return &cmd->cdw11;
+}
+
+struct spdk_nvme_status *
+nvme_status_get(struct spdk_nvme_cpl *cpl) {
+	return &cpl->status;
+}
+
+uint16_t *
+nvme_status_raw_get(struct spdk_nvme_cpl *cpl) {
+	return &cpl->status_raw;
+}
+
+/* Based on spdk_bdev_nvme_admin_passthru with the check for desc->write
+ * removed.
+ * spdk_bdev_nvme_io_passthru has a comment on parsing the command to
+ * determine read or write. As we only have one user, just remove the check.
+ */
+int
+spdk_bdev_nvme_admin_passthru_ro(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+		const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
+		spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
+	struct spdk_bdev_io *bdev_io;
+	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
+
+	bdev_io = bdev_channel_get_io(channel);
+	if (!bdev_io) {
+		return -ENOMEM;
+	}
+
+	bdev_io->internal.ch = channel;
+	bdev_io->internal.desc = desc;
+	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
+	bdev_io->u.nvme_passthru.cmd = *cmd;
+	bdev_io->u.nvme_passthru.buf = buf;
+	bdev_io->u.nvme_passthru.nbytes = nbytes;
+	bdev_io->u.nvme_passthru.md_buf = NULL;
+	bdev_io->u.nvme_passthru.md_len = 0;
+
+	bdev_io_init(bdev_io, bdev, cb_arg, cb);
+
+	bdev_io_submit(bdev_io);
+	return 0;
+}
diff --git a/spdk-sys/nvme_helper.h b/spdk-sys/nvme_helper.h
new file mode 100644
index 000000000..1f43d4fe6
--- /dev/null
+++ b/spdk-sys/nvme_helper.h
@@ -0,0 +1,19 @@
+#include <stddef.h>
+#include <stdint.h>
+
+#include <spdk/bdev.h>
+
+struct spdk_nvme_cmd;
+struct spdk_nvme_cpl;
+struct spdk_nvme_status;
+
+uint32_t *nvme_cmd_cdw10_get(struct spdk_nvme_cmd *cmd);
+uint32_t *nvme_cmd_cdw11_get(struct spdk_nvme_cmd *cmd);
+
+struct spdk_nvme_status *nvme_status_get(struct spdk_nvme_cpl *cpl);
+uint16_t *nvme_status_raw_get(struct spdk_nvme_cpl *cpl);
+
+int
+spdk_bdev_nvme_admin_passthru_ro(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+		const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
+		spdk_bdev_io_completion_cb cb, void *cb_arg);
diff --git a/spdk-sys/wrapper.h b/spdk-sys/wrapper.h
index 5a10f55a0..624ad9a8b 100644
--- a/spdk-sys/wrapper.h
+++ b/spdk-sys/wrapper.h
@@ -35,4 +35,4 @@
 #include
 
 #include "logwrapper.h"
-
+#include "nvme_helper.h"
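What the shim buys the Rust side: bindgen (whitelisted via ^nvme_cmd_.* and ^nvme_status_.* in the build.rs hunk above) exposes these accessors, so command dwords can be set without modelling C bitfield layouts in Rust. A sketch assuming the generated binding keeps the C signature (*mut spdk_nvme_cmd -> *mut u32); the spdk_sys paths are assumptions:

    use spdk_sys::{nvme_cmd_cdw10_get, spdk_nvme_cmd};

    // Hypothetical helper: write CDW10 of an admin command through the shim.
    fn set_cdw10(cmd: &mut spdk_nvme_cmd, value: u32) {
        unsafe {
            // the shim returns a raw pointer into the command structure
            *nvme_cmd_cdw10_get(cmd) = value;
        }
    }

    fn main() {
        // a zeroed C struct is a valid starting point for a passthru command
        let mut cmd: spdk_nvme_cmd = unsafe { std::mem::zeroed() };
        set_cdw10(&mut cmd, 42);
    }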